def pad_up_to(vector, size, rank):
    # Zero-pad `vector` along axis 1 until its second dimension equals `size`;
    # the assert guarantees the requested padding amount is never negative.
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0] * (rank - 1)], axis=0), shape=((rank + 1), 2))
        return tf.pad(vector, padding, mode='constant')
Example source code for Python assert_non_negative()
Source: threepart_aligner.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
Source: seq2seq_aligner.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
def pad_up_to(vector, size):
    rank = vector.get_shape().ndims - 1
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0] * (rank - 1)], axis=0), shape=((rank + 1), 2))
        return tf.pad(vector, padding, mode='constant')
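
A minimal usage sketch for the pad_up_to helpers above (assuming TensorFlow 1.x graph mode; the rank-3 logits batch is made up for illustration). It calls the variant that infers the rank from the tensor; the first variant behaves the same with the rank passed explicitly. If `size` is smaller than the tensor's current second dimension, length_diff becomes negative and tf.assert_non_negative raises an InvalidArgumentError at run time.

import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, None, 10))  # (batch, time, vocab)
padded = pad_up_to(logits, size=6)

with tf.Session() as sess:
    batch = np.zeros((2, 3, 10), dtype=np.float32)
    print(sess.run(padded, feed_dict={logits: batch}).shape)  # (2, 6, 10)
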
Source: seq2seq_aligner.py, project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab
def add_loss_op(self, result):
    logits = result.rnn_output
    with tf.control_dependencies([tf.assert_positive(tf.shape(logits)[1], data=[tf.shape(logits)])]):
        length_diff = tf.reshape(self.config.max_length - tf.shape(logits)[1], shape=(1,))
    padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0]], axis=0), shape=(3, 2))
    preds = tf.pad(logits, padding, mode='constant')

    # add epsilon to avoid division by 0
    preds = preds + 1e-5

    mask = tf.sequence_mask(self.output_length_placeholder, self.config.max_length, dtype=tf.float32)
    loss = tf.contrib.seq2seq.sequence_loss(preds, self.output_placeholder, mask)

    # Returning tf.identity under the control dependency forces the assert to run.
    with tf.control_dependencies([tf.assert_non_negative(loss, data=[preds, mask], summarize=256*60*300)]):
        return tf.identity(loss)
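
The final control_dependencies block is what actually makes the check execute: a TF 1.x assert op only runs if some fetched tensor depends on it, so the loss is routed through tf.identity. A stripped-down sketch of the same idiom, with a hypothetical constant standing in for the sequence loss above:

import tensorflow as tf

def checked_non_negative(loss):
    # tf.identity is created under the assert's control dependency, so
    # fetching the returned tensor forces the check to execute.
    check = tf.assert_non_negative(loss, data=[loss], message='loss went negative')
    with tf.control_dependencies([check]):
        return tf.identity(loss)

some_loss = tf.constant(0.37)  # hypothetical stand-in for the sequence loss
safe_loss = checked_non_negative(some_loss)
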
def mix_prediction(losses, lam=0., mean_typ='arithmetic', weight_typ='normal', sign=-1., sf=1e-3):
    # losses is shape (# of discriminators x batch_size)
    # output is scalar
    tf.assert_non_negative(lam)
    assert mean_typ in ['arithmetic', 'geometric', 'harmonic']
    assert weight_typ in ['normal', 'log']
    assert sign == 1. or sign == -1.
    assert sf > 0.

    if lam == 0.:
        weights = tf.ones_like(losses)
    else:
        if weight_typ == 'log':
            weights = tf.pow(losses, lam)
        else:
            weights = tf.exp(lam * losses)

    if mean_typ == 'arithmetic':
        loss = weighted_arithmetic(weights, losses)
    elif mean_typ == 'geometric':
        log_losses = tf.log(sign * losses)
        loss = sign * tf.exp(weighted_arithmetic(weights, log_losses))
    else:
        mn = tf.reduce_min(losses) - sf
        inv_losses = tf.reciprocal(losses - mn)
        loss = mn + tf.reciprocal(weighted_arithmetic(weights, inv_losses))

    return loss
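
mix_prediction relies on a weighted_arithmetic helper that is not shown in this snippet. A plausible definition (a weighted mean, assumed here rather than taken from the original project) and a call with per-discriminator losses might look like:

import tensorflow as tf

def weighted_arithmetic(weights, values):
    # Assumed helper: weighted mean sum(w * v) / sum(w).
    return tf.reduce_sum(weights * values) / tf.reduce_sum(weights)

# One row of losses per discriminator, one column per batch element.
disc_losses = tf.random_uniform((4, 32), minval=-1.0, maxval=0.0)
mixed = mix_prediction(disc_losses, lam=0.5, mean_typ='arithmetic', weight_typ='normal')
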
def five_crops(image, crop_size):
    """ Returns the central and four corner crops of `crop_size` from `image`. """
    image_size = tf.shape(image)[:2]
    crop_margin = tf.subtract(image_size, crop_size)
    assert_size = tf.assert_non_negative(
        crop_margin, message='Crop size must be smaller or equal to the image size.')
    with tf.control_dependencies([assert_size]):
        top_left = tf.floor_div(crop_margin, 2)
        bottom_right = tf.add(top_left, crop_size)
    center = image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
    top_left = image[:-crop_margin[0], :-crop_margin[1]]
    top_right = image[:-crop_margin[0], crop_margin[1]:]
    bottom_left = image[crop_margin[0]:, :-crop_margin[1]]
    bottom_right = image[crop_margin[0]:, crop_margin[1]:]
    return center, top_left, top_right, bottom_left, bottom_right
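
A usage sketch for five_crops, assuming TF 1.x graph mode and a source image larger than the crop; the file name is hypothetical:

import tensorflow as tf

image = tf.image.decode_jpeg(tf.read_file('photo.jpg'), channels=3)  # hypothetical file
crops = five_crops(image, crop_size=tf.constant([224, 224]))

with tf.Session() as sess:
    center, top_left, top_right, bottom_left, bottom_right = sess.run(crops)
    print(center.shape)  # (224, 224, 3), provided the image is larger than 224x224
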
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
    """Create an image summary op for use in a TensorFlow graph.

    Arguments:
      name: A unique name for the generated summary node.
      images: A `Tensor` representing pixel data with shape `[k, w, h, c]`,
        where `k` is the number of images, `w` and `h` are the width and
        height of the images, and `c` is the number of channels, which
        should be 1, 3, or 4. Any of the dimensions may be statically
        unknown (i.e., `None`).
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[GraphKeys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack([tf.as_string(image_shape[1], name='width'),
                               tf.as_string(image_shape[2], name='height')],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
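
A sketch of wiring this summary op into a graph. It assumes the module context the snippet comes from (in particular the `metadata` helper used above); the placeholder shape and random batch are made up for illustration:

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.uint8, shape=(None, 64, 64, 3))
image_summary = op('training_images', images, max_outputs=2,
                   description='First two images of every logged batch.')

with tf.Session() as sess:
    batch = np.random.randint(0, 256, size=(8, 64, 64, 3), dtype=np.uint8)
    serialized = sess.run(image_summary, feed_dict={images: batch})
    # `serialized` is a Summary protobuf string that a
    # tf.summary.FileWriter can consume via add_summary().
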