def resize_axis(tensor, axis, new_size, fill_value=0):
"""Truncates or pads a tensor to new_size on on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
cast to the type of tensor.
Returns:
The resized tensor.
"""
tensor = tf.convert_to_tensor(tensor)
shape = tf.unstack(tf.shape(tensor))
pad_shape = shape[:]
pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
shape[axis] = tf.minimum(shape[axis], new_size)
shape = tf.stack(shape)
resized = tf.concat([
tf.slice(tensor, tf.zeros_like(shape), shape),
tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
], axis)
# Update shape.
new_shape = tensor.get_shape().as_list() # A copy is being made.
new_shape[axis] = new_size
resized.set_shape(new_shape)
return resized
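# A minimal usage sketch (assumes TF 1.x graph mode and the resize_axis above;
# the example tensor and fill value are illustrative):
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
padded = resize_axis(t, axis=1, new_size=5, fill_value=-1)  # shape [2, 5]
truncated = resize_axis(t, axis=1, new_size=2)              # shape [2, 2]
with tf.Session() as sess:
    print(sess.run(padded))     # [[ 1  2  3 -1 -1] [ 4  5  6 -1 -1]]
    print(sess.run(truncated))  # [[1 2] [4 5]]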
def init_memory(self, batch_size):
"""
Returns the memory state for step 0; used in the DNC as the initial set of
loop variables for tf.while_loop.
:return: list of [read_weightings, write_weighting, usage_vector,
    precedence_weighting, memory_matrix, link_matrix, read_vectors]
"""
read_weightings = tf.fill([batch_size, self.memory_size, self.num_read_heads], Memory.epsilon)
write_weighting = tf.fill([batch_size, self.memory_size], Memory.epsilon, name="Write_weighting")
precedence_weighting = tf.zeros([batch_size, self.memory_size], name="Precedence_weighting")
m = tf.fill([batch_size, self.memory_size, self.word_size], Memory.epsilon) # initial memory matrix
usage_vector = tf.zeros([batch_size, self.memory_size], name="Usage_vector")
link_matrix = tf.zeros([batch_size, self.memory_size, self.memory_size])
read_vectors = tf.fill([batch_size, self.num_read_heads, self.word_size], Memory.epsilon)
return [read_weightings, write_weighting, usage_vector, precedence_weighting, m, link_matrix, read_vectors]
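# A hedged sketch of how a state list like the one above feeds tf.while_loop:
# the list becomes the loop variables, and body() must return an updated list
# with the same structure. The update rule below is a placeholder, not a real
# DNC step, and 1e-6 stands in for Memory.epsilon.
import tensorflow as tf

batch_size, memory_size = 2, 4
state = [tf.fill([batch_size, memory_size], 1e-6),  # e.g. a weighting
         tf.zeros([batch_size, memory_size])]       # e.g. a usage vector

def cond(step, *loop_state):
    return step < 3

def body(step, weighting, usage):
    return [step + 1, weighting * 0.5, usage + 0.1]  # placeholder update

final_state = tf.while_loop(cond, body, [tf.constant(0)] + state)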
def _cls_mining(self, scores, status, hard_neg_ratio=3.0, scope=None):
"""
Positive classification loss and hard negative classification loss
ARGS
scores: [n, n_classes]
status: int [n] node or link matching status
RETURNS
pos_loss: []
n_pos: int []
hard_neg_loss: []
n_hard_neg: []
"""
with tf.variable_scope(scope or 'cls_mining'):
# positive classification loss
pos_mask = tf.equal(status, MATCH_STATUS_POS)
pos_scores = tf.boolean_mask(scores, pos_mask)
n_pos = tf.shape(pos_scores)[0]
pos_labels = tf.fill([n_pos], POS_LABEL)
pos_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pos_scores, labels=pos_labels))
# hard negative classification loss
neg_mask = tf.equal(status, MATCH_STATUS_NEG)
neg_scores = tf.boolean_mask(scores, neg_mask)
n_neg = tf.shape(neg_scores)[0]
n_hard_neg = tf.cast(n_pos, tf.float32) * hard_neg_ratio
n_hard_neg = tf.minimum(n_hard_neg, tf.cast(n_neg, tf.float32))
n_hard_neg = tf.cast(n_hard_neg, tf.int32)
neg_prob = tf.nn.softmax(neg_scores)[:, NEG_LABEL]
# select the k negatives with the lowest negative-class probability (the hardest negatives)
_, hard_neg_indices = tf.nn.top_k(-neg_prob, k=n_hard_neg)
hard_neg_scores = tf.gather(neg_scores, hard_neg_indices)
hard_neg_labels = tf.fill([n_hard_neg], NEG_LABEL)
hard_neg_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=hard_neg_scores, labels=hard_neg_labels))
return pos_loss, n_pos, hard_neg_loss, n_hard_neg
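# A hedged standalone sketch of the hard-negative selection used above:
# tf.nn.top_k on the negated probability keeps the k negatives the model is
# least confident about. Values are illustrative.
import tensorflow as tf

neg_prob = tf.constant([0.9, 0.2, 0.6, 0.1])  # P(negative class) per example
_, hard_idx = tf.nn.top_k(-neg_prob, k=2)     # indices of the smallest probs
with tf.Session() as sess:
    print(sess.run(hard_idx))  # [3 1] -> the two hardest negatives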
def apply_attention(attn_scores, states, length, is_self=False, with_sentinel=True, reuse=False):
attn_scores += tf.expand_dims(misc.mask_for_lengths(length, tf.shape(attn_scores)[2]), 1)
if is_self:
# exclude attending to state itself
attn_scores += tf.expand_dims(tf.diag(tf.fill([tf.shape(attn_scores)[1]], -1e6)), 0)
if with_sentinel:
with tf.variable_scope('sentinel', reuse=reuse):
s = tf.get_variable('score', [1, 1, 1], tf.float32, tf.zeros_initializer())
s = tf.tile(s, [tf.shape(attn_scores)[0], tf.shape(attn_scores)[1], 1])
attn_probs = tf.nn.softmax(tf.concat([s, attn_scores], 2))
attn_probs = attn_probs[:, :, 1:]
else:
attn_probs = tf.nn.softmax(attn_scores)
attn_states = tf.einsum('abd,adc->abc', attn_probs, states)
return attn_scores, attn_probs, attn_states
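# A hedged sketch of the self-masking trick above: adding a large negative
# value on the diagonal drives each position's attention to itself to ~0
# after the softmax. Shapes are illustrative.
import tensorflow as tf

scores = tf.zeros([1, 3, 3])                           # [batch, query, key]
mask = tf.expand_dims(tf.diag(tf.fill([3], -1e6)), 0)  # -1e6 on the diagonal
probs = tf.nn.softmax(scores + mask)                   # diagonal probs ~ 0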
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
# Take off the last column
sliced = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
# Append a column filled with <GO>
decoder_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), sliced], 1)
return decoder_input
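# A hedged usage sketch (TF 1.x) with a toy vocabulary: the last column
# (typically <EOS>) is dropped and a <GO> column is prepended.
import tensorflow as tf

target_vocab_to_int = {'<GO>': 1, '<EOS>': 2, 'a': 3, 'b': 4}  # toy vocab
target_data = tf.constant([[3, 4, 2], [4, 3, 2]])  # rows end with <EOS>
decoder_input = process_decoder_input(target_data, target_vocab_to_int, 2)
with tf.Session() as sess:
    print(sess.run(decoder_input))  # [[1 3 4] [1 4 3]]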
def ctc_label_dense_to_sparse(labels, label_lengths):
# undocumented feature soon to be made public
from tensorflow.python.ops import functional_ops
label_shape = tf.shape(labels)
num_batches_tns = tf.stack([label_shape[0]])
max_num_labels_tns = tf.stack([label_shape[1]])
def range_less_than(previous_state, current_input):
return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)
init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
dense_mask = functional_ops.scan(range_less_than, label_lengths,
initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = tf.boolean_mask(label_array, dense_mask)
batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
max_num_labels_tns), tf.reverse(label_shape, [0])))
batch_ind = tf.boolean_mask(batch_array, dense_mask)
indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
vals_sparse = tf.gather_nd(labels, indices)
return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
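# A hedged usage sketch (TF 1.x): the SparseTensor built above is the label
# format expected by tf.nn.ctc_loss. Shapes and values are illustrative.
import tensorflow as tf

labels = tf.constant([[1, 2, 0], [2, 0, 0]])  # dense, zero-padded label rows
label_lengths = tf.constant([2, 1])           # true length of each row
sparse_labels = ctc_label_dense_to_sparse(labels, label_lengths)

logits = tf.random_normal([5, 2, 4])          # [max_time, batch, num_classes]
seq_len = tf.constant([5, 5])
loss = tf.nn.ctc_loss(sparse_labels, logits, seq_len)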
def testRandomPatchImageBboxes(self):
"""Tests the integrity of the return values of random_patch
When bboxes is not None.
"""
im_shape = (800, 600, 3)
total_boxes = 5
# We don't care about the label
label = 3
# First test case, we use randomly generated image and bboxes.
image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
# Add a label to each bbox.
bboxes_w_label = tf.concat(
[
bboxes,
tf.fill((bboxes.shape[0], 1), label)
],
axis=1
)
config = self._random_patch_config
ret_image, ret_bboxes = self._random_patch(
image, config, bboxes_w_label
)
# Assertions
self.assertLessEqual(ret_bboxes.shape[0], total_boxes)
self.assertGreater(ret_bboxes.shape[0], 0)
self.assertTrue(np.all(ret_bboxes >= 0))
self.assertTrue(np.all(
ret_bboxes[:, 0] <= ret_image.shape[1]
))
self.assertTrue(np.all(
ret_bboxes[:, 1] <= ret_image.shape[0]
))
self.assertTrue(np.all(
ret_bboxes[:, 2] <= ret_image.shape[1]
))
self.assertTrue(np.all(
ret_bboxes[:, 3] <= ret_image.shape[0]
))
self.assertTrue(np.all(ret_image.shape <= im_shape))
def testRandomPatchLargerThanImage(self):
"""Tests random_patch normalizes the minimum sizes.
"""
im_shape = (600, 800, 3)
total_boxes = 5
config = EasyDict({
'min_height': 900,
'min_width': 900
})
label = 3
image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
# Add a label to each bbox.
bboxes_w_label = tf.concat(
[
bboxes,
tf.fill((bboxes.shape[0], 1), label)
],
axis=1
)
ret_image, ret_bboxes = self._random_patch(
image, config, bboxes_w_label
)
# Assertions
self.assertLessEqual(ret_bboxes.shape[0], total_boxes)
self.assertGreater(ret_bboxes.shape[0], 0)
self.assertTrue(np.all(ret_bboxes >= 0))
self.assertTrue(np.all(
ret_bboxes[:, 0] <= ret_image.shape[1]
))
self.assertTrue(np.all(
ret_bboxes[:, 1] <= ret_image.shape[0]
))
self.assertTrue(np.all(
ret_bboxes[:, 2] <= ret_image.shape[1]
))
self.assertTrue(np.all(
ret_bboxes[:, 3] <= ret_image.shape[0]
))
self.assertTrue(np.all(ret_image.shape <= im_shape))
def testRandomResizeImageBboxes(self):
"""Tests the integrity of the return values of random_resize
This tests the case when bboxes is not None.
"""
im_shape = (600, 800, 3)
config = self._random_resize_config
total_boxes = 5
label = 3
image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
# Add a label to each bbox.
bboxes_w_label = tf.concat(
[
bboxes,
tf.fill((bboxes.shape[0], 1), label)
],
axis=1
)
ret_image, ret_bboxes = self._random_resize(
image, config, bboxes_w_label
)
# Assertions
self.assertEqual(ret_bboxes.shape[0], total_boxes)
self.assertTrue(np.all(
np.asarray(ret_image.shape[:2]) >= config.min_size
))
self.assertTrue(np.all(
np.asarray(ret_image.shape[:2]) <= config.max_size
))
def testRandomDistort(self):
"""Tests the integrity of the return values of random_distortion.
"""
im_shape = (600, 900, 3)
config = self._random_distort_config
total_boxes = 5
label = 3
image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
# Add a label to each bbox.
bboxes_w_label = tf.concat(
[
bboxes,
tf.fill((bboxes.shape[0], 1), label)
],
axis=1
)
ret_image, ret_bboxes = self._random_distort(
image, config, bboxes_w_label
)
# Assertions
self.assertEqual(im_shape, ret_image.shape)
self.assertAllEqual(
bboxes, ret_bboxes[:, :4]
)
def testSmallRandomDistort(self):
"""Tests random_distort with small-change arguments.
We pass parameters to random_distort that should barely change the image,
then check that the returned image is indeed close to the original.
"""
total_boxes = 3
im_shape = (600, 900, 3)
config = EasyDict({
'brightness': {
'max_delta': 0.00001,
},
'hue': {
'max_delta': 0.00001,
},
'saturation': {
'lower': 0.99999,
'upper': 1.00001,
},
'contrast': {
'lower': 0.99999,
'upper': 1.00001
}
})
label = 3
image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
# Add a label to each bbox.
bboxes_w_label = tf.concat(
[
bboxes,
tf.fill((bboxes.shape[0], 1), label)
],
axis=1
)
ret_image, ret_bboxes = self._random_distort(
image, config, bboxes_w_label
)
# Assertions
large_number = 0.1
self.assertAllClose(image, ret_image, rtol=0.05, atol=large_number)
def test_fill(self):
# computation
f = tf.fill([2, 3], 5)
# test
self.run(f)
def testInitialStateTuple(self, trainable, use_custom_initial_value,
state_size):
batch_size = 6
# Set the attribute on the class, since we can't set properties of
# abstract classes.
snt.RNNCore.state_size = state_size
flat_state_size = nest.flatten(state_size)
core = snt.RNNCore(name="dummy_core")
if use_custom_initial_value:
flat_initializer = [tf.constant_initializer(2)] * len(flat_state_size)
trainable_initializers = nest.pack_sequence_as(
structure=state_size, flat_sequence=flat_initializer)
else:
trainable_initializers = None
initial_state = core.initial_state(
batch_size, dtype=tf.float32, trainable=trainable,
trainable_initializers=trainable_initializers)
nest.assert_same_structure(initial_state, state_size)
flat_initial_state = nest.flatten(initial_state)
for state, size in zip(flat_initial_state, flat_state_size):
self.assertEqual(state.get_shape(), [batch_size, size])
with self.test_session() as sess:
tf.global_variables_initializer().run()
flat_initial_state_value = sess.run(flat_initial_state)
for value, size in zip(flat_initial_state_value, flat_state_size):
expected_initial_state = np.empty([batch_size, size])
if not trainable:
expected_initial_state.fill(0)
elif use_custom_initial_value:
expected_initial_state.fill(2)
else:
value_row = value[0]
expected_initial_state = np.tile(value_row, (batch_size, 1))
self.assertAllClose(value, expected_initial_state)
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
batch_size = self.batch_size(features, labels)
if self.use_beam_search:
batch_size = self.params["inference.beam_search.beam_width"]
target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
embedding=self.target_embedding,
start_tokens=tf.fill([batch_size], target_start_id),
end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
decoder_initial_state = bridge()
return decoder(decoder_initial_state, helper_infer)
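# A hedged sketch (assumes TF 1.x tf.contrib.seq2seq, a common source of the
# GreedyEmbeddingHelper used above): tf.fill broadcasts the start-token id
# across the batch. Vocabulary size and token ids here are hypothetical.
import tensorflow as tf

batch_size = 4
embedding = tf.get_variable('embedding', [100, 32])  # [vocab_size, embed_dim]
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
    embedding=embedding,
    start_tokens=tf.fill([batch_size], 1),  # e.g. SEQUENCE_START id
    end_token=2)                            # e.g. SEQUENCE_END id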
def scale_by_min_max(x, output_min=0.0, output_max=1.0, name=None):
"""Scale a numerical column into the range [output_min, output_max].
Args:
x: A numeric `Tensor`.
output_min: The minimum of the range of output values.
output_max: The maximum of the range of output values.
name: (Optional) A name for this operation.
Returns:
A `Tensor` containing the input column scaled to [output_min, output_max].
Raises:
ValueError: If output_min, output_max have the wrong order.
"""
with tf.name_scope(name, 'scale_by_min_max'):
if output_min >= output_max:
raise ValueError('output_min must be less than output_max')
x = tf.to_float(x)
min_x_value = analyzers.min(x)
max_x_value = analyzers.max(x)
x_shape = tf.shape(x)
# If min == max, the result will be the mean of the requested range.
# Note that both branches of tf.where are evaluated, which means the
# division can produce unused NaNs when min == max.
scaled_result = tf.where(
tf.fill(x_shape, min_x_value < max_x_value),
(x - min_x_value) / (max_x_value - min_x_value), tf.fill(x_shape, 0.5))
return (scaled_result * (output_max - output_min)) + output_min
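# A hedged numeric sketch of the same min/max scaling without the
# tf.Transform analyzers; tf.reduce_min/tf.reduce_max stand in for
# analyzers.min/analyzers.max here.
import tensorflow as tf

x = tf.constant([1.0, 3.0, 5.0])
min_x, max_x = tf.reduce_min(x), tf.reduce_max(x)
scaled = tf.where(tf.fill(tf.shape(x), min_x < max_x),
                  (x - min_x) / (max_x - min_x),
                  tf.fill(tf.shape(x), 0.5))
with tf.Session() as sess:
    print(sess.run(scaled))  # [0.  0.5 1. ]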