def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
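# A minimal usage sketch (not from the original project; assumes TF 1.x, and
# the shapes and values below are illustrative only):
import tensorflow as tf

model_input = tf.random_uniform([2, 5, 3])   # 2 videos, 5 max frames, 3 features
num_frames = tf.constant([[5], [3]])         # true frame count per video
sampled = SampleRandomFrames(model_input, num_frames, num_samples=4)
with tf.Session() as sess:
    print(sess.run(sampled).shape)           # (2, 4, 3)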
def value_transition(self, curr_state, next_symbols, batch_size):
    first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
    num_value_tokens = self.output_size - first_value_token
    with tf.name_scope('grammar_transition'):
        # Shift value tokens (ids >= num_control_tokens) up into the
        # value-token range of the transition matrix.
        adjusted_next_symbols = tf.where(
            next_symbols >= self.num_control_tokens,
            next_symbols + (first_value_token - self.num_control_tokens),
            next_symbols)

        assert1 = tf.Assert(
            tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)),
            [curr_state, next_symbols])
        with tf.control_dependencies([assert1]):
            transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
        assert transitions.get_shape()[1:] == (self.output_size,)

        indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
        next_state = tf.gather_nd(transitions, indices)

        assert2 = tf.Assert(tf.reduce_all(next_state >= 0),
                            [curr_state, adjusted_next_symbols, next_state])
        with tf.control_dependencies([assert2]):
            return tf.identity(next_state)
def levenshtein(a, b):
    "Calculates the Levenshtein distance between a and b."
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a, b = b, a
        n, m = m, n

    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)

    return current[n]
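# Quick sanity check with standard textbook values:
assert levenshtein("kitten", "sitting") == 3
assert levenshtein("", "abc") == 3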
# gather_nd is taken from https://github.com/tensorflow/tensorflow/issues/206#issuecomment-229678962
#
# Unfortunately we can't just use tf.gather_nd because it does not have gradients
# implemented yet, so we need this workaround.
#
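# A minimal sketch of that kind of workaround (reconstructed here, not the
# verbatim code from the linked issue): tf.gather did have gradients, so
# flatten the params tensor and index it with flat offsets instead.
def gather_nd_workaround(params, indices):
    """params: [batch, length, depth]; indices: [batch, k, 2] of
    (batch, position) pairs. Returns [batch, k, depth], like tf.gather_nd."""
    shape = tf.shape(params)
    flat = tf.reshape(params, [-1, shape[2]])
    flat_indices = indices[:, :, 0] * shape[1] + indices[:, :, 1]
    return tf.gather(flat, flat_indices)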
def _get_top_k(scores1, scores2, k, max_span_size, support2question):
    max_support_length = tf.shape(scores1)[1]
    doc_idx, pointer1, topk_scores1 = segment_top_k(scores1, support2question, k)

    # [num_questions * beam_size]
    doc_idx_flat = tf.reshape(doc_idx, [-1])
    pointer_flat1 = tf.reshape(pointer1, [-1])

    # [num_questions * beam_size, support_length]
    scores_gathered2 = tf.gather(scores2, doc_idx_flat)
    if max_span_size < 0:
        pointer_flat1, max_span_size = pointer_flat1 + max_span_size + 1, -max_span_size
    left_mask = misc.mask_for_lengths(tf.cast(pointer_flat1, tf.int32),
                                      max_support_length, mask_right=False)
    right_mask = misc.mask_for_lengths(tf.cast(pointer_flat1 + max_span_size, tf.int32),
                                       max_support_length)
    scores_gathered2 = scores_gathered2 + left_mask + right_mask

    pointer2 = tf.argmax(scores_gathered2, axis=1, output_type=tf.int32)
    topk_score2 = tf.gather_nd(scores2, tf.stack([doc_idx_flat, pointer2], 1))

    return doc_idx, pointer1, tf.reshape(pointer2, [-1, k]), topk_scores1 + tf.reshape(topk_score2, [-1, k])
def batch_gather(reference, indices):
    '''Batchwise gathering of row indices.

    The numpy equivalent is reference[np.arange(batch_size), indices].

    # Arguments
        reference: tensor with ndim >= 2 of shape
            (batch_size, dim1, dim2, ..., dimN)
        indices: 1d integer tensor of shape (batch_size) satisfying
            0 <= i < dim1 for each element i.

    # Returns
        A tensor with shape (batch_size, dim2, ..., dimN)
        equal to reference[np.arange(batch_size), indices]
    '''
    batch_size = K.shape(reference)[0]
    # tf.pack was renamed to tf.stack in TF 1.0.
    indices = tf.stack([tf.range(batch_size), indices], axis=1)
    return tf.gather_nd(reference, indices)
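# Illustrative check (assumes TF 1.x and `from keras import backend as K`):
# picks one row per batch element, like reference[np.arange(batch_size), indices].
ref = tf.constant([[10, 11, 12], [20, 21, 22]])
idx = tf.constant([2, 0])
picked = batch_gather(ref, idx)   # evaluates to [12, 20]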
def scanline_error(tensor, shape):
    """Apply a horizontal scanline-error distortion to the given image tensor."""
    height, width, channels = shape
    value_shape = [height, width, 1]
    error_line = tf.maximum(basic([int(height * .75), 1], value_shape, distrib=ValueDistribution.exp) - .5, 0)
    error_swerve = tf.maximum(basic([int(height * .01), 1], value_shape, distrib=ValueDistribution.exp) - .5, 0)

    error_line *= error_swerve
    error_swerve *= 2

    white_noise = basic([int(height * .75), 1], value_shape)
    white_noise = effects.blend(0, white_noise, error_swerve)

    error = error_line + white_noise

    y_index = effects.column_index(shape)
    x_index = (effects.row_index(shape) - tf.cast(effects.value_map(error, value_shape) * width * .025, tf.int32)) % width

    return tf.minimum(tf.gather_nd(tensor, tf.stack([y_index, x_index], 2)) + error_line * white_noise * 4, 1)
def blend_layers(control, shape, feather=1.0, *layers):
    layer_count = len(layers)

    control = normalize(control)
    control *= layer_count
    control_floor = tf.cast(control, tf.int32)

    x_index = row_index(shape)
    y_index = column_index(shape)

    layers = tf.stack(list(layers) + [layers[-1]])
    layer_count += 1

    floor_values = control_floor[:, :, 0]

    # I'm not sure why the mod operation is needed, but tensorflow-cpu explodes without it.
    combined_layer_0 = tf.gather_nd(layers, tf.stack([floor_values % layer_count, y_index, x_index], 2))
    combined_layer_1 = tf.gather_nd(layers, tf.stack([(floor_values + 1) % layer_count, y_index, x_index], 2))

    control_floor_fract = control - tf.floor(control)
    control_floor_fract = tf.minimum(tf.maximum(control_floor_fract - (1.0 - feather), 0.0) / feather, 1.0)

    return blend(combined_layer_0, combined_layer_1, control_floor_fract)
def inner_tile(tensor, shape, freq):
    """Build an inner-tiled version of the tensor at the given frequency."""
    if isinstance(freq, int):
        freq = freq_for_shape(freq, shape)

    small_shape = [int(shape[0] / freq[0]), int(shape[1] / freq[1]), shape[2]]

    y_index = tf.tile(column_index(small_shape) * freq[0], [freq[0], freq[0]])
    x_index = tf.tile(row_index(small_shape) * freq[1], [freq[0], freq[0]])

    tiled = tf.gather_nd(tensor, tf.stack([y_index, x_index], 2))
    tiled = resample(tiled, shape, spline_order=1)

    return tiled
def wln(graph_inputs, batch_size, hidden_size, depth):
    input_atom, input_bond, atom_graph, bond_graph, num_nbs, node_mask = graph_inputs
    atom_features = tf.nn.relu(linearND(input_atom, hidden_size, "atom_embedding", init_bias=None))
    layers = []
    for i in range(depth):  # was xrange, which is Python 2 only
        with tf.variable_scope("WL", reuse=(i > 0)) as scope:
            fatom_nei = tf.gather_nd(atom_features, atom_graph)
            fbond_nei = tf.gather_nd(input_bond, bond_graph)
            h_nei_atom = linearND(fatom_nei, hidden_size, "nei_atom", init_bias=None)
            h_nei_bond = linearND(fbond_nei, hidden_size, "nei_bond", init_bias=None)
            h_nei = h_nei_atom * h_nei_bond
            mask_nei = tf.reshape(tf.sequence_mask(tf.reshape(num_nbs, [-1]), max_nb, dtype=tf.float32),
                                  [batch_size, -1, max_nb, 1])
            f_nei = tf.reduce_sum(h_nei * mask_nei, -2)
            f_self = linearND(atom_features, hidden_size, "self_atom", init_bias=None)
            layers.append(f_nei * f_self * node_mask)
            # tf.concat takes (values, axis) since TF 1.0.
            l_nei = tf.concat([fatom_nei, fbond_nei], axis=3)
            nei_label = tf.nn.relu(linearND(l_nei, hidden_size, "label_U2"))
            nei_label = tf.reduce_sum(nei_label * mask_nei, -2)
            new_label = tf.concat([atom_features, nei_label], axis=2)
            new_label = linearND(new_label, hidden_size, "label_U1")
            atom_features = tf.nn.relu(new_label)
    # kernels = tf.concat(layers, axis=1)
    kernels = layers[-1]
    fp = tf.reduce_sum(kernels, 1)
    return atom_features, fp
def _search_ann(self, search_keys, dnd_keys, update_LRU_order):
    batch_indices = []
    for act, ann in self.anns.items():
        # These are the indices we get back from the ANN search
        indices = ann.query(search_keys)
        log.debug("ANN indices for action {}: {}".format(act, indices))
        # Create a numpy array filled with the corresponding action-vector index
        action_indices = np.full(indices.shape, self.action_vector.index(act))
        log.debug("Action indices for action {}: {}".format(act, action_indices))
        # Riffle the two arrays together
        tf_indices = self._riffle_arrays(action_indices, indices)
        batch_indices.append(tf_indices)
        # Very important part: modify the LRU order here.
        # Doesn't work without the tabular update, of course!
        if update_LRU_order == 1:
            _ = [self.tf_index__state_hash[act][i] for i in indices.ravel()]
    np_batch = np.asarray(batch_indices)
    log.debug("Batch update indices: {}".format(np_batch))
    # Reshape to a gather_nd-compatible format.
    final_indices = np.asarray([np_batch[:, j, :, :] for j in range(np_batch.shape[1])], dtype=np.int32)
    return final_indices
def extract_dense_weights(sess):
    for key in dense_layers.keys():
        layer = dense_layers[key]

        # Build a sparse copy of the dense kernel.
        dense_kernel = layer.kernel
        dense_kernel_shape = dense_kernel.get_shape().as_list()
        # dense_kernel = tf.reshape(dense_kernel, [dense_kernel_shape[0] * dense_kernel_shape[1] * dense_kernel_shape[2],
        #                                          dense_kernel_shape[3]])
        # dense_kernel = tf.transpose(dense_kernel)
        idx = tf.where(tf.not_equal(dense_kernel, 0))
        sparse_kernel = tf.SparseTensor(idx, tf.gather_nd(dense_kernel, idx), dense_kernel.get_shape())

        if layer.bias is not None:
            dk, k, b = sess.run([dense_kernel, sparse_kernel, layer.bias])
        else:
            dk, k = sess.run([dense_kernel, sparse_kernel])
            b = None
        dense_weights['%s/%s' % (key, 'kernel_dense')] = dk
        dense_weights['%s/%s' % (key, 'kernel')] = k
        dense_weights['%s/%s' % (key, 'kernel_shape')] = dense_kernel_shape
        dense_weights['%s/%s' % (key, 'bias')] = b
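# The tf.where / tf.gather_nd pairing above is the standard recipe for turning
# a dense tensor into a tf.SparseTensor; a self-contained sketch (TF 1.x assumed):
dense = tf.constant([[0., 3., 0.], [5., 0., 0.]])
nz = tf.where(tf.not_equal(dense, 0.))            # [[0, 1], [1, 0]]
sparse = tf.SparseTensor(nz, tf.gather_nd(dense, nz),
                         tf.shape(dense, out_type=tf.int64))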
def skip_example(*args):
    print('skipping every second example in every batch !!')
    res = []
    for arg in args:
        indices = np.zeros((FLAGS.batch_size, 15, 2))
        for i in range(FLAGS.batch_size):  # was a hard-coded range(32)
            for j in range(15):
                indices[i, j] = np.array([i, j * 2])
        indices = np.int64(indices)
        arg = tf.gather_nd(arg, indices)
        res.append(arg)
    return res
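# Design note: for a regular "every second step" pattern like this, a strided
# slice gives the same result without materializing an index array:
#   res.append(arg[:, 0:30:2])   # equivalent, assuming at least 30 steps
# tf.gather_nd only earns its keep when the selected positions vary per example.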
def children_tensor(nodes, children, feature_size):
    """Build the children tensor from the input nodes and child lookup."""
    with tf.name_scope('children_tensor'):
        max_children = tf.shape(children)[2]
        batch_size = tf.shape(nodes)[0]
        num_nodes = tf.shape(nodes)[1]

        # replace the root node with the zero vector so lookups for the 0th
        # vector return 0 instead of the root vector
        # zero_vecs is (batch_size, 1, feature_size)
        zero_vecs = tf.zeros((batch_size, 1, feature_size))
        # vector_lookup is (batch_size x num_nodes x feature_size)
        vector_lookup = tf.concat([zero_vecs, nodes[:, 1:, :]], axis=1)
        # children is (batch_size x num_nodes x num_children x 1)
        children = tf.expand_dims(children, axis=3)
        # prepend the batch indices to the 4th dimension of children
        # batch_indices is (batch_size x 1 x 1 x 1)
        batch_indices = tf.reshape(tf.range(0, batch_size), (batch_size, 1, 1, 1))
        # batch_indices is (batch_size x num_nodes x num_children x 1)
        batch_indices = tf.tile(batch_indices, [1, num_nodes, max_children, 1])
        # children is (batch_size x num_nodes x num_children x 2)
        children = tf.concat([batch_indices, children], axis=3)
        # output will have shape (batch_size x num_nodes x num_children x feature_size)
        # NOTE: tf < 1.1 contains a bug that makes backprop not work for this!
        return tf.gather_nd(vector_lookup, children, name='children')
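# A small smoke test (illustrative values; TF 1.x assumed): a batch with one
# tree of 3 nodes and up to 2 children per node, where child index 0 means
# "no child" and therefore looks up the zero vector.
nodes = tf.constant([[[1., 1.], [2., 2.], [3., 3.]]])   # (1, 3, feature_size=2)
children = tf.constant([[[1, 2], [0, 0], [0, 0]]])      # (1, 3, max_children=2)
child_vecs = children_tensor(nodes, children, feature_size=2)  # (1, 3, 2, 2)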
def ctc_label_dense_to_sparse(self, labels, label_lengths):
    """Mike Henry's implementation, with some minor modifications."""
    # Requires: from tensorflow.python.ops import functional_ops
    with self.G.as_default():
        label_shape = tf.shape(labels)
        num_batches_tns = tf.stack([label_shape[0]])
        max_num_labels_tns = tf.stack([label_shape[1]])

        def range_less_than(previous_state, current_input):
            return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

        init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
        init = tf.expand_dims(init, 0)
        dense_mask = functional_ops.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
        dense_mask = dense_mask[:, 0, :]

        label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
        label_ind = tf.boolean_mask(label_array, dense_mask)

        batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
        batch_ind = tf.boolean_mask(batch_array, dense_mask)

        indices = tf.transpose(tf.reshape(tf.concat(axis=0, values=[batch_ind, label_ind]), [2, -1]))
        vals_sparse = tf.gather_nd(labels, indices)
        return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
def eligibility_dutch_traces(Qs_t, states_t, actions_t, lr, discount, lambda_value):
    # Beware: this trace has to be used with a different learning rule
    et = tf.get_variable(
        "eligibilitytraces",
        shape=Qs_t.get_shape(),
        dtype=tf.float32,
        trainable=False,
        initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)

    state_action_pairs = tf.stack([states_t, actions_t], 1)
    current_trace = tf.gather_nd(et, state_action_pairs)
    updates = 1 - lr * discount * lambda_value * current_trace
    with tf.control_dependencies([updates]):
        dec_et_op = tf.assign(et, discount * lambda_value * et)
        with tf.control_dependencies([dec_et_op]):
            update_et_op = tf.scatter_nd_add(et, indices=state_action_pairs, updates=updates)

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
def tabular_learning_with_lr(init_lr, decay_steps, Qs_t, states_t, actions_t, targets):
    reusing_scope = tf.get_variable_scope().reuse

    state_action_pairs = tf.stack([states_t, actions_t], 1)
    estimates = tf.gather_nd(Qs_t, state_action_pairs)
    err_estimates = targets - estimates
    loss = tf.reduce_mean(err_estimates)

    global_step = tf.Variable(0, trainable=False, name="global_step",
                              collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
    lr = tf.train.exponential_decay(tf.constant(init_lr, dtype=tf.float32),
                                    global_step, decay_steps, 0.5, staircase=True)
    if reusing_scope is False:
        tf.summary.scalar('lr', lr)

    inc_global_step = global_step.assign_add(1)
    with tf.control_dependencies([inc_global_step]):
        updates = lr * err_estimates
        train_op = tf.scatter_nd_add(Qs_t, state_action_pairs, updates)

    return loss, train_op
def fast_rotate(input_image, dx=0, dy=0):
    # Basic rotations (constant disparities) for equirectangular images.
    # For image augmentations (y-axis rotations), this method is preferable
    # to the more general rotation function.
    height = tf.shape(input_image)[0]
    width = tf.shape(input_image)[1]

    # Shift the coordinate grid for the inverse warp.
    ix, iy = tf.meshgrid(tf.range(width), tf.range(height))
    ox = tf.mod(ix - dx, width)
    oy = tf.mod(iy - dy, height)
    indices = tf.stack([oy, ox], 2)

    # Perform exact sampling (as we are using integer coordinates).
    return tf.gather_nd(input_image, indices)
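# Usage sketch (illustrative; TF 1.x assumed): rotate an equirectangular image
# a quarter turn around the vertical axis, i.e. shift by width / 4 pixels with
# wrap-around.
image = tf.random_uniform([256, 512, 3])
rotated = fast_rotate(image, dx=512 // 4)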
# Project equirectangular image onto a cube face.