def calculate_outputs(self, x):
    h = lstm_layer(x, self.history_length, self.lstm_size, scope='lstm1')
    h = tf.concat([h, x], axis=2)

    self.h_final = time_distributed_dense_layer(h, 50, activation=tf.nn.relu, scope='dense1')
    y_hat = tf.squeeze(time_distributed_dense_layer(self.h_final, 1, activation=tf.nn.sigmoid, scope='dense2'), 2)

    final_temporal_idx = tf.stack([tf.range(tf.shape(self.history_length)[0]), self.history_length - 1], axis=1)
    self.final_states = tf.gather_nd(self.h_final, final_temporal_idx)
    self.final_predictions = tf.gather_nd(y_hat, final_temporal_idx)

    self.prediction_tensors = {
        'user_ids': self.user_id,
        'aisle_ids': self.aisle_id,
        'final_states': self.final_states,
        'predictions': self.final_predictions
    }

    return y_hat
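# calculate_outputs above (and the two variants that follow) extracts the output at
# each sequence's last valid timestep by pairing a batch index with
# (history_length - 1) and feeding the pairs to tf.gather_nd.
# A minimal, self-contained sketch of just that indexing step; the toy shapes and
# names below are illustrative, not part of the original models:
import tensorflow as tf

outputs = tf.random_uniform([4, 7, 5])               # [batch, max_time, features]
history_length = tf.constant([7, 3, 5, 1])           # valid length per sequence
last_step_idx = tf.stack(
    [tf.range(tf.shape(history_length)[0]), history_length - 1], axis=1)
last_outputs = tf.gather_nd(outputs, last_step_idx)  # [batch, features]

with tf.Session() as sess:
    print(sess.run(last_outputs).shape)              # (4, 5)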
def calculate_outputs(self, x):
    h = lstm_layer(x, self.history_length, self.lstm_size, scope='lstm1')
    h = tf.concat([h, x], axis=2)

    self.h_final = time_distributed_dense_layer(h, 50, activation=tf.nn.relu, scope='dense1')
    y_hat = tf.squeeze(time_distributed_dense_layer(self.h_final, 1, activation=tf.nn.sigmoid, scope='dense2'), 2)

    final_temporal_idx = tf.stack([tf.range(tf.shape(self.history_length)[0]), self.history_length - 1], axis=1)
    self.final_states = tf.gather_nd(self.h_final, final_temporal_idx)
    self.final_predictions = tf.gather_nd(y_hat, final_temporal_idx)

    self.prediction_tensors = {
        'user_ids': self.user_id,
        'department_ids': self.department_id,
        'final_states': self.final_states,
        'predictions': self.final_predictions
    }

    return y_hat
def calculate_outputs(self, x):
    h = lstm_layer(x, self.history_length, self.lstm_size)
    c = wavenet(x, self.dilations, self.filter_widths, self.skip_channels, self.residual_channels)
    h = tf.concat([h, c, x], axis=2)

    self.h_final = time_distributed_dense_layer(h, 50, activation=tf.nn.relu, scope='dense-1')
    y_hat = time_distributed_dense_layer(self.h_final, 1, activation=tf.nn.sigmoid, scope='dense-2')
    y_hat = tf.squeeze(y_hat, 2)

    final_temporal_idx = tf.stack([tf.range(tf.shape(self.history_length)[0]), self.history_length - 1], axis=1)
    self.final_states = tf.gather_nd(self.h_final, final_temporal_idx)
    self.final_predictions = tf.gather_nd(y_hat, final_temporal_idx)

    self.prediction_tensors = {
        'user_ids': self.user_id,
        'product_ids': self.product_id,
        'final_states': self.final_states,
        'predictions': self.final_predictions
    }

    return y_hat
def SampleRandomFrames(model_input, num_frames, num_samples):
    """Samples a random set of frames of size num_samples.

    Args:
      model_input: A tensor of size batch_size x max_frames x feature_size
      num_frames: A tensor of size batch_size x 1
      num_samples: A scalar

    Returns:
      `model_input`: A tensor of size batch_size x num_samples x feature_size
    """
    batch_size = tf.shape(model_input)[0]
    frame_index = tf.cast(
        tf.multiply(
            tf.random_uniform([batch_size, num_samples]),
            tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
    batch_index = tf.tile(
        tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
    index = tf.stack([batch_index, frame_index], 2)
    return tf.gather_nd(model_input, index)
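# A hedged usage sketch of SampleRandomFrames as defined above: draw 3 random
# frames (with replacement) per video from a padded batch. The toy shapes below
# are illustrative; num_frames is expected as a [batch_size, 1] tensor:
import tensorflow as tf

model_input = tf.random_normal([2, 10, 128])   # batch x max_frames x feature_size
num_frames = tf.constant([[10], [4]])          # valid frame counts per example
sampled = SampleRandomFrames(model_input, num_frames, num_samples=3)

with tf.Session() as sess:
    print(sess.run(sampled).shape)             # (2, 3, 128)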
def train(y_hat, regularizer, document, doc_weight, answer):
    # Trick while we wait for tf.gather_nd - https://github.com/tensorflow/tensorflow/issues/206
    # This unfortunately causes us to expand a sparse tensor into the full vocabulary
    index = tf.range(0, FLAGS.batch_size) * FLAGS.vocab_size + tf.to_int32(answer)
    flat = tf.reshape(y_hat, [-1])
    relevant = tf.gather(flat, index)

    # mean because the regularizer is independent of batch size
    loss = -tf.reduce_mean(tf.log(relevant)) + FLAGS.l2_reg * regularizer

    global_step = tf.Variable(0, name="global_step", trainable=False)
    accuracy = tf.reduce_mean(tf.to_float(tf.equal(tf.argmax(y_hat, 1), answer)))

    optimizer = tf.train.AdamOptimizer()
    grads_and_vars = optimizer.compute_gradients(loss)
    capped_grads_and_vars = [(tf.clip_by_value(grad, -5, 5), var) for (grad, var) in grads_and_vars]
    train_op = optimizer.apply_gradients(capped_grads_and_vars, global_step=global_step)

    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    return loss, train_op, global_step, accuracy
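# The flatten-and-gather workaround in train() above predates a usable
# tf.gather_nd; the same per-row lookup can now be written with it directly.
# A hedged, standalone sketch of the equivalent indexing (toy values stand in
# for y_hat and answer; FLAGS and the rest of train() are not needed here):
import tensorflow as tf

y_hat = tf.constant([[0.1, 0.7, 0.2],
                     [0.6, 0.3, 0.1]])                 # [batch, vocab]
answer = tf.constant([1, 0])                           # correct class per row
idx = tf.stack([tf.range(tf.shape(answer)[0]), answer], axis=1)
relevant = tf.gather_nd(y_hat, idx)                    # picks y_hat[0, 1] and y_hat[1, 0]

with tf.Session() as sess:
    print(sess.run(relevant))                          # [0.7 0.6]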
def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]

        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])

        # then index as ([0, 1, ..., size - 1], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t
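# A hedged usage sketch of select_dim_value as defined above: pick one value
# along the last dimension for every leading index (toy values only):
import tensorflow as tf

x = tf.constant([[[1., 2.], [3., 4.]],
                 [[5., 6.], [7., 8.]]])        # shape (2, 2, 2)
indices = tf.constant([[0, 1],
                       [1, 0]])                # shape (2, 2): one pick per row of x
picked = select_dim_value(x, indices)          # shape (2, 2)

with tf.Session() as sess:
    print(sess.run(picked))                    # [[1. 4.] [6. 7.]]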
def batch_beam_gather(tensor, indices, name=None):
    with tf.name_scope(name, 'batch-beam-gather', values=[tensor, indices]):
        beam_size = int(indices.get_shape()[1])

        batch_indices = tf.range(tf.shape(indices, out_type=indices.dtype)[0])
        batch_indices = tf.expand_dims(batch_indices, -1)
        batch_indices = tf.tile(batch_indices, [1, beam_size])

        gather_indices = tf.stack([batch_indices, indices], -1)
        collect = tf.gather_nd(tensor, gather_indices)

        collect.set_shape(
            indices.get_shape().concatenate(tensor.get_shape()[2:])
        )
        return collect
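# A hedged usage sketch of batch_beam_gather as defined above: after a top-k
# step in beam search, every batch element re-gathers its surviving beams
# (toy values only; a real decoder would supply its own scores and states):
import tensorflow as tf

states = tf.constant([[[10., 11.], [20., 21.], [30., 31.]],
                      [[40., 41.], [50., 51.], [60., 61.]]])  # [batch=2, beam=3, d=2]
chosen = tf.constant([[2, 0, 0],
                      [1, 1, 2]])                             # surviving beam per slot
reordered = batch_beam_gather(states, chosen)

with tf.Session() as sess:
    print(sess.run(reordered)[0])   # [[30. 31.] [10. 11.] [10. 11.]]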
def gather_prev_stack_state_index(pointer_vals, prev_index, transition_state,
                                  batch_size):
    """Gathers new previous state index."""
    new_pointer_vals = tf.reshape(pointer_vals, [-1, 1])
    # Helper tensors.
    prev_vals = tf.reshape(tf.fill(
        tf.stack([batch_size]), prev_index), [-1, 1])
    trans_inds = tf.transpose(tf.stack(
        [tf.range(batch_size), transition_state]))
    # Gather new prev state for main rnn. Pointer vals if reduce, else prev.
    # State inds dimension [batch_size, NUM_TR_STATES]
    state_inds = tf.concat([prev_vals] * 6 + [new_pointer_vals, prev_vals], axis=1)
    prev_state_index = tf.gather_nd(state_inds, trans_inds)
    return prev_state_index
def gather_prev_stack_aux_state_index(pointer_vals, prev_index, transition_state,
                                      batch_size):
    """Gather new prev state index for aux rnn: as for main, but zero if shift."""
    new_pointer_vals = tf.reshape(pointer_vals, [-1, 1])
    # Helper tensors.
    prev_vals = tf.reshape(tf.fill(
        tf.stack([batch_size]), prev_index), [-1, 1])
    trans_inds = tf.transpose(tf.stack(
        [tf.range(batch_size), transition_state]))
    batch_zeros = tf.reshape(tf.zeros(
        tf.stack([batch_size]), dtype=tf.int32), [-1, 1])
    # Gather new prev state for aux rnn.
    # State inds dimension [batch_size, NUM_TR_STATES]
    state_inds = tf.concat(
        [prev_vals, batch_zeros] + [prev_vals] * 4 + [new_pointer_vals, prev_vals],
        axis=1)
    prev_state_index = tf.gather_nd(state_inds, trans_inds)
    return prev_state_index
def quantParam():  # pass saved n/w + suffix
    paramDict = {}
    suffix = ["fc", "_w:0"]
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph('./LenetParam.meta')
        saver.restore(sess, './LenetParam')
        fc_wts = [v.name for v in tf.trainable_variables() if (v.name.startswith(suffix[0]) & v.name.endswith(suffix[1]))]
        lay_name = [v.name for v in tf.trainable_variables() if (v.name.endswith("_w:0") | v.name.endswith("_b:0"))]
        print(lay_name)
        for v in lay_name:
            print(v)
            curLay = [a for a in tf.trainable_variables() if (a.name == v)]
            curWt = curLay[0].eval()
            # if v in fc_wts:
            #     ind = tf.where(tf.not_equal(curWt, 0))
            #     sparse = tf.SparseTensor(ind, tf.gather_nd(curWt, ind), curLay[0].get_shape())
            #     tmp = sess.run(sparse)
            # else:
            tmp = curWt
            paramDict.update({v: tmp})
    print(paramDict.keys())
    return paramDict
def sample(params, eps, dist='gauss'):
    """Utility function for sampling from distributions, given noise."""
    if 'bin' in dist:
        logits = params[-1]
        params = params[:-1]

    if 'gauss' in dist:
        mean, cov = params
        s = mean + tf.sqrt(cov) * eps
    elif 'gm' in dist:
        means, covs, pi_logits = params
        choices = tf.multinomial(pi_logits, num_samples=1)
        batch_size = choices.get_shape()[0]
        ids = tf.constant(list(range(batch_size)), dtype=tf.int64, shape=(batch_size, 1))
        idx_tensor = tf.concat([ids, choices], axis=1)
        chosen_means = tf.gather_nd(means, idx_tensor)
        chosen_covs = tf.gather_nd(covs, idx_tensor)
        s = chosen_means + tf.sqrt(chosen_covs) * eps
    else:
        raise NotImplementedError

    if 'bin' in dist:
        sig = tf.sigmoid(logits)
        s = tf.concat([s, sig], axis=1)

    return s
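# A hedged usage sketch of sample() in its 'gm' (Gaussian mixture) branch:
# one mixture component is drawn per example and its mean/covariance are picked
# out with tf.gather_nd before adding scaled noise. Toy shapes only; as written,
# the function needs a statically known batch size:
import tensorflow as tf

batch_size, n_components, dim = 2, 3, 4
means = tf.zeros([batch_size, n_components, dim])
covs = tf.ones([batch_size, n_components, dim])
pi_logits = tf.zeros([batch_size, n_components])       # uniform mixture weights
eps = tf.random_normal([batch_size, dim])
s = sample((means, covs, pi_logits), eps, dist='gm')

with tf.Session() as sess:
    print(sess.run(s).shape)                            # (2, 4)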
def sim_occlusions(poses, dm_shape, batch_size, max_length, n_dims, body_splits,
                   _int_type=tf.int32, _float_type=tf.float32):
    def occluded_poses():
        body_splits_tf = tf.constant(body_splits, dtype=_int_type)
        occ_idcs = tf.random_uniform([batch_size, 1], minval=0, maxval=len(body_splits), dtype=_int_type)
        occ_idcs = tf.gather_nd(body_splits_tf, occ_idcs)
        noise_mask = tf.tile(
            tf.reshape(
                tf.cast(tf.reduce_sum(tf.one_hot(occ_idcs, dm_shape[0]), axis=1), dtype=tf.bool),
                [batch_size, 1, dm_shape[0], 1]),
            [1, max_length, 1, n_dims])
        noisy_poses = poses * tf.random_uniform([batch_size, max_length, 1, n_dims], minval=0.8, maxval=1.2, dtype=_float_type)
        return tf.where(noise_mask, noisy_poses, poses)

    occlude_rate = 0.5

    return tf.cond(tf.cast(tf.round(tf.random_uniform([], minval=-0.5, maxval=0.5) + occlude_rate), tf.bool),
                   occluded_poses, lambda: poses)
def remove(self, x):
    """Remove padding from the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_origin,...]

    Returns:
      a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin
    """
    with tf.name_scope("pad_reduce/remove"):
        x_shape = x.get_shape().as_list()
        x = tf.gather_nd(
            x,
            indices=self.nonpad_ids,
        )
        if not context.in_eager_mode():
            # This is a hack, but for some reason gather_nd returns a tensor of
            # undefined shape, so the shape is set up manually
            x.set_shape([None] + x_shape[1:])
    return x
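# The class holding nonpad_ids is not shown here; a standalone sketch of the
# same compression idea, assuming nonpad_ids comes from tf.where over a padding
# mask (an assumption for illustration, not the original class's code):
import tensorflow as tf

x = tf.constant([[1., 1.], [0., 0.], [2., 2.], [0., 0.]])  # rows 1 and 3 are padding
pad_mask = tf.constant([0., 1., 0., 1.])                   # 1.0 marks padded rows
nonpad_ids = tf.where(pad_mask < 0.5)                      # [[0], [2]], dtype int64
compressed = tf.gather_nd(x, nonpad_ids)                   # keeps only the real rows

with tf.Session() as sess:
    print(sess.run(compressed))                            # [[1. 1.] [2. 2.]]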
# Source: fft_tree_constrained_inference.py, project wip-constrained-extractor (author: brain-research)
def add_leading_idx_3(t):
    """Utility to automatically add indices used by gather_nd.

    Args:
      t: tensor of shape [b,s,ss,...,n]

    Returns:
      t: tensor of shape [b,s,ss,...,n+3] where the (b,s,ss)-indices are
        prepended onto the values in the final mode.
    """
    dims = [d.value for d in t.get_shape().dims]
    b_size, s_size, ss_size = dims[:3]
    b_idx = tf.reshape(tf.range(0, b_size), [b_size] + [1] * (len(dims) - 1))
    s_idx = tf.reshape(tf.range(0, s_size), [1, s_size] + [1] * (len(dims) - 2))
    ss_idx = tf.reshape(
        tf.range(0, ss_size), [1, 1, ss_size] + [1] * (len(dims) - 3))
    tiled_b_idx = tf.tile(b_idx, [1] + dims[1:-1] + [1])
    tiled_s_idx = tf.tile(s_idx, [dims[0], 1] + dims[2:-1] + [1])
    tiled_ss_idx = tf.tile(ss_idx, [dims[0], dims[1], 1] + dims[3:-1] + [1])
    t_with_b_s_ss_idx = tf.concat(
        [tiled_b_idx, tiled_s_idx, tiled_ss_idx, t], axis=len(dims) - 1)
    return t_with_b_s_ss_idx
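# A hedged usage sketch of add_leading_idx_3 above: for a statically shaped
# integer tensor of trailing values, the (b, s, ss) coordinates are prepended so
# the result can be fed straight into tf.gather_nd (toy shape only):
import tensorflow as tf

t = tf.zeros([2, 3, 4, 1], dtype=tf.int32)   # [b, s, ss, n] with n = 1
t_idx = add_leading_idx_3(t)                 # [2, 3, 4, 4]: (b, s, ss, value) per cell

with tf.Session() as sess:
    print(sess.run(t_idx).shape)             # (2, 3, 4, 4)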
def transition(self, curr_state, next_symbols, batch_size):
    with tf.name_scope('grammar_transition'):
        transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
        assert transitions.get_shape()[1:] == (self.output_size,)

        indices = tf.stack((tf.range(0, batch_size), next_symbols), axis=1)
        next_state = tf.gather_nd(transitions, indices)
        return next_state
from functools import reduce  # reduce is not a builtin in Python 3

def gather_nd(params, indices, shape):
    rank = len(shape)
    flat_params = tf.reshape(params, [-1])
    multipliers = [reduce(lambda x, y: x * y, shape[i + 1:], 1) for i in range(0, rank)]
    indices_unpacked = tf.unstack(tf.transpose(indices, [rank - 1] + list(range(0, rank - 1))))
    flat_indices = sum([a * b for a, b in zip(multipliers, indices_unpacked)])
    return tf.gather(flat_params, flat_indices)
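# A hedged usage sketch of the manual gather_nd above, which flattens params and
# turns N-d indices into flat offsets; the extra `shape` argument is the static
# shape of params. For static shapes it matches tf.gather_nd (toy values only):
import tensorflow as tf

params = tf.constant([[1, 2, 3],
                      [4, 5, 6]])
indices = tf.constant([[0, 2], [1, 0]])            # pick params[0, 2] and params[1, 0]
manual = gather_nd(params, indices, shape=[2, 3])
builtin = tf.gather_nd(params, indices)

with tf.Session() as sess:
    print(sess.run(manual), sess.run(builtin))     # [3 4] [3 4]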
# ctc_label_dense_to_sparse is taken from https://github.com/tensorflow/tensorflow/issues/1742#issuecomment-205291527
#
# The CTC implementation in TensorFlow needs labels in a sparse representation,
# but sparse data and queues don't mix well, so we store padded tensors in the
# queue and convert to a sparse representation after dequeuing a batch.
#