def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Batch normalization for convolutional feature maps.
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out],
                               initializer=tf.constant_initializer(0.0),
                               trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out],
                                initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        # Moments over batch, height and width: one mean/variance per channel.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Use batch statistics while training, the moving averages at test time.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
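A minimal usage sketch (the tensor shapes are illustrative): the caller feeds a boolean phase_train tensor to switch between batch statistics and the moving averages.

x = tf.placeholder(tf.float32, [None, 28, 28, 32])
phase_train = tf.placeholder(tf.bool, name='phase_train')
h = batch_norm(x, n_out=32, phase_train=phase_train)
# feed {phase_train: True} while training, {phase_train: False} at eval time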
Example source code for Python tf.identity()
def build_forward(self):
    verbalise = self.FLAGS.verbalise

    # Placeholders
    inp_size = [None] + self.meta['inp_size']
    self.inp = tf.placeholder(tf.float32, inp_size, 'input')
    self.feed = dict()  # other placeholders

    # Build the forward pass
    state = identity(self.inp)
    roof = self.num_layer - self.ntrain
    self.say(HEADER, LINE)
    for i, layer in enumerate(self.darknet.layers):
        scope = '{}-{}'.format(str(i), layer.type)
        args = [layer, state, i, roof, self.feed]
        state = op_create(*args)
        mess = state.verbalise()
        self.say(mess)
    self.say(LINE)

    self.top = state
    self.out = tf.identity(state.out, name='output')
critic_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def create_q_network(self, state_dim, action_dim):
    # the layer sizes could be changed
    layer1_size = LAYER1_SIZE
    layer2_size = LAYER2_SIZE

    state_input = tf.placeholder("float", [None, state_dim])
    action_input = tf.placeholder("float", [None, action_dim])

    W1 = self.variable([state_dim, layer1_size], state_dim)
    b1 = self.variable([layer1_size], state_dim)
    W2 = self.variable([layer1_size, layer2_size], layer1_size + action_dim)
    W2_action = self.variable([action_dim, layer2_size], layer1_size + action_dim)
    b2 = self.variable([layer2_size], layer1_size + action_dim)
    W3 = tf.Variable(tf.random_uniform([layer2_size, 1], -3e-3, 3e-3))
    b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3))

    # The action is injected at the second hidden layer, as in the DDPG paper.
    layer1 = tf.nn.relu(tf.matmul(state_input, W1) + b1)
    layer2 = tf.nn.relu(tf.matmul(layer1, W2) + tf.matmul(action_input, W2_action) + b2)
    q_value_output = tf.identity(tf.matmul(layer2, W3) + b3)

    return state_input, action_input, q_value_output, [W1, b1, W2, W2_action, b2, W3, b3]
critic_network.py (project: -NIPS-2017-Learning-to-Run, author: kyleliang919)
def create_q_network(self, state_dim, action_dim, scope):
    with tf.variable_scope(scope):
        # the layer sizes could be changed
        layer1_size = LAYER1_SIZE
        layer2_size = LAYER2_SIZE

        state_input = tf.placeholder("float", [None, state_dim])
        action_input = tf.placeholder("float", [None, action_dim])

        W1 = self.variable([state_dim, layer1_size], state_dim)
        b1 = self.variable([layer1_size], state_dim)
        W2 = self.variable([layer1_size, layer2_size], layer1_size + action_dim)
        W2_action = self.variable([action_dim, layer2_size], layer1_size + action_dim)
        b2 = self.variable([layer2_size], layer1_size + action_dim)
        W3 = tf.Variable(tf.random_uniform([layer2_size, 1], -3e-3, 3e-3))
        b3 = tf.Variable(tf.random_uniform([1], -3e-3, 3e-3))

        layer1 = tf.nn.relu(tf.matmul(state_input, W1) + b1)
        layer2 = tf.nn.relu(tf.matmul(layer1, W2) + tf.matmul(action_input, W2_action) + b2)
        q_value_output = tf.identity(tf.matmul(layer2, W3) + b3)

        return state_input, action_input, q_value_output, [W1, b1, W2, W2_action, b2, W3, b3]
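A hypothetical usage sketch (the dimensions are illustrative; LAYER1_SIZE, LAYER2_SIZE and self.variable are assumed to be defined elsewhere in the class, the latter presumably a fan-in-scaled uniform initializer, as is standard for DDPG):

state_input, action_input, q_value_output, net = self.create_q_network(
    state_dim=41, action_dim=18, scope='critic')
target_state_input, target_action_input, target_q, target_net = self.create_q_network(
    state_dim=41, action_dim=18, scope='target_critic')

The scope argument is what this variant adds over the previous one: it keeps the online and target critic variables disjoint.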
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
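A usage sketch under stated assumptions: tt is this project's model-decorator module and env an OpenAI Gym environment with a discrete action space; how exactly the decorated function is invoked depends on that library, so the call below is illustrative only.

q_network = deep_q_network()
frames = tf.placeholder(tf.float32, [None, 84, 84, 4], name='frames')
q_values = q_network(frames)                 # tensor named 'Q', one value per action
greedy_action = tf.argmax(q_values, axis=1)  # epsilon-greedy policy would wrap this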
def testGetSaverPartitioned(self, save_partitioned, load_partitioned):
    path = os.path.join(tempfile.mkdtemp(), "ckpt")

    # Save checkpoint.
    with self.test_session() as sess:
        conv = self._create_conv(partitioned=save_partitioned, name="a")
        saver = snt.get_saver(conv)
        sess.run(tf.global_variables_initializer())
        saver.save(sess, path)
        w = tf.identity(conv.w)
        w_value = sess.run(w)

    # Restore checkpoint.
    with self.test_session() as sess:
        conv = self._create_conv(partitioned=load_partitioned, name="b")
        saver = snt.get_saver(conv)
        saver.restore(sess, path)
        w = tf.identity(conv.w)
        self.assertAllEqual(sess.run(w), w_value)
def testTupleSelect(self):
    """Test where idx is a tuple."""
    shape0 = [1, 2]
    shape1 = [1, 2, 3]
    shape2 = [1, 2, 3, 4]
    input0 = tf.random_uniform(shape=shape0)
    input1 = tf.random_uniform(shape=shape1)
    input2 = tf.random_uniform(shape=shape2)

    mod = snt.SelectInput(idx=(0, 2))
    output = mod(input0, input1, input2)

    output0 = tf.identity(input0)
    output2 = tf.identity(input2)

    with self.test_session() as sess:
        out = sess.run([output, [output0, output2]])
        self.assertAllEqual(out[0][0], out[1][0])
        self.assertAllEqual(out[0][1], out[1][1])
def testNestedListSelect(self):
    """Test where idx is a nested list."""
    shape0 = [1, 2]
    shape1 = [1, 2, 3]
    shape2 = [1, 2, 3, 4]
    input0 = tf.random_uniform(shape=shape0)
    input1 = tf.random_uniform(shape=shape1)
    input2 = tf.random_uniform(shape=shape2)

    mod = snt.SelectInput(idx=[2, [1, 0, 1]])
    output = mod(input0, input1, input2)

    output0 = tf.identity(input0)
    output1 = tf.identity(input1)
    output2 = tf.identity(input2)

    with self.test_session() as sess:
        out = sess.run([output, [output2, [output1, output0, output1]]])
        self.assertAllEqual(out[0][0], out[1][0])
        self.assertAllEqual(out[0][1][0], out[1][1][0])
        self.assertAllEqual(out[0][1][1], out[1][1][1])
        self.assertAllEqual(out[0][1][2], out[1][1][2])
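Outside a test the module composes the same way; a minimal sketch with hypothetical input tensors:

select = snt.SelectInput(idx=(0, 2))
selected = select(enc_out, dec_out, aux_out)  # [enc_out, aux_out], each forwarded via tf.identity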
def accumulate_strings(values, name="strings"):
    """Accumulates strings into a vector.

    Args:
        values: A 1-d string tensor that contains values to add to the accumulator.
        name: Name for the accumulator variable.

    Returns:
        A tuple (value_tensor, update_op).
    """
    tf.assert_type(values, tf.string)
    strings = tf.Variable(
        name=name,
        initial_value=[],
        dtype=tf.string,
        trainable=False,
        collections=[],
        validate_shape=True)
    value_tensor = tf.identity(strings)
    update_op = tf.assign(
        ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
    return value_tensor, update_op
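A usage sketch (TF 1.x). Because the variable is created with collections=[], tf.global_variables_initializer() will not pick it up; the sketch assumes the standard TF 1.x naming of a variable's init op ("<name>/Assign") to initialize it explicitly.

values = tf.placeholder(tf.string, shape=[None])
value_tensor, update_op = accumulate_strings(values, name="accum")
init_accum = tf.get_default_graph().get_operation_by_name("accum/Assign")
with tf.Session() as sess:
    sess.run(init_accum)
    sess.run(update_op, {values: [b"foo"]})
    sess.run(update_op, {values: [b"bar", b"baz"]})
    print(sess.run(value_tensor))  # [b'foo' b'bar' b'baz']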
def get_function_init_state(self, function_tokens):
    next_state = tf.gather(self.function_states,
                           function_tokens - (self.num_begin_tokens + self.num_control_tokens))
    assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [function_tokens])
    with tf.control_dependencies([assert2]):
        return tf.identity(next_state)
seq2seq_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_loss_op(self, result):
    logits = result.rnn_output
    with tf.control_dependencies([tf.assert_positive(tf.shape(logits)[1], data=[tf.shape(logits)])]):
        length_diff = tf.reshape(self.config.max_length - tf.shape(logits)[1], shape=(1,))
    padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0]], axis=0), shape=(3, 2))
    preds = tf.pad(logits, padding, mode='constant')

    # add epsilon to avoid division by 0
    preds = preds + 1e-5

    mask = tf.sequence_mask(self.output_length_placeholder, self.config.max_length, dtype=tf.float32)
    loss = tf.contrib.seq2seq.sequence_loss(preds, self.output_placeholder, mask)

    with tf.control_dependencies([tf.assert_non_negative(loss, data=[preds, mask], summarize=256*60*300)]):
        return tf.identity(loss)
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
                                          tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
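A hypothetical usage sketch feeding the result into tf.nn.ctc_loss; note that gather_nd above is a project-local helper taking a shape argument, not tf.gather_nd, and must be in scope. The placeholder shapes (and the 29-class output, typical of a character-level alphabet) are illustrative.

dense_labels = tf.placeholder(tf.int32, [None, None])  # zero-padded label matrix
label_lengths = tf.placeholder(tf.int32, [None])
logits = tf.placeholder(tf.float32, [None, None, 29])  # time-major network output
seq_lengths = tf.placeholder(tf.int32, [None])

sparse_labels = ctc_label_dense_to_sparse(dense_labels, label_lengths, batch_size=16)
loss = tf.nn.ctc_loss(labels=sparse_labels, inputs=logits, sequence_length=seq_lengths)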
def __call__(self, x, train=True):
    shape = x.get_shape().as_list()
    with tf.variable_scope(self.name) as scope:
        self.beta = tf.get_variable("beta", shape[1:],
                                    initializer=tf.constant_initializer(0.))
        self.gamma = tf.get_variable("gamma", shape[1:],
                                     initializer=tf.random_normal_initializer(1., 0.02))
        self.mean = tf.get_variable("mean", shape[1:],
                                    initializer=tf.constant_initializer(0.), trainable=False)
        self.variance = tf.get_variable("variance", shape[1:],
                                        initializer=tf.constant_initializer(1.), trainable=False)
        if train:
            batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
            # Run the assigns before the EMA update so the moving averages
            # track the current batch statistics rather than the variables'
            # initial values.
            assign_mean = self.mean.assign(batch_mean)
            assign_variance = self.variance.assign(batch_var)
            with tf.control_dependencies([assign_mean, assign_variance]):
                ema_apply_op = self.ema.apply([self.mean, self.variance])
            with tf.control_dependencies([ema_apply_op]):
                mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema.average(self.mean), self.ema.average(self.variance)
        normed = tf.nn.batch_normalization(x, mean, var, self.beta, self.gamma, self.epsilon)
    return normed
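A usage sketch for this class-based variant, assuming __init__ sets self.name, self.epsilon and self.ema (e.g. self.ema = tf.train.ExponentialMovingAverage(decay=0.9)); the BatchNorm class name and conv_out tensor are hypothetical:

bn = BatchNorm(name='bn1')
h = bn(conv_out, train=True)  # batch statistics plus EMA update in the training graph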
def conv2d_lrelu(inputs, num_outputs, kernel_size, stride):
    # activation_fn=tf.identity disables the layer's built-in ReLU so a
    # custom activation can be applied afterwards.
    conv = tf.contrib.layers.convolution2d(inputs, num_outputs, kernel_size, stride,
                                           weights_initializer=tf.contrib.layers.xavier_initializer(),
                                           activation_fn=tf.identity)
    conv = lrelu(conv)
    return conv

def conv2d_t_relu(inputs, num_outputs, kernel_size, stride):
    conv = tf.contrib.layers.convolution2d_transpose(inputs, num_outputs, kernel_size, stride,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(),
                                                     activation_fn=tf.identity)
    conv = tf.nn.relu(conv)
    return conv

def fc_lrelu(inputs, num_outputs):
    fc = tf.contrib.layers.fully_connected(inputs, num_outputs,
                                           weights_initializer=tf.contrib.layers.xavier_initializer(),
                                           activation_fn=tf.identity)
    fc = lrelu(fc)
    return fc
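These helpers read as building blocks of a convolutional encoder; a minimal sketch composing them (lrelu is assumed to be defined elsewhere in the same file, numpy imported as np, and the layer sizes are illustrative):

def encoder(x, z_dim):
    h = conv2d_lrelu(x, 64, 4, 2)
    h = conv2d_lrelu(h, 128, 4, 2)
    h = tf.reshape(h, [-1, np.prod(h.get_shape().as_list()[1:])])
    h = fc_lrelu(h, 1024)
    return tf.contrib.layers.fully_connected(h, z_dim, activation_fn=tf.identity)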