Python get_variable() examples

# Imports for this snippet, as used in the TensorFlow source it is excerpted from:
import math

from tensorflow.python.ops import variable_scope as vs


def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards
  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
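A minimal usage sketch (my addition, not from the original source), assuming TensorFlow 1.x and the imports above: sharding a [10, 4] variable across three shards leaves one remainder row, which goes to the first shard.

import tensorflow as tf

with tf.variable_scope("sharded"):
  shards = _get_sharded_variable("w", [10, 4], tf.float32, num_shards=3)

# floor(10 / 3) = 3 rows per shard; the 1 leftover row goes to shard 0.
print([s.get_shape().as_list() for s in shards])  # [[4, 4], [3, 4], [3, 4]]
w_full = tf.concat(shards, 0)  # stitch the shards back together when needed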
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
      if type(state) is tuple:
        data_type = state[0].dtype
      else:
        data_type = state.dtype
      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(
          embedding, array_ops.reshape(inputs, [-1]))
      return self._cell(embedded, state)
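A hedged usage sketch (my addition): this __call__ matches the EmbeddingWrapper cell that ships as tf.contrib.rnn.EmbeddingWrapper in TF 1.x; wrapping a GRU cell looks roughly like this.

import tensorflow as tf

# Assumes TF 1.x with tf.contrib available.
cell = tf.contrib.rnn.EmbeddingWrapper(
    tf.contrib.rnn.GRUCell(64),
    embedding_classes=10000,  # vocabulary size
    embedding_size=32)        # embedding dimension
token_ids = tf.placeholder(tf.int32, [None])  # one time step of token ids
state = cell.zero_state(tf.shape(token_ids)[0], tf.float32)
output, new_state = cell(token_ids, state)    # embeds the ids, then runs the GRU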
seq2seq.py, from the project Variational-Recurrent-Autoencoder-Tensorflow by Chung-I
def latent_to_decoder(latent_vector,
                      embedding_size,
                      latent_dim,
                      num_layers,
                      activation=tf.nn.relu,
                      use_lstm=False,
                      dtype=None):
  """Projects a latent vector to the decoder's initial state."""
  concat_state_size = num_layers * embedding_size
  if use_lstm:
    concat_state_size *= 2  # An LSTM state holds both c and h per layer.
  with tf.variable_scope('latent_to_decoder'):
    w = tf.get_variable("w", [latent_dim, concat_state_size], dtype=dtype)
    b = tf.get_variable("b", [concat_state_size], dtype=dtype)
    # `prelu` is a helper defined elsewhere in this project.
    decoder_initial_state = prelu(tf.matmul(latent_vector, w) + b)
    if num_layers > 1:
      # Note: pre-1.0 tf.split signature, tf.split(axis, num_splits, value).
      decoder_initial_state = tuple(tf.split(1, num_layers,
                                             decoder_initial_state))
      if use_lstm:
        decoder_initial_state = [
            tuple(tf.split(1, 2, single_layer_state))
            for single_layer_state in decoder_initial_state
        ]
    elif use_lstm:
      decoder_initial_state = tuple(tf.split(1, 2, decoder_initial_state))
    return decoder_initial_state
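A hypothetical call sketch (my addition; assumes the project's prelu helper is in scope and the pre-1.0 tf.split signature used above): mapping a batch of 16-dimensional latent codes to the initial state of a 2-layer, 128-unit GRU decoder.

z = tf.placeholder(tf.float32, [None, 16])  # batch of 16-d latent codes
initial_state = latent_to_decoder(z,
                                  embedding_size=128,
                                  latent_dim=16,
                                  num_layers=2,
                                  use_lstm=False,
                                  dtype=tf.float32)
# initial_state is a 2-tuple of [batch, 128] tensors, one per decoder layer.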
checkpoint_utils_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def _create_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  v1 = variable_scope.get_variable("var1", [1, 10])
  v2 = variable_scope.get_variable("var2", [10, 10])
  v3 = variable_scope.get_variable("var3", [100, 100])
  with variable_scope.variable_scope("useful_scope"):
    v4 = variable_scope.get_variable("var4", [9, 9])
  sess.run(variables.global_variables_initializer())
  v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value, v2_value, v3_value, v4_value
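A hedged sketch (my addition): after _create_checkpoints runs, the contrib checkpoint utilities can list what was saved. checkpoint_utils.list_variables is assumed to be the helper from tensorflow.contrib.framework, as imported in this test file.

with self.test_session() as session:  # inside a tf.test.TestCase method
  _create_checkpoints(session, checkpoint_dir)
# Each entry is a (name, shape) pair for one saved variable.
for name, shape in checkpoint_utils.list_variables(checkpoint_dir):
  print(name, shape)  # e.g. "useful_scope/var4" [9, 9]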
checkpoint_utils_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  with variable_scope.variable_scope("scope"):
    v1 = variable_scope.get_variable(
        name="var1",
        shape=[100, 100],
        initializer=init_ops.truncated_normal_initializer(0.5),
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
checkpoint_utils_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testInitFromRootCheckpoint(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable("var1", [1, 10])
        my2 = variable_scope.get_variable("var2", [10, 10])
        my3 = variable_scope.get_variable("var3", [100, 100])
        with variable_scope.variable_scope("useful_scope"):
          my4 = variable_scope.get_variable("var4", [9, 9])

      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"/": "some_scope/",})

      session.run(variables.global_variables_initializer())
      self.assertAllEqual(my1.eval(session), v1)
      self.assertAllEqual(my2.eval(session), v2)
      self.assertAllEqual(my3.eval(session), v3)
      self.assertAllEqual(my4.eval(session), v4)
stochastic_variables_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testStochasticVariables(self):
  shape = (10, 20)
  with variable_scope.variable_scope(
      "stochastic_variables",
      custom_getter=sv.make_stochastic_variable_getter(
          dist_cls=dist.NormalWithSoftplusScale)):
    v = variable_scope.get_variable("sv", shape)

  self.assertTrue(isinstance(v, st.StochasticTensor))
  self.assertTrue(isinstance(v.distribution, dist.NormalWithSoftplusScale))

  self.assertEqual(
      {"stochastic_variables/sv_loc", "stochastic_variables/sv_scale"},
      set([v.op.name for v in variables.global_variables()]))
  self.assertEqual(
      set(variables.trainable_variables()), set(variables.global_variables()))

  v = ops.convert_to_tensor(v)
  self.assertEqual(list(shape), v.get_shape().as_list())
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertEqual(shape, sess.run(v).shape)
stochastic_variables_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testStochasticVariablesWithConstantInitializer(self):
  shape = (10, 20)
  with variable_scope.variable_scope(
      "stochastic_variables",
      custom_getter=sv.make_stochastic_variable_getter(
          dist_cls=dist.NormalWithSoftplusScale,
          dist_kwargs={"validate_args": True},
          param_initializers={
              "loc": np.ones(shape) * 4.,
              "scale": np.ones(shape) * 2.
          })):
    v = variable_scope.get_variable("sv")

  for var in variables.global_variables():
    if "loc" in var.name:
      mu_var = var
    if "scale" in var.name:
      sigma_var = var

  v = ops.convert_to_tensor(v)
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
    self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
    self.assertEqual(shape, sess.run(v).shape)
stochastic_variables_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testStochasticVariablesWithCallablePriorInitializer(self):

  def prior_init(shape, dtype):
    return dist.Normal(
        array_ops.zeros(shape, dtype), array_ops.ones(shape, dtype))

  with variable_scope.variable_scope(
      "stochastic_variables",
      custom_getter=sv.make_stochastic_variable_getter(
          dist_cls=dist.NormalWithSoftplusScale, prior=prior_init)):
    w = variable_scope.get_variable("weights", (10, 20))

  x = random_ops.random_uniform((8, 10))
  y = math_ops.matmul(x, w)

  prior_map = vi._find_variational_and_priors(y, None)
  self.assertTrue(isinstance(prior_map[w], dist.Normal))
  elbo = vi.elbo(y, keep_batch_dim=False)

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(elbo)
loss_ops_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testGradientWithZeroWeight(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)

    inputs = array_ops.ones((2, 3))
    weights = variable_scope.get_variable(
        'weights',
        shape=[3, 4],
        initializer=init_ops.truncated_normal_initializer())
    predictions = math_ops.matmul(inputs, weights)

    optimizer = momentum_lib.MomentumOptimizer(
        learning_rate=0.001, momentum=0.9)
    loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)

    gradients_to_variables = optimizer.compute_gradients(loss)

    init_op = variables.global_variables_initializer()

    with self.test_session() as sess:
      sess.run(init_op)
      for grad, _ in gradients_to_variables:
        np_grad = sess.run(grad)
        self.assertFalse(np.isnan(np_grad).any())
embeddings_ops.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for a categorical variable with a given number of classes.

  Args:
    tensor_in: Input tensor with class identifiers (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of the embedding vector representing each class.
    name: Name of this categorical variable.

  Returns:
    Tensor of the input shape, with an additional dimension for the embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat") will return a 2 x 10
    tensor, where each row is the representation of one class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + '_embeddings',
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in)
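A short usage sketch matching the docstring's example (my addition; assumes TF 1.x and the snippet's own imports, variable_scope as vs and embedding_ops' embedding_lookup, are in place):

import tensorflow as tf

ids = tf.constant([1, 2])  # two class ids drawn from 5 classes
embedded = categorical_variable(ids, n_classes=5, embedding_size=10,
                                name="my_cat")
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(embedded).shape)  # (2, 10)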
optimizers_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testNoGlobalStep(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x = array_ops.placeholder(dtypes.float32, [])
      var = variable_scope.get_variable(
          "test", [], initializer=init_ops.constant_initializer(10))
      loss = math_ops.abs(var * x)
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step=None,
          learning_rate=0.1,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
optimizers_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testNoGlobalStepWithDecay(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g):
      x = array_ops.placeholder(dtypes.float32, [])
      var = variable_scope.get_variable(
          "test", [], initializer=init_ops.constant_initializer(10))
      loss = math_ops.abs(var * x)
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      with self.assertRaisesRegexp(
          ValueError, "global_step is required for learning_rate_decay_fn"):
        optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
optimizers_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testUpdateOp(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step,
          learning_rate=0.1,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
      self.assertEqual(1, global_step.eval())
optimizers_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testUpdateOpWithNoOpDecay(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step,
          learning_rate=0.1,
          learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
      self.assertEqual(1, global_step.eval())
optimizers_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testUpdateOpFromCollection(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
      train = optimizers_lib.optimize_loss(
          loss, global_step, learning_rate=0.1, optimizer=optimizer)
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      var_value, update_var_value, global_step_value = session.run(
          [var, update_var, global_step])
      self.assertEqual(var_value, 9.5)
      self.assertEqual(update_var_value, 20)
      self.assertEqual(global_step_value, 1)
jit_test.py, from the project DeepLearning_VirtualReality_BigData_Project by rashmitripathi
def testJITVariableSeed(self):
  """Test that the stateful initializer is not marked for compilation.

  XLA does not currently support seeded initialization, so XLA initializers
  return different values than their non-XLA counterparts. Here we ensure
  that if we disable JIT compilation for the initializers, we get the same
  variable values as if no JIT compilation had happened.
  """

  def create_ops():
    with variable_scope.variable_scope(
        "root",
        initializer=init_ops.random_uniform_initializer(
            -0.1, 0.1, seed=2)):
      inputs = variable_scope.get_variable("var", (1,))
      return inputs

  _, v_false_1 = self.compute(False, create_ops)
  _, v_false_2 = self.compute(False, create_ops)
  _, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
  _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
  self.assertAllClose(v_false_1, v_false_2)
  self.assertAllClose(v_true_1, v_true_2)
  self.assertAllClose(v_false_1, v_true_1)
def embedding_attention_decoder(initial_state,
                                attention_states,
                                cell,
                                num_symbols,
                                time_steps,
                                batch_size,
                                embedding_size,
                                output_size=None,
                                output_projection=None,
                                feed_previous=False,
                                update_embedding_for_previous=True,
                                dtype=None,
                                scope=None):
  if output_size is None:
    output_size = cell.output_size
  if output_projection is not None:
    proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with variable_scope.variable_scope(
      scope or "embedding_attention_decoder", dtype=dtype) as scope:
    embedding = variable_scope.get_variable("embedding",
                                            [num_symbols, embedding_size])
    # tf.nn.seq2seq is the pre-1.0 location of the seq2seq library.
    loop_function = tf.nn.seq2seq._extract_argmax_and_embed(
        embedding, output_projection,
        update_embedding_for_previous) if feed_previous else None
    return attention_decoder(
        initial_state,
        attention_states,
        cell,
        num_symbols,
        time_steps,
        batch_size,
        output_size=output_size,
        loop_function=loop_function)
def get_biases(name, shape, value, trainable=True):
  return tf.get_variable('biases{}'.format(name), shape,
                         initializer=tf.constant_initializer(value),
                         trainable=trainable)
def _weight_variable(shape, name='weights',
                     initializer=tf.contrib.layers.xavier_initializer(
                         uniform=True, seed=None, dtype=tf.float32)):
  return tf.get_variable(shape=shape, initializer=initializer, name=name)
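A small sketch combining the two helpers above into a dense layer (my addition; assumes TF 1.x with tf.contrib available for the Xavier initializer):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 256])
w = _weight_variable([256, 128], name='fc_weights')
b = get_biases('fc', [128], value=0.1)  # creates a variable named 'biasesfc'
y = tf.nn.relu(tf.matmul(x, w) + b)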