def ln(tensor, scope=None, epsilon=1e-5):
    """Layer-normalizes a 2D tensor along its second axis."""
    assert len(tensor.get_shape()) == 2
    # Per-row mean and variance over the feature axis.
    m, v = tf.nn.moments(tensor, [1], keep_dims=True)
    if not isinstance(scope, str):
        scope = ''
    with tf.variable_scope(scope + 'layer_norm'):
        scale = tf.get_variable('scale',
                                shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(1))
        shift = tf.get_variable('shift',
                                shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(0))
    LN_initial = (tensor - m) / tf.sqrt(v + epsilon)
    return LN_initial * scale + shift
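A short usage sketch for ln() (TensorFlow 1.x graph mode assumed; the shapes are illustrative, not from the source):

x = tf.placeholder(tf.float32, shape=[None, 128])
y = ln(x, scope='encoder_')  # scale/shift variables live under 'encoder_layer_norm'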
Python constant_initializer() usage examples
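Every snippet on this page uses constant_initializer(value), which fills a variable with a fixed starting value. A minimal self-contained sketch (TensorFlow 1.x assumed; the names are illustrative):

import tensorflow as tf

v = tf.get_variable('v', shape=[2, 3],
                    initializer=tf.constant_initializer(1.5))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))  # every entry is 1.5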
Source: ln_lstm2.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def __call__(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        bias_ones = self._bias_initializer
        if self._bias_initializer is None:
            dtype = [a.dtype for a in [inputs, state]][0]
            bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
        value = rnn_cell_impl._linear([inputs, state], 2 * self._num_units,
                                      True, bias_ones, self._kernel_initializer)
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
        # Layer-normalize each gate before the sigmoid.
        r, u = layer_normalization(r, scope="r/"), layer_normalization(u, scope="u/")
        r, u = math_ops.sigmoid(r), math_ops.sigmoid(u)
    with vs.variable_scope("candidate"):
        c = self._activation(
            rnn_cell_impl._linear([inputs, r * state], self._num_units, True,
                                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h
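A hedged wiring sketch: the enclosing class is not shown in the snippet, so LayerNormGRUCell below is a hypothetical name for it; any RNNCell subclass with this __call__ is driven the same way.

cell = LayerNormGRUCell(num_units=64)                 # hypothetical class name
inputs = tf.placeholder(tf.float32, [None, 20, 32])   # [batch, time, features]
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)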
def multilayer_perceptron(_X, input_size, n_hidden, n_class, forward_only=False):
    with variable_scope.variable_scope("DNN"):
        bias_start = 0.0
        weight_hidden = variable_scope.get_variable("Weight_Hidden",
                                                    [input_size, n_hidden])
        bias_hidden = variable_scope.get_variable(
            "Bias_Hidden", [n_hidden],
            initializer=init_ops.constant_initializer(bias_start))
        # Hidden layer with ReLU activation.
        layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, weight_hidden), bias_hidden))
        if not forward_only:
            layer_1 = tf.nn.dropout(layer_1, 0.5)
        weight_out = variable_scope.get_variable("Weight_Out", [n_hidden, n_class])
        bias_out = variable_scope.get_variable(
            "Bias_Out", [n_class],
            initializer=init_ops.constant_initializer(bias_start))
        output = tf.matmul(layer_1, weight_out) + bias_out
        # regularizers = tf.nn.l2_loss(weight_hidden) + tf.nn.l2_loss(bias_hidden) + tf.nn.l2_loss(weight_out) + tf.nn.l2_loss(bias_out)
    return output
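A hedged usage sketch (the shapes are illustrative, not from the source); forward_only=True simply skips the training-time dropout:

_X = tf.placeholder(tf.float32, [None, 784])
logits = multilayer_perceptron(_X, input_size=784, n_hidden=256,
                               n_class=10, forward_only=True)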
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or "eunn_cell"):
        state = _eunn_loop(state, self._capacity, self.diag_vec,
                           self.off_vec, self.diag, self._fft)
        input_matrix_init = init_ops.random_uniform_initializer(-0.01, 0.01)
        if self._comp:
            # Complex-valued input projection: separate real/imaginary parts.
            input_matrix_re = vs.get_variable(
                "U_re", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            input_matrix_im = vs.get_variable(
                "U_im", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs_re = math_ops.matmul(inputs, input_matrix_re)
            inputs_im = math_ops.matmul(inputs, input_matrix_im)
            inputs = math_ops.complex(inputs_re, inputs_im)
        else:
            input_matrix = vs.get_variable(
                "U", [inputs.get_shape()[-1], self._hidden_size],
                initializer=input_matrix_init)
            inputs = math_ops.matmul(inputs, input_matrix)
        bias = vs.get_variable("modReLUBias", [self._hidden_size],
                               initializer=init_ops.constant_initializer())
        output = self._activation((inputs + state), bias, self._comp)
    return output, output
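For reference, a hedged sketch of the modReLU nonlinearity that self._activation is expected to be here (following the unitary/EUNN RNN literature; the helper below and the nn_ops alias are assumptions, not shown in the source): modReLU(z, b) = ReLU(|z| + b) * z / |z|, applied elementwise.

def modrelu_sketch(z, bias, comp):
    if comp:
        mag = math_ops.abs(z)  # magnitude of a complex tensor (real-valued)
        scale = nn_ops.relu(mag + bias) / (mag + 1e-12)
        return math_ops.complex(math_ops.real(z) * scale,
                                math_ops.imag(z) * scale)
    return nn_ops.relu(math_ops.abs(z) + bias) * math_ops.sign(z)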
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        bias_ones = self._bias_initializer
        if self._bias_initializer is None:
            dtype = [a.dtype for a in [inputs, state]][0]
            bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
        value = math_ops.sigmoid(
            linear([inputs, state], 2 * self._num_units, True, bias_ones,
                   self._kernel_initializer))
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
        c = self._activation(
            linear([inputs, r * state], self._num_units, True,
                   self._bias_initializer, self._kernel_initializer))
        # Recurrent dropout as proposed in https://arxiv.org/pdf/1603.05118.pdf
        # (currently disabled):
        # if self._is_training and Params.dropout is not None:
        #     c = tf.nn.dropout(c, 1 - Params.dropout)
    new_h = u * state + (1 - u) * c
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
        state_out = linearTransformIdentityInit(state, self._num_units)
        if self._bottom:
            input_out = linearTransformWithBias([inputs], self._num_units,
                                                bias=False, scope=scope)
        else:
            input_out = linearTransformIdentityInit(inputs, self._num_units,
                                                    scope=scope)
        bias = vs.get_variable(
            "input_bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.abs(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
        t_state = tf.transpose(state)
        state_out = doRotations(t_state, self._rotations)
        input_out = linearTransformWithBias([inputs], self._num_units,
                                            bias=False, scope=scope)
        state_out = tf.transpose(state_out)
        bias = vs.get_variable(
            "Bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.nn.relu(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
        state_out = linearTransformIdentityInit(state, self._num_units)
        if self._bottom:
            input_out = linearTransformWithBias([inputs], self._num_units,
                                                bias=False, scope=scope)
        else:
            input_out = linearTransformIdentityInit(inputs, self._num_units,
                                                    scope=scope)
        bias = vs.get_variable(
            "input_bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.nn.relu(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
        t_state = tf.transpose(state)
        state_out = doRotations(t_state, self._rotations)
        input_out = linearTransformWithBias([inputs], self._num_units,
                                            bias=False, scope=scope)
        state_out = tf.transpose(state_out)
        gate = linearTransformWithBias([inputs, state], self._num_units,
                                       True, scope='GateLinearTransfrom')
        gate = tf.nn.sigmoid(gate, name='GateSigmoid')
        bias = vs.get_variable(
            "Bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        # Candidate is weighted by the complement of the gate.
        input_gate = 1.0 - gate
        output = state * gate + input_gate * tf.abs(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
        t_state = tf.transpose(state)
        state_out = doRotations(t_state, self._rotations)
        input_out = linearTransformWithBias([inputs], self._num_units,
                                            bias=False, scope=scope)
        state_out = tf.transpose(state_out)
        bias = vs.get_variable(
            "Bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.abs(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
        t_state = tf.transpose(state)
        t_inputs = tf.transpose(inputs)
        if self._bottom:
            [state_out] = rotationTransform([("StateL", t_state)],
                                            self._num_units, scope,
                                            self._num_rots)
            input_out = linearTransformWithBias([inputs], self._num_units,
                                                bias=False, scope=scope)
        else:
            [state_out, input_out] = rotationTransform(
                [("StateL", t_state), ("InputL", t_inputs)],
                self._num_units, scope)
            input_out = tf.transpose(input_out)
        state_out = tf.transpose(state_out)
        bias = vs.get_variable(
            "Bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.abs(state_out + input_out + bias)
    return output, output
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
        state_rot = rotationTransform(tf.transpose(state), self._num_units,
                                      self._num_params,
                                      self._cos_list, self._sin_list,
                                      self._nsin_list,
                                      self._cos_idxs, self._sin_idxs,
                                      self._nsin_idxs)
        state_scale, sigma = diagonalTransform(state_rot, self._num_units)
        self.sigma = sigma
        state_out = rotationTransform(state_scale, self._num_units,
                                      self._num_params,
                                      self._cos_list, self._sin_list,
                                      self._nsin_list,
                                      self._cos_idxs, self._sin_idxs,
                                      self._nsin_idxs)
        state_out = tf.transpose(state_out)
        input_out = linearTransformWithBias([inputs], self._num_units, bias=False)
        bias = vs.get_variable(
            "Bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))
        output = tf.abs(state_out + input_out + bias)
    return output, output
Source: layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testHorzConvWithBlankImageAndPlaceholder(self):
  image = array_ops.placeholder(dtypes.float32, shape=(None, None, None, 1))
  horz_gradients = layers_lib.conv2d_in_plane(
      image,
      weights_initializer=init_ops.constant_initializer([1, -1]),
      kernel_size=[1, 2],
      padding='VALID',
      activation_fn=None)
  init_op = variables_lib.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    result = sess.run(horz_gradients,
                      feed_dict={image: np.ones((1, 10, 10, 1))})
  expected = np.zeros((1, 10, 9, 1))
  self.assertAllEqual(result, expected)
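Restated in plain NumPy (a sketch, not part of the test file): with the constant [1, -1] kernel and kernel_size=[1, 2], each output pixel is a pixel minus its right-hand neighbour, i.e. a horizontal gradient, so a constant image yields all zeros.

image = np.random.rand(5, 10, 10, 1)
horz = image[:, :, :-1, :] - image[:, :, 1:, :]  # shape (5, 10, 9, 1)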
Source: layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testHorzConvWithRandomImageMultiBatch(self):
  np.random.seed(1)
  image = np.random.rand(5, 10, 10, 1)
  expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
  tf_image = constant_op.constant(image, dtype=dtypes.float32)
  horz_gradients = layers_lib.conv2d_in_plane(
      tf_image,
      weights_initializer=init_ops.constant_initializer([1, -1]),
      kernel_size=[1, 2],
      padding='VALID',
      activation_fn=None)
  init_op = variables_lib.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    result = sess.run(horz_gradients)
  self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
Source: layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
  np.random.seed(1)
  image = np.random.rand(5, 10, 10, 7)
  expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
  tf_image = constant_op.constant(image, dtype=dtypes.float32)
  horz_gradients = layers_lib.conv2d_in_plane(
      tf_image,
      weights_initializer=init_ops.constant_initializer([1, -1]),
      kernel_size=[1, 2],
      padding='VALID',
      activation_fn=None)
  init_op = variables_lib.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    result = sess.run(horz_gradients)
  self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
Source: layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testHorzConvWithVaryingImage(self):
  image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
  expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
  expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))
  tf_image = constant_op.constant(
      image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
  horz_gradients = layers_lib.conv2d_in_plane(
      tf_image,
      weights_initializer=init_ops.constant_initializer([1, -1]),
      kernel_size=[1, 2],
      padding='VALID',
      activation_fn=None)
  init_op = variables_lib.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    result = sess.run(horz_gradients)
  self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
Source: layers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testVertConvWithBlankImage(self):
  image = array_ops.ones((1, 10, 10, 1))
  vert_gradients = layers_lib.conv2d_in_plane(
      image,
      weights_initializer=init_ops.constant_initializer([1, -1]),
      kernel_size=[2, 1],
      padding='VALID',
      activation_fn=None)
  init_op = variables_lib.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    result = sess.run(vert_gradients)
  expected = np.zeros((1, 9, 10, 1))
  self.assertAllEqual(result, expected)
Source: optimizers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNoGlobalStepWithDecay(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g):
      x = array_ops.placeholder(dtypes.float32, [])
      var = variable_scope.get_variable(
          "test", [], initializer=init_ops.constant_initializer(10))
      loss = math_ops.abs(var * x)
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      with self.assertRaisesRegexp(
          ValueError, "global_step is required for learning_rate_decay_fn"):
        optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
Source: optimizers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testNoGlobalStepArg(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step=None,
          learning_rate=0.1,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
      self.assertEqual(1, global_step.eval())
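Several of these optimizer tests call a _setup_model() helper that this page does not reproduce. A hedged sketch consistent with the asserted values (var starts at 10.0; one SGD step with learning_rate=0.1 on loss = |var * x| at x = 5 has gradient 5, moving var to 9.5); the global_step details in particular are an assumption:

def _setup_model():
  x = array_ops.placeholder(dtypes.float32, [])
  var = variable_scope.get_variable(
      "test", [], initializer=init_ops.constant_initializer(10))
  loss = math_ops.abs(var * x)  # d(loss)/d(var) = x * sign(var * x)
  global_step = variable_scope.get_variable(
      "global_step", [], trainable=False, dtype=dtypes.int64,
      initializer=init_ops.constant_initializer(0, dtype=dtypes.int64))
  return x, var, loss, global_step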
Source: optimizers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUpdateOp(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step,
          learning_rate=0.1,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
      self.assertEqual(1, global_step.eval())
Source: optimizers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUpdateOpWithNoOpDecay(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      train = optimizers_lib.optimize_loss(
          loss,
          global_step,
          learning_rate=0.1,
          learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
          optimizer=optimizer,
          update_ops=[update_op])
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      self.assertEqual(9.5, var.eval())
      self.assertEqual(20, update_var.eval())
      self.assertEqual(1, global_step.eval())
Source: optimizers_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testUpdateOpFromCollection(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
      x, var, loss, global_step = _setup_model()
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
      train = optimizers_lib.optimize_loss(
          loss, global_step, learning_rate=0.1, optimizer=optimizer)
      variables.global_variables_initializer().run()
      session.run(train, feed_dict={x: 5})
      var_value, update_var_value, global_step_value = session.run(
          [var, update_var, global_step])
      self.assertEqual(var_value, 9.5)
      self.assertEqual(update_var_value, 20)
      self.assertEqual(global_step_value, 1)
Source: feature_column_ops_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testEmbeddingColumnWithInitializerSucceedsForDNN(self):
  hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
  wire_tensor = sparse_tensor.SparseTensor(
      values=["omar", "stringer", "marlo"],
      indices=[[0, 0], [1, 0], [1, 1]],
      dense_shape=[2, 2])
  features = {"wire": wire_tensor}
  init_value = 133.7
  embeded_sparse = feature_column.embedding_column(
      hashed_sparse,
      10,
      initializer=init_ops.constant_initializer(init_value))
  output = feature_column_ops.input_from_feature_columns(features,
                                                         [embeded_sparse])
  with self.test_session():
    variables_lib.global_variables_initializer().run()
    output_eval = output.eval()
    self.assertAllEqual(output_eval.shape, [2, 10])
    self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))
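Why the final assertion holds (a NumPy sketch of the arithmetic, not part of the test; the row indices below are arbitrary): the constant initializer makes every row of the [10, 10] embedding table equal to init_value, so whichever buckets the strings hash to, the combined embedding is init_value in every dimension.

table = np.full((10, 10), 133.7)       # what constant_initializer(133.7) produces
combined = table[[3, 7]].mean(axis=0)  # any rows, mean combiner -> all 133.7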
Source: feature_column_ops_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testDeepColumnsSucceedForDNN(self):
  real_valued = feature_column.real_valued_column("income", 3)
  bucket = feature_column.bucketized_column(
      feature_column.real_valued_column("price", 2),
      boundaries=[0., 10., 100.])
  hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
  features = {
      "income":
          constant_op.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
      "price":
          constant_op.constant([[20., 200], [110, 2], [-20, -30]]),
      "wire":
          sparse_tensor.SparseTensor(
              values=["omar", "stringer", "marlo"],
              indices=[[0, 0], [1, 0], [2, 0]],
              dense_shape=[3, 1])
  }
  embeded_sparse = feature_column.embedding_column(
      hashed_sparse, 10, initializer=init_ops.constant_initializer(133.7))
  output = feature_column_ops.input_from_feature_columns(
      features, [real_valued, bucket, embeded_sparse])
  with self.test_session():
    variables_lib.global_variables_initializer().run()
    # size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
    self.assertAllEqual(output.eval().shape, [3, 21])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2BasicLSTMCellWithRelu(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.2)):
      x = array_ops.zeros([1, 3])
      m = array_ops.zeros([1, 4])
      cell = grid_rnn_cell.Grid2BasicLSTMCell(
          2, tied=False, non_recurrent_fn=nn_ops.relu)
      self.assertEqual(cell.state_size, 4)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (1, 2))
      self.assertEqual(s.get_shape(), (1, 4))
      sess.run([variables.global_variables_initializer()])
      res = sess.run(
          [g, s],
          {x: np.array([[1., 1., 1.]]),
           m: np.array([[0.1, 0.2, 0.3, 0.4]])})
      self.assertEqual(res[0].shape, (1, 2))
      self.assertEqual(res[1].shape, (1, 4))
      self.assertAllClose(res[0], [[0.31667367, 0.31667367]])
      self.assertAllClose(res[1], [[0.29530135, 0.37520045, 0.17044567,
                                    0.21292259]])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2LSTMCellTied(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([1, 3])
      m = array_ops.zeros([1, 8])
      cell = grid_rnn_cell.Grid2LSTMCell(2, tied=True, use_peepholes=True)
      self.assertEqual(cell.state_size, 8)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (1, 2))
      self.assertEqual(s.get_shape(), (1, 8))
      sess.run([variables.global_variables_initializer()])
      res = sess.run([g, s], {
          x: np.array([[1., 1., 1.]]),
          m: np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]])
      })
      self.assertEqual(res[0].shape, (1, 2))
      self.assertEqual(res[1].shape, (1, 8))
      self.assertAllClose(res[0], [[0.95686918, 0.95686918]])
      self.assertAllClose(res[1], [[2.41515064, 2.41515064, 0.95686918,
                                    0.95686918, 1.38917875, 1.49043763,
                                    0.83884692, 0.86036491]])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2LSTMCellWithRelu(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([1, 3])
      m = array_ops.zeros([1, 4])
      cell = grid_rnn_cell.Grid2LSTMCell(
          2, use_peepholes=True, non_recurrent_fn=nn_ops.relu)
      self.assertEqual(cell.state_size, 4)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (1, 2))
      self.assertEqual(s.get_shape(), (1, 4))
      sess.run([variables.global_variables_initializer()])
      res = sess.run(
          [g, s],
          {x: np.array([[1., 1., 1.]]),
           m: np.array([[0.1, 0.2, 0.3, 0.4]])})
      self.assertEqual(res[0].shape, (1, 2))
      self.assertEqual(res[1].shape, (1, 4))
      self.assertAllClose(res[0], [[2.1831727, 2.1831727]])
      self.assertAllClose(res[1], [[0.92270052, 1.02325559, 0.66159075,
                                    0.70475441]])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2BasicRNNCell(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([2, 2])
      m = array_ops.zeros([2, 4])
      cell = grid_rnn_cell.Grid2BasicRNNCell(2)
      self.assertEqual(cell.state_size, 4)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (2, 2))
      self.assertEqual(s.get_shape(), (2, 4))
      sess.run([variables.global_variables_initializer()])
      res = sess.run([g, s], {
          x: np.array([[1., 1.], [2., 2.]]),
          m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])
      })
      self.assertEqual(res[0].shape, (2, 2))
      self.assertEqual(res[1].shape, (2, 4))
      self.assertAllClose(res[0], [[0.94685763, 0.94685763],
                                   [0.99480951, 0.99480951]])
      self.assertAllClose(res[1],
                          [[0.94685763, 0.94685763, 0.80049908, 0.80049908],
                           [0.99480951, 0.99480951, 0.97574311, 0.97574311]])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2BasicRNNCellTied(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([2, 2])
      m = array_ops.zeros([2, 4])
      cell = grid_rnn_cell.Grid2BasicRNNCell(2, tied=True)
      self.assertEqual(cell.state_size, 4)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (2, 2))
      self.assertEqual(s.get_shape(), (2, 4))
      sess.run([variables.global_variables_initializer()])
      res = sess.run([g, s], {
          x: np.array([[1., 1.], [2., 2.]]),
          m: np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])
      })
      self.assertEqual(res[0].shape, (2, 2))
      self.assertEqual(res[1].shape, (2, 4))
      self.assertAllClose(res[0], [[0.94685763, 0.94685763],
                                   [0.99480951, 0.99480951]])
      self.assertAllClose(res[1],
                          [[0.94685763, 0.94685763, 0.80049908, 0.80049908],
                           [0.99480951, 0.99480951, 0.97574311, 0.97574311]])
Source: grid_rnn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testGrid2BasicRNNCellWithRelu(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        'root', initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([1, 2])
      m = array_ops.zeros([1, 2])
      cell = grid_rnn_cell.Grid2BasicRNNCell(2, non_recurrent_fn=nn_ops.relu)
      self.assertEqual(cell.state_size, 2)
      g, s = cell(x, m)
      self.assertEqual(g.get_shape(), (1, 2))
      self.assertEqual(s.get_shape(), (1, 2))
      sess.run([variables.global_variables_initializer()])
      res = sess.run([g, s],
                     {x: np.array([[1., 1.]]),
                      m: np.array([[0.1, 0.1]])})
      self.assertEqual(res[0].shape, (1, 2))
      self.assertEqual(res[1].shape, (1, 2))
      self.assertAllClose(res[0], [[1.80049896, 1.80049896]])
      self.assertAllClose(res[1], [[0.80049896, 0.80049896]])