def test_placeholder(self):
  """Test placeholder functionalities."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1, name="foo")
  # Test placeholder name.
  self.assertEqual(ge.util.placeholder_name(a0), "geph__foo_0")
  self.assertEqual(ge.util.placeholder_name(None), "geph")
  self.assertEqual(
      ge.util.placeholder_name(a0, scope="foo/"), "foo/geph__foo_0")
  self.assertEqual(
      ge.util.placeholder_name(a0, scope="foo"), "foo/geph__foo_0")
  self.assertEqual(ge.util.placeholder_name(None, scope="foo/"), "foo/geph")
  self.assertEqual(ge.util.placeholder_name(None, scope="foo"), "foo/geph")
  # Test placeholder creation.
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1, dtype=dtypes.float32, name="a0")
    c0 = math_ops.add(
        ge.util.make_placeholder_from_tensor(a0),
        ge.util.make_placeholder_from_dtype_and_shape(dtype=dtypes.float32))
    self.assertEqual(c0.op.inputs[0].op.name, "geph__a0_0")
    self.assertEqual(c0.op.inputs[1].op.name, "geph")
Example source code for the Python add() method
Source of the snippet above: util_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
Source: reroute_test.py
def test_reroute_can_modify(self):
  graph = ops.Graph()
  # Create a special graph where "a" is an ambiguous tensor, i.e. it is
  # both an input and an output of the ops in sgv0.
  with graph.as_default():
    a = constant_op.constant(1.0, shape=[2], name="a")
    b = constant_op.constant(2.0, shape=[2], name="b")
    c = math_ops.add(a, b, name="c")
    d = math_ops.add(a, c, name="d")
    e = constant_op.constant(1.0, shape=[2], name="e")
    f = constant_op.constant(2.0, shape=[2], name="f")
    g = math_ops.add(e, f, name="g")
  sgv0 = ge.sgv(a.op, b.op, c.op)
  sgv1 = ge.sgv(e.op, f.op)
  ge.swap_outputs(sgv0, sgv1)
  self.assertTrue(
      ge.OpMatcher("g").input_ops("a", ge.OpMatcher("c").input_ops("a", "b"))(
          g.op))
  self.assertTrue(ge.OpMatcher("d").input_ops("e", "f")(d.op))
Source: stochastic_graph_test.py
def testTraversesControlInputs(self):
  dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  logits = dt1.value() * 3.
  dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
  dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  x = dt3.value()
  y = array_ops.ones((2, 2)) * 4.
  z = array_ops.ones((2, 2)) * 3.
  out = control_flow_ops.cond(
      math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y),
      lambda: math_ops.square(z))
  out += 5.
  dep_map = sg._stochastic_dependencies_map([out])
  self.assertEqual(dep_map[dt1], set([out]))
  self.assertEqual(dep_map[dt2], set([out]))
  self.assertEqual(dep_map[dt3], set([out]))
Source: jit_test.py
def testIgnoredArguments(self):
  """Tests that JIT computations can ignore formal parameters."""
  with self.test_session() as sess:
    x = array_ops.placeholder(dtypes.int32)
    y = array_ops.placeholder(dtypes.int32)
    with jit_scope():
      z = math_ops.add(x, x)
      w = math_ops.add(y, y)
      # Pulls 'w' into the same compilation via control dependencies.
      with ops.control_dependencies([w]):
        n = control_flow_ops.no_op()
      with ops.control_dependencies([n]):
        t = math_ops.add(z, z)

    run_metadata = config_pb2.RunMetadata()
    out = sess.run(t, {x: np.int32(7),
                       y: np.int32(404)},
                   run_metadata=run_metadata,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE))

    self.assert_(MetadataHasXlaLaunch(run_metadata))
    self.assertAllClose(28, out)
Source: make_test_graphs.py
def tfadd_with_ckpt_saver(out_dir):
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.Variable(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')

  init_op = variables.initialize_all_variables()
  saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt_file = '%s/test_graph_tfadd_with_ckpt_saver.ckpt' % out_dir
    saver.save(sess, ckpt_file)

    # Without the SaverDef, the restore op won't be named correctly.
    saver_file = '%s/test_graph_tfadd_with_ckpt_saver.saver' % out_dir
    # SerializeToString() returns bytes, so write in binary mode.
    with open(saver_file, 'wb') as f:
      f.write(saver.as_saver_def().SerializeToString())
def random_normal(shape,
                  mean=0.0,
                  stddev=1.0,
                  dtype=dtypes.float32,
                  seed=None,
                  name=None):
  """Outputs random values from a normal distribution.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
      distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See `tf.set_random_seed` for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random normal values.
  """
  with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
    shape_tensor = _ShapeTensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._random_standard_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    return value
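A minimal usage sketch (hypothetical shape, values, and seed; assumes the same TF 1.x modules imported for the snippets above). The kernel draws standard normals and the wrapper rescales them, since mean + stddev * Z has distribution N(mean, stddev**2):

samples = random_normal([2, 3], mean=5.0, stddev=2.0, seed=1)
with session.Session() as sess:
  print(sess.run(samples))  # a 2x3 batch of draws from N(5, 4)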
def truncated_normal(shape,
                     mean=0.0,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     seed=None,
                     name=None):
  """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2
  standard deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See `tf.set_random_seed` for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
  with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
    shape_tensor = _ShapeTensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._truncated_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    return value
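The truncation is the only behavioral difference from random_normal above; a hedged sanity check of that property (hypothetical sample count):

tn = truncated_normal([10000], mean=0.0, stddev=1.0, seed=1)
with session.Session() as sess:
  vals = sess.run(tn)
  # Draws farther than two standard deviations from the mean are re-picked,
  # so every returned value lies in [-2, 2].
  assert np.all(np.abs(vals) <= 2.0)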
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: inputs
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
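A minimal usage sketch (hypothetical input tensor): kept elements are scaled by 1 / keep_prob, so the expected sum matches the input.

net = constant_op.constant(1.0, shape=[4, 4])
train_out = dropout(net, keep_prob=0.5, is_training=True)   # roughly half zeros, survivors scaled to 2.0
eval_out = dropout(net, keep_prob=0.5, is_training=False)   # the input passes through unchanged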
def flatten(inputs,
            outputs_collections=None,
            scope=None):
  """Flattens the input while maintaining the batch_size.

  Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].

  Raises:
    ValueError: if inputs.shape is wrong.
  """
  with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
    dims = inputs_shape[1:]
    if not dims.is_fully_defined():
      raise ValueError('Inputs 2nd dimension must be defined.')
    k = dims.num_elements()
    outputs = array_ops.reshape(inputs, [-1, k])
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
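For example (hypothetical image batch), every dimension after the first is collapsed into one:

images = array_ops.ones([32, 28, 28, 3])
flat = flatten(images)  # shape [32, 2352], since 28 * 28 * 3 = 2352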
def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transforms numeric labels into one-hot labels using `tf.one_hot`.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    one hot encoding of the labels.
  """
  with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    labels = ops.convert_to_tensor(labels)
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    outputs = standard_ops.one_hot(labels,
                                   num_classes,
                                   on_value=on_value,
                                   off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
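A small usage sketch (hypothetical labels):

labels = constant_op.constant([0, 2])
onehot = one_hot_encoding(labels, num_classes=3)
# Evaluates to [[1., 0., 0.],
#               [0., 0., 1.]]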
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `inputs` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If `dim` is not in the range `[0, rank(inputs))`.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
    lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
        math_ops.square(inputs), dim, True))
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(0, multiples)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples))
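A worked example (hypothetical values): each slice along `dim` is divided by its Euclidean length, so a 3-4-5 row normalizes to unit length.

x = constant_op.constant([[3., 4.]])
normed = unit_norm(x, dim=1)  # approximately [[0.6, 0.8]], since sqrt(3**2 + 4**2) = 5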
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is such that the
  expected sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as `inputs`. The
      probability that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model is in
      training mode. If so, dropout is applied and values scaled. Otherwise,
      `inputs` is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    # Unlike the variant above, the pass-through branch is wrapped in an
    # explicit identity op, so both cond branches produce a new tensor.
    id_fn = lambda: array_ops.identity(inputs)
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, shape=ids_shape)
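A hedged illustration of the dense path (hypothetical values): the selected id is broadcast to a tensor whose last dimension is collapsed to 1, and the set intersection keeps only the matching entries of `ids`.

ids = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.int64)
selected = _select_class_id(ids, 5)
# A SparseTensor with dense shape [2, 3] whose only stored value is the 5
# found in row 1; row 0 contributes nothing.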
def __add__(self, other):
  return add(self, other)

def __radd__(self, other):
  return add(other, self)
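`__radd__` covers the case where the left operand is not one of these objects: an expression like `1 + x` falls back to `x.__radd__(1)`, which dispatches to `add(1, x)` and so preserves the operand order.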
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculates the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
    return (mean, variance)
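A quick numeric check of the formulas (hypothetical data): for the values [2., 4.] with shift = 3, we have counts = 2, mean_ss = (2 - 3) + (4 - 3) = 0, and variance_ss = (2 - 3)**2 + (4 - 3)**2 = 2. The function then returns mean = 0 / 2 + 3 = 3 and variance = 2 / 2 - 0**2 = 1, matching the true mean and variance of the data.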
Source: select_test.py
def setUp(self):
  self.graph = ops_lib.Graph()
  with self.graph.as_default():
    self.a = constant_op.constant([1., 1.], shape=[2], name="a")
    with ops_lib.name_scope("foo"):
      self.b = constant_op.constant([2., 2.], shape=[2], name="b")
      self.c = math_ops.add(self.a, self.b, name="c")
      self.d = constant_op.constant([3., 3.], shape=[2], name="d")
      with ops_lib.name_scope("bar"):
        self.e = math_ops.add(self.c, self.d, name="e")
        self.f = math_ops.add(self.c, self.d, name="f")
        self.g = math_ops.add(self.c, self.a, name="g")
        with ops_lib.control_dependencies([self.c.op]):
          self.h = math_ops.add(self.f, self.g, name="h")
Source: select_test.py
def test_compute_boundary_ts_2(self):
  """Test for ge.compute_boundary_ts."""
  graph = ops_lib.Graph()
  with graph.as_default():
    a = constant_op.constant(1, name="a")
    b = constant_op.constant(1, name="b")
    c = math_ops.add(a, b, name="c")
    _ = a + c
  input_ts, output_ts, inside_ts = ge.compute_boundary_ts([a.op, c.op])
  self.assertEqual(list(input_ts), [b])
  self.assertEqual(list(output_ts), [a, c])
  self.assertEqual(list(inside_ts), [a])
Source: transform_test.py
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    c0 = constant_op.constant(1.0, shape=[10], name="Const")
    c1 = constant_op.constant(1.0, shape=[10], name="Const")
    c2 = constant_op.constant(1.0, shape=[10], name="Const")
    i = constant_op.constant(1.0, shape=[10], name="Input")
    self.o = math_ops.add(c2, math_ops.add(c1, math_ops.add(c0, i)))
Source: transform_test.py
def test_copy_assert(self):
  ops.reset_default_graph()
  a = constant_op.constant(1)
  b = constant_op.constant(1)
  eq = math_ops.equal(a, b)
  assert_op = control_flow_ops.Assert(eq, [a, b])
  with ops.control_dependencies([assert_op]):
    _ = math_ops.add(a, b)
  sgv = ge.make_view([assert_op, eq.op, a.op, b.op])
  copier = ge.Transformer()
  _, info = copier(sgv, sgv.graph, "", "")
  new_assert_op = info.transformed(assert_op)
  self.assertIsNotNone(new_assert_op)
Source: transform_test.py
def test_transform(self):
  transformer = ge.Transformer()

  def my_transform_op_handler(info, op):
    add_noise = op.name.startswith("Add")
    op_, op_outputs_ = ge.transform.copy_op_handler(info, op)
    if not add_noise:
      return op_, op_outputs_
    # Add some noise to the op.
    with info.graph_.as_default():
      t_ = math_ops.add(
          constant_op.constant(1.0, shape=[10], name="Noise"),
          op_.outputs[0],
          name="AddNoise")
    # Return the "noisy" op.
    return op_, [t_]

  transformer.transform_op_handler = my_transform_op_handler
  graph = ops.Graph()
  transformer(self.graph, graph, "", "")
  matcher0 = ge.OpMatcher("AddNoise").input_ops(
      "Noise", ge.OpMatcher("Add").input_ops("Const", "Input"))
  matcher1 = ge.OpMatcher("AddNoise_1").input_ops(
      "Noise_1", ge.OpMatcher("Add_1").input_ops("Const_1", matcher0))
  matcher2 = ge.OpMatcher("AddNoise_2").input_ops(
      "Noise_2", ge.OpMatcher("Add_2").input_ops("Const_2", matcher1))
  top = ge.select_ops("^AddNoise_2$", graph=graph)[0]
  self.assertTrue(matcher2(top))
Source: edit_test.py
def test_connect(self):
  """Test for ge.connect."""
  with self.graph.as_default():
    x = constant_op.constant([1., 1.], shape=[2], name="x")
    y = constant_op.constant([2., 2.], shape=[2], name="y")
    z = math_ops.add(x, y, name="z")

  sgv = ge.sgv(x.op, y.op, z.op)
  ge.connect(sgv, ge.sgv(self.e.op).remap_inputs([0]))
  self.assertTrue(
      ge.OpMatcher("^foo/bar/e$").input_ops("^z$", "foo/d$")(self.e.op))
Source: util_test.py
def test_make_list_of_t(self):
  """Test for ge.util.make_list_of_t."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    c0 = math_ops.add(a0, b0)  # pylint: disable=unused-variable
  # Should extract the tensors from the graph.
  self.assertEqual(len(ge.util.make_list_of_t(g0)), 3)
  # Should extract the tensors from the tuple.
  self.assertEqual(len(ge.util.make_list_of_t((a0, b0))), 2)
  # Should extract the tensors and ignore the ops.
  self.assertEqual(
      len(ge.util.make_list_of_t(
          (a0, a0.op, b0), ignore_ops=True)), 2)
Source: util_test.py
def test_get_generating_consuming(self):
  """Test for ge.util.get_generating_ops and ge.util.get_consuming_ops."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    c0 = math_ops.add(a0, b0)
  self.assertEqual(len(ge.util.get_generating_ops([a0, b0])), 2)
  self.assertEqual(len(ge.util.get_consuming_ops([a0, b0])), 1)
  self.assertEqual(len(ge.util.get_generating_ops([c0])), 1)
  self.assertEqual(ge.util.get_consuming_ops([c0]), [])
Source: util_test.py
def test_control_outputs(self):
  """Test for the ge.util.ControlOutputs class."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    x0 = constant_op.constant(3)
    with ops.control_dependencies([x0.op]):
      c0 = math_ops.add(a0, b0)  # pylint: disable=unused-variable
  control_outputs = ge.util.ControlOutputs(g0).get_all()
  self.assertEqual(len(control_outputs), 1)
  self.assertEqual(len(control_outputs[x0.op]), 1)
  self.assertIs(list(control_outputs[x0.op])[0], c0.op)
Source: subgraph_test.py
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    self.a = constant_op.constant([1., 1.], shape=[2], name="a")
    with ops.name_scope("foo"):
      self.b = constant_op.constant([2., 2.], shape=[2], name="b")
      self.c = math_ops.add(self.a, self.b, name="c")
      self.d = constant_op.constant([3., 3.], shape=[2], name="d")
      with ops.name_scope("bar"):
        self.e = math_ops.add(self.c, self.d, name="e")
        self.f = math_ops.add(self.c, self.d, name="f")
        self.g = math_ops.add(self.c, self.a, name="g")
        with ops.control_dependencies([self.c.op]):
          self.h = math_ops.add(self.f, self.g, name="h")
Source: reroute_test.py
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    self.a0 = constant_op.constant(1.0, shape=[2], name="a0")
    self.b0 = constant_op.constant(2.0, shape=[2], name="b0")
    self.c0 = math_ops.add(self.a0, self.b0, name="c0")
    self.a1 = constant_op.constant(3.0, shape=[2], name="a1")
    self.b1 = constant_op.constant(4.0, shape=[2], name="b1")
    self.c1 = math_ops.add(self.a1, self.b1, name="c1")
    self.a2 = constant_op.constant(3.0, shape=[3], name="a2")
    self.b2 = constant_op.constant(4.0, shape=[3], name="b2")
    self.c2 = math_ops.add(self.a2, self.b2, name="c2")
Source: stochastic_graph_test.py
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
  with self.test_session():
    mu = [0.0, 0.1, 0.2]
    sigma = constant_op.constant([1.1, 1.2, 1.3])
    with st.value_type(st.SampleValue()):
      prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
      likelihood = st.StochasticTensor(
          distributions.Normal(loc=prior, scale=sigma))
      self.assertEqual(
          prior.distribution.reparameterization_type,
          distributions.FULLY_REPARAMETERIZED)
      self.assertEqual(
          likelihood.distribution.reparameterization_type,
          distributions.FULLY_REPARAMETERIZED)

    loss = math_ops.square(array_ops.identity(likelihood) - [0.0, 0.1, 0.2])
    sum_loss = math_ops.reduce_sum(loss)
    surrogate_loss = sg.surrogate_loss([loss])
    with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"):
      _ = sg.surrogate_loss([sum_loss])
    surrogate_from_both = sg.surrogate_loss(
        [loss, sum_loss * array_ops.ones_like(loss)])

    # Pathwise derivative terms do not require additional surrogate loss
    # terms.
    with self.test_session() as sess:
      self.assertAllClose(*sess.run([loss, surrogate_loss]))
      self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both]))