def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    Arguments:
        x: A tensor or variable.
        axis: An integer, the axis along which to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    Returns:
        A tensor with minimum values of `x`.
    """
    # `_normalize_axis` and `ndim` are backend helpers defined alongside
    # this function; `reduction_indices`/`keep_dims` are the pre-TF-1.5
    # spellings of `axis`/`keepdims`.
    axis = _normalize_axis(axis, ndim(x))
    return math_ops.reduce_min(x, reduction_indices=axis, keep_dims=keepdims)
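A minimal usage sketch of the reduction itself, assuming TensorFlow 1.x (the `keepdims` keyword is the 1.5+ spelling; older releases use `keep_dims`):

import tensorflow as tf

x = tf.constant([[1.0, 4.0],
                 [3.0, 2.0]])
with tf.Session() as sess:
    # Reduce along axis 1; rank drops from 2 to 1.
    print(sess.run(tf.reduce_min(x, axis=1)))                  # [1. 2.]
    # keepdims=True retains the reduced axis with length 1.
    print(sess.run(tf.reduce_min(x, axis=1, keepdims=True)))   # [[1.] [2.]]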
Python examples of reduce_min()
Source: ops_test.py, from project DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def test_name(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    self.assertIn('lt_reduce_min', result_lt.name)
Source: ops_test.py, from project DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def test(self):
    result_lt = ops.reduce_min(self.original_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        math_ops.reduce_min(self.original_lt.tensor, 1),
        [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
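A plain-NumPy restatement of what this test checks (the shape below is hypothetical, chosen only to mirror the four labeled axes): reducing over the second axis, 'channel', removes that axis and leaves the others intact.

import numpy as np

# Hypothetical shape for the axes (a0, channel, a2, a3).
x = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)
reduced = x.min(axis=1)              # drop the 'channel' axis
assert reduced.shape == (2, 4, 5)    # a0, a2, a3 remain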
Source: reduce_ops_test.py, from project DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testReduceMin(self):

    def reference_min(inp, axis):
        """Wrapper around np.amin that returns +infinity for an empty input."""
        if inp.shape[axis] == 0:
            return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
        return np.amin(inp, axis)

    self._testReduction(math_ops.reduce_min, reference_min, np.float32,
                        self.FLOAT_DATA)
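A standalone sketch of the empty-input convention the wrapper encodes: the identity element of `min` is +infinity, so reducing a zero-length axis should yield an array of `inf` rather than raising, which is what `tf.reduce_min` does and what bare `np.amin` does not.

import numpy as np

empty = np.zeros((3, 0), dtype=np.float32)
# np.amin(empty, axis=1) raises: a zero-size reduction has no value,
# so the reference wrapper substitutes the identity element instead.
result = np.full(empty.shape[:1], np.inf)
print(result)  # [inf inf inf]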
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops

# `_linear` and `_step` are project-level helpers defined elsewhere.
def seq_labeling_decoder_linear(decoder_inputs, num_decoder_symbols,
                                scope=None, sequence_length=None, dtype=tf.float32):
    with tf.variable_scope(scope or "non-attention_RNN"):
        decoder_outputs = list()

        # Copy over logits once past sequence_length.
        if decoder_inputs[0].get_shape().ndims != 1:
            (fixed_batch_size, output_size) = decoder_inputs[0].get_shape().with_rank(2)
        else:
            fixed_batch_size = decoder_inputs[0].get_shape().with_rank_at_least(1)[0]

        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
        else:
            batch_size = tf.shape(decoder_inputs[0])[0]

        if sequence_length is not None:
            sequence_length = math_ops.to_int32(sequence_length)

        if sequence_length is not None:  # Prepare variables
            zero_logit = tf.zeros(
                tf.stack([batch_size, num_decoder_symbols]), decoder_inputs[0].dtype)
            zero_logit.set_shape(
                tensor_shape.TensorShape([fixed_batch_size.value, num_decoder_symbols]))
            min_sequence_length = math_ops.reduce_min(sequence_length)
            max_sequence_length = math_ops.reduce_max(sequence_length)

        for time, input_ in enumerate(decoder_inputs):
            # if time == 0:
            #     hidden_state = zero_state(num_decoder_symbols, batch_size)
            if time > 0:
                tf.get_variable_scope().reuse_variables()
            # pylint: disable=cell-var-from-loop
            # call_cell = lambda: cell(input_, state)
            generate_logit = lambda: _linear(decoder_inputs[time], num_decoder_symbols, True)
            # pylint: enable=cell-var-from-loop
            if sequence_length is not None:
                logit = _step(
                    time, sequence_length, min_sequence_length,
                    max_sequence_length, zero_logit, generate_logit)
            else:
                # Invoke the lambda so a tensor, not a function, is collected.
                logit = generate_logit()
            decoder_outputs.append(logit)

        return decoder_outputs
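`_step` is defined elsewhere in the project; the sketch below is an assumption about its shape, following the pattern of TensorFlow's internal `rnn._rnn_step`. The point of taking `reduce_min`/`reduce_max` over the batch's sequence lengths is that while `time < min_sequence_length` no sequence has ended yet, so the per-row masking can be skipped wholesale.

import tensorflow as tf

def step_sketch(time, sequence_length, min_sequence_length,
                max_sequence_length, zero_logit, generate_logit):
    # Hypothetical stand-in for the project's `_step` helper.
    new_logit = generate_logit()  # build this step's logits once

    def copy_through():
        # Zero out rows whose sequence has already ended.
        finished = time >= sequence_length
        return tf.where(finished, zero_logit, new_logit)

    # Before min_sequence_length nothing has ended: skip the select.
    # (Callers stop iterating at max_sequence_length, beyond which every
    # row would be zero.)
    return tf.cond(time < min_sequence_length,
                   lambda: new_logit,
                   copy_through)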
from tensorflow.python.framework import dtypes, tensor_shape
from tensorflow.python.ops import array_ops, math_ops, variable_scope

# `linear_transformation`, `multilayer_perceptron`, the matching
# `get_*_regularizers` helpers, and `_step` are project-level helpers
# defined elsewhere in the repository.
def generate_sequence_output(encoder_outputs,
                             encoder_state,
                             num_decoder_symbols,
                             sequence_length,
                             num_heads=1,
                             dtype=dtypes.float32,
                             use_attention=True,
                             loop_function=None,
                             scope=None,
                             DNN_at_output=False,
                             forward_only=False):
    with variable_scope.variable_scope(scope or "non-attention_RNN"):
        attention_encoder_outputs = list()
        sequence_attention_weights = list()

        # Copy over logits once past sequence_length.
        if encoder_outputs[0].get_shape().ndims != 1:
            (fixed_batch_size, output_size) = encoder_outputs[0].get_shape().with_rank(2)
        else:
            fixed_batch_size = encoder_outputs[0].get_shape().with_rank_at_least(1)[0]

        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
        else:
            batch_size = array_ops.shape(encoder_outputs[0])[0]

        if sequence_length is not None:
            sequence_length = math_ops.to_int32(sequence_length)

        if sequence_length is not None:  # Prepare variables
            # `array_ops.pack` is the pre-1.0 name of `array_ops.stack`.
            zero_logit = array_ops.zeros(
                array_ops.pack([batch_size, num_decoder_symbols]),
                encoder_outputs[0].dtype)
            zero_logit.set_shape(
                tensor_shape.TensorShape([fixed_batch_size.value, num_decoder_symbols]))
            min_sequence_length = math_ops.reduce_min(sequence_length)
            max_sequence_length = math_ops.reduce_max(sequence_length)

        for time, input_ in enumerate(encoder_outputs):
            if time > 0:
                variable_scope.get_variable_scope().reuse_variables()
            # pylint: disable=cell-var-from-loop
            if not DNN_at_output:
                generate_logit = lambda: linear_transformation(
                    encoder_outputs[time], output_size, num_decoder_symbols)
            else:
                generate_logit = lambda: multilayer_perceptron(
                    encoder_outputs[time], output_size, 200, num_decoder_symbols,
                    forward_only=forward_only)
            # pylint: enable=cell-var-from-loop
            if sequence_length is not None:
                logit = _step(
                    time, sequence_length, min_sequence_length,
                    max_sequence_length, zero_logit, generate_logit)
            else:
                # Invoke the lambda so a tensor, not a function, is collected.
                logit = generate_logit()
            attention_encoder_outputs.append(logit)

        if DNN_at_output:
            regularizers = get_multilayer_perceptron_regularizers()
        else:
            regularizers = get_linear_transformation_regularizers()

        return attention_encoder_outputs, sequence_attention_weights, regularizers
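Both decoders use the same bounding trick, so a minimal runnable illustration of the two reductions may help, assuming TensorFlow 1.x:

import tensorflow as tf

sequence_length = tf.constant([5, 3, 7, 4])
min_len = tf.reduce_min(sequence_length)  # 3: before step 3, no row needs masking
max_len = tf.reduce_max(sequence_length)  # 7: from step 7 on, every row is zero
with tf.Session() as sess:
    print(sess.run([min_len, max_len]))   # [3, 7]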