Python reduce_any() usage examples
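All of the snippets below, collected from open-source projects, use tf.reduce_any, which computes the logical OR of a boolean tensor's elements across the given axis (or across all axes when none is given). A minimal sketch of its behavior:

import tensorflow as tf

x = tf.constant([[True, False],
                 [False, False]])

with tf.Session() as sess:
    print(sess.run(tf.reduce_any(x)))          # True: at least one element is True
    print(sess.run(tf.reduce_any(x, axis=0)))  # [ True False]: OR down each column
    print(sess.run(tf.reduce_any(x, axis=1)))  # [ True False]: OR across each row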

model.py (project: densecap-tensorflow, author: rampage644)
def recall(proposals, proposals_num, ground_truth, ground_truth_num, iou_threshold):
    '''Calculate recall with given IoU threshold

    proposals: N x 4 tensor (N x (y, x, h, w))
    proposals_num: proposals count
    ground_truth: M x 4 tensor (M x (y, x, h, w))
    ground_truth_num: ground truth boxes count
    iou_threshold: float in range [0; 1]

    returns recall
    '''
    # shape is N x M
    iou_metric = iou(ground_truth, ground_truth_num, proposals, proposals_num)
    # shape is M: True where a ground-truth box is covered by some proposal
    true_positives = tf.reduce_sum(
        tf.cast(tf.reduce_any(iou_metric >= iou_threshold, axis=0), tf.float32))
    return true_positives / tf.cast(ground_truth_num, tf.float32)
model.py (project: densecap-tensorflow, author: rampage644)
def precision(proposals, proposals_num, ground_truth, ground_truth_num, iou_threshold):
    '''Calculate precision with given IoU threshold

    proposals: N x 4 tensor (N x (y, x, h, w))
    proposals_num: proposals count
    ground_truth: M x 4 tensor (M x (y, x, h, w))
    ground_truth_num: ground truth boxes count
    iou_threshold: float in range [0; 1]

    returns precision
    '''
    # shape is N x M
    iou_metric = iou(ground_truth, ground_truth_num, proposals, proposals_num)
    # shape is N: True where a proposal overlaps some ground-truth box
    true_positives = tf.reduce_sum(
        tf.cast(tf.reduce_any(iou_metric >= iou_threshold, axis=1), tf.float32))
    return true_positives / tf.cast(proposals_num, tf.float32)
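To see how the two reductions differ, here is a minimal sketch on a hypothetical 2 x 3 IoU matrix (2 proposals, 3 ground-truth boxes); the values and the 0.5 threshold are made up:

import tensorflow as tf

# Hypothetical IoU matrix: rows = proposals (N=2), cols = ground-truth boxes (M=3).
iou_metric = tf.constant([[0.7, 0.2, 0.0],
                          [0.1, 0.6, 0.3]])
matches = iou_metric >= 0.5

# Recall: a ground-truth box counts if ANY proposal covers it (reduce over rows).
covered_gt = tf.reduce_any(matches, axis=0)      # [True, True, False]
recall = tf.reduce_sum(tf.cast(covered_gt, tf.float32)) / 3.0

# Precision: a proposal counts if it covers ANY ground-truth box (reduce over cols).
matched_props = tf.reduce_any(matches, axis=1)   # [True, True]
precision = tf.reduce_sum(tf.cast(matched_props, tf.float32)) / 2.0

with tf.Session() as sess:
    print(sess.run([recall, precision]))  # [0.6666667, 1.0]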
topn.py (project: lsdc, author: febert)
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
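The gating pattern above (run an update only when tf.reduce_any fires, via tf.cond) can be sketched in isolation; the scores, threshold, and stand-in branches below are hypothetical:

import tensorflow as tf

scores = tf.constant([0.1, 0.9, 0.4])
larger_scores = tf.greater(scores, 0.5)  # 0.5 stands in for self.sl_scores[0]

def shortlist_insert():
    # Stands in for the scatter updates; just counts the qualifying scores.
    return tf.reduce_sum(tf.cast(larger_scores, tf.int32))

# The expensive branch runs only when at least one score qualifies.
cond_op = tf.cond(tf.reduce_any(larger_scores),
                  shortlist_insert,
                  lambda: tf.constant(0))

with tf.Session() as sess:
    print(sess.run(cond_op))  # 1: only 0.9 exceeds the threshold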
variable_mgr_util.py (project: benchmarks, author: tensorflow)
def aggregate_gradients_using_copy_with_device_selection(
    benchmark_cnn, tower_grads, use_mean, check_inf_nan):
  """Aggregate gradients, controlling device for the aggregation.

  Args:
    benchmark_cnn: benchmark_cnn class.
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: If true, check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf flag indicates whether any gradient contains a NaN or Inf.
  """
  if benchmark_cnn.local_parameter_device_flag == 'gpu':
    avail_devices = benchmark_cnn.raw_devices
  else:
    avail_devices = [benchmark_cnn.param_server_device]
  agg_grads = []
  has_nan_or_inf_list = []
  for i, single_grads in enumerate(zip(*tower_grads)):
    with tf.device(avail_devices[i % len(avail_devices)]):
      grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
          single_grads, use_mean, check_inf_nan)
      agg_grads.append(grad_and_var)
      has_nan_or_inf_list.append(has_nan_or_inf)
  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None
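The final reduction can be sketched on its own. aggregate_single_gradient_using_copy is not shown in this snippet, so the per-gradient flags below are reconstructed with tf.is_finite as an assumption:

import tensorflow as tf

g0 = tf.constant([1.0, float('nan')])  # hypothetical gradients
g1 = tf.constant([2.0, 3.0])

# One scalar bool per gradient: does it contain any non-finite value?
has_nan_or_inf_list = [
    tf.reduce_any(tf.logical_not(tf.is_finite(g))) for g in (g0, g1)
]
# Passing the Python list stacks the scalars into one tensor before the OR.
has_nan_or_inf = tf.reduce_any(has_nan_or_inf_list)

with tf.Session() as sess:
    print(sess.run(has_nan_or_inf))  # True: g0 contains a NaN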
variable_mgr_util.py (project: benchmarks, author: tensorflow)
def aggregate_gradients_using_copy_with_variable_colocation(
    tower_grads, use_mean, check_inf_nan):
  """Aggregate gradients, colocating computation with the gradient's variable.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients. All variables
      of the same gradient across towers must be the same (that is,
      tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: If true, check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf flag indicates whether any gradient contains a NaN or Inf.
  """
  agg_grads = []
  has_nan_or_inf_list = []
  for single_grads in zip(*tower_grads):
    # Note that each single_grads looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    var = single_grads[0][1]

    for _, v in single_grads:
      assert v == var

    with tf.device(var.device):
      grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
          single_grads, use_mean, check_inf_nan)
      agg_grads.append(grad_and_var)
      has_nan_or_inf_list.append(has_nan_or_inf)

  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None
variable_mgr_util.py (project: benchmarks, author: tensorflow)
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
  """Calculate the average gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf flag indicates whether any gradient contains a NaN or Inf.
  """
  agg_grads = []
  has_nan_or_inf_list = []

  for single_grads in zip(*tower_grads):
    grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
        single_grads, use_mean, check_inf_nan)
    agg_grads.append(grad_and_var)
    has_nan_or_inf_list.append(has_nan_or_inf)

  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None
segment.py (project: jack, author: uclmr)
def segment_argmax(input, segment_ids):
    """Computes row and col indices Tensors of the segment max in the 2D input."""

    with tf.name_scope("segment_argmax"):
        num_partitions = tf.reduce_max(segment_ids) + 1
        is_max = segment_is_max(input, segment_ids)

        # The current is_max could still contain multiple True entries per
        # partition. As long as they are in the same row, that is not a problem.
        # However, we do need to remove duplicate Trues in the same partition
        # in multiple rows.
        # For that, we'll multiply is_max with the row indices + 1 and perform
        # segment_is_max() again.

        rows = tf.shape(input)[0]
        cols = tf.shape(input)[1]
        row_indices = tf.tile(tf.expand_dims(tf.range(rows), 1), [1, cols])
        is_max = segment_is_max(tf.cast(is_max, tf.int32) * (row_indices + 1), segment_ids)

        # Get selected rows and columns
        row_selected = tf.reduce_any(is_max, axis=1)
        row_indices = tf.squeeze(tf.where(row_selected))
        rows_selected = tf.reduce_sum(tf.cast(row_selected, tf.int64))

        # Assert rows_selected is correct & ensure row_indices is always 1D
        with tf.control_dependencies([tf.assert_equal(rows_selected, num_partitions)]):
            row_indices = tf.reshape(row_indices, [-1])

        selected_rows_is_max = tf.gather(is_max, row_indices)
        col_indices = tf.argmax(tf.cast(selected_rows_is_max, tf.int64), axis=1)

        # Pack indices
        return row_indices, col_indices
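The row-selection step can be sketched in isolation on a hypothetical is_max mask:

import tensorflow as tf

is_max = tf.constant([[False, True, False],
                      [False, False, False],
                      [True, False, False]])
# reduce_any marks rows containing at least one True; tf.where recovers indices.
row_selected = tf.reduce_any(is_max, axis=1)                        # [True, False, True]
row_indices = tf.reshape(tf.squeeze(tf.where(row_selected)), [-1])  # [0, 2]

with tf.Session() as sess:
    print(sess.run(row_indices))  # [0 2]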
tensorflow_backend.py (project: keras, author: GeekLiB)
def any(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical OR).

    Returns a uint8 tensor (0s and 1s).
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
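On a concrete (hypothetical) input, the wrapper reduces to the following: non-zero entries become True, reduce_any ORs them along the axis, and the result is cast back to 0/1.

import tensorflow as tf

x = tf.constant([[0, 1, 0],
                 [0, 0, 0]])
out = tf.cast(tf.reduce_any(tf.cast(x, tf.bool), axis=1), tf.uint8)

with tf.Session() as sess:
    print(sess.run(out))  # [1 0]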
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        try:
            # tf.random_poisson is only available in v1.2 and later
            random_poisson = tf.random_poisson
        except AttributeError:
            # This algorithm for generating Poisson-distributed random
            # numbers is given by Knuth [1]
            # [1]: https://en.wikipedia.org/wiki/
            #      Poisson_distribution#Generating_Poisson-distributed_random_variables
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            static_shape = tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape())
            enlam = tf.exp(-self.rate)
            x = tf.zeros(shape, dtype=self.dtype)
            prod = tf.ones(shape, dtype=self.param_dtype)

            def loop_cond(prod, x):
                return tf.reduce_any(tf.greater_equal(prod, enlam))

            def loop_body(prod, x):
                prod *= tf.random_uniform(tf.shape(prod), minval=0, maxval=1)
                x += tf.cast(tf.greater_equal(prod, enlam), dtype=self.dtype)
                return prod, x

            _, samples = tf.while_loop(
                loop_cond, loop_body, loop_vars=[prod, x],
                shape_invariants=[static_shape, static_shape])

            samples.set_shape(static_shape)
        else:
            samples = random_poisson(self.rate, [n_samples],
                                     dtype=self.param_dtype)
            if self.param_dtype != self.dtype:
                samples = tf.cast(samples, self.dtype)
        return samples
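A stripped-down version of the Knuth loop, with hypothetical rates, shows why tf.reduce_any is the right loop condition: the whole batch keeps iterating until every element's running product has fallen below exp(-rate):

import tensorflow as tf

rate = tf.constant([1.0, 4.0])  # hypothetical rates
enlam = tf.exp(-rate)
prod = tf.ones([2])
x = tf.zeros([2])

def cond(prod, x):
    # Continue while ANY element still has prod >= exp(-rate).
    return tf.reduce_any(tf.greater_equal(prod, enlam))

def body(prod, x):
    prod = prod * tf.random_uniform(tf.shape(prod), minval=0, maxval=1)
    x = x + tf.cast(tf.greater_equal(prod, enlam), tf.float32)
    return prod, x

_, samples = tf.while_loop(cond, body, [prod, x])

with tf.Session() as sess:
    print(sess.run(samples))  # e.g. [0. 3.]: one Poisson draw per rate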
pondering_rnn.py (project: sonnet, author: deepmind)
def _cond(self, unused_x, unused_cumul_out, unused_prev_state,
            unused_cumul_state, cumul_halting, unused_iteration,
            unused_remainder):
    """The `cond` of the `tf.while_loop`."""
    return tf.reduce_any(cumul_halting < 1)
tensorflow_backend.py (project: keraflow, author: ipod825)
def any(self, x, axis=None, keepdims=False):
        '''Bitwise reduction (logical OR).

        Returns a uint8 tensor (0s and 1s).
        '''
        x = self.cast(x, tf.bool)
        x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
        return self.cast(x, tf.uint8)
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).

    # Arguments
        x: input tensor.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
ops_test.py (project: lsdc, author: febert)
def test_name(self):
    result_lt = ops.reduce_any(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_any', result_lt.name)
ops_test.py (project: lsdc, author: febert)
def test(self):
    result_lt = ops.reduce_any(self.bool_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_any(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
tensorflow_backend.py (project: keras-customized, author: ambrite)
def any(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical OR).

    Returns a uint8 tensor (0s and 1s).
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
AttentiveACTCell.py (project: act-rte-inference, author: DeNeutoy)
def __call__(self, inputs, state, timestep = 0, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            # define within-cell constants/counters used to control the while loop for ACTStep
            prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
            prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
            counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
            acc_outputs = tf.zeros_like(state, tf.float32, name="output_accumulator")
            acc_states = tf.zeros_like(state, tf.float32, name="state_accumulator")
            batch_mask = tf.constant(True, tf.bool,[self.batch_size])

            # The while loop stops when this predicate is FALSE,
            # i.e. when all (probability < 1-eps AND counter < N) are false.


            pred = lambda batch_mask,prob_compare,prob,\
                          counter,state,inputs,acc_output,acc_state:\
                tf.reduce_any(
                    tf.logical_and(
                        tf.less(prob_compare,self.one_minus_eps),
                        tf.less(counter,self.N)))
                # only stop once every element of the batch has passed either threshold

            # Do while loop iterations until predicate above is false.
            _,_,remainders,iterations,_,_,output,next_state = \
                control_flow_ops.while_loop(pred,self.ACTStep,
                [batch_mask,prob_compare,prob,
                 counter,state,inputs, acc_outputs, acc_states])

        # accumulate remainder and N values
        self.ACT_remainder.append(tf.reduce_mean(1 - remainders))
        self.ACT_iterations.append(tf.reduce_mean(iterations))

        return output, next_state
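The halting predicate can be evaluated in isolation on a hypothetical batch of three:

import tensorflow as tf

prob_compare = tf.constant([0.2, 0.995, 0.5])  # hypothetical accumulated probs
counter = tf.constant([3.0, 1.0, 2.0])         # hypothetical step counts
one_minus_eps = tf.fill([3], 0.99)
N = tf.fill([3], 100.0)

# The loop must continue while ANY example is still below both thresholds.
keep_looping = tf.reduce_any(
    tf.logical_and(tf.less(prob_compare, one_minus_eps),
                   tf.less(counter, N)))

with tf.Session() as sess:
    print(sess.run(keep_looping))  # True: examples 0 and 2 still need steps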
ACTAttentionModel.py (project: act-rte-inference, author: DeNeutoy)
def do_act_steps(self, premise, hypothesis):


        self.rep_size = premise.get_shape()[-1].value

        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        initial_state = tf.zeros([self.batch_size, 2*self.rep_size], tf.float32, name="state")
        acc_states = tf.zeros([self.batch_size,2*self.rep_size], tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # The while loop stops when this predicate is FALSE,
        # i.e. when all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop once every element of the batch has passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state, premise, hypothesis, acc_states])

        return state, remainders, iterations
ACTDAModel.py (project: act-rte-inference, author: DeNeutoy)
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # The while loop stops when this predicate is FALSE,
        # i.e. when all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop once every element of the batch has passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations
ACTCell.py (project: act-rte-inference, author: DeNeutoy)
def __call__(self, inputs, state, timestep = 0, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            # define within-cell constants/counters used to control the while loop for ACTStep
            prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
            prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
            counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
            acc_outputs = tf.zeros_like(state, tf.float32, name="output_accumulator")
            acc_states = tf.zeros_like(state, tf.float32, name="state_accumulator")
            batch_mask = tf.constant(True, tf.bool,[self.batch_size])

            # The while loop stops when this predicate is FALSE,
            # i.e. when all (probability < 1-eps AND counter < N) are false.


            pred = lambda batch_mask,prob_compare,prob,\
                          counter,state,input,acc_output,acc_state:\
                tf.reduce_any(
                    tf.logical_and(
                        tf.less(prob_compare,self.one_minus_eps),
                        tf.less(counter,self.N)))
                # only stop once every element of the batch has passed either threshold

            # Do while loop iterations until predicate above is false.
            _,_,remainders,iterations,_,_,output,next_state = \
                control_flow_ops.while_loop(pred,self.ACTStep,
                [batch_mask,prob_compare,prob,
                 counter,state,inputs, acc_outputs, acc_states])

        # accumulate remainder and N values
        self.ACT_remainder.append(tf.reduce_mean(1 - remainders))
        self.ACT_iterations.append(tf.reduce_mean(iterations))

        return output, next_state
layers.py (project: deepsleepnet, author: akaraspt)
def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
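The 3-D branch can be sketched on a hypothetical padded batch (pad_val=0): a time step counts toward the length when any of its features is non-padding:

import tensorflow as tf

data = tf.constant([[[1, 2], [3, 0], [0, 0]],
                    [[5, 6], [0, 0], [0, 0]]])
lengths = tf.reduce_sum(
    tf.cast(tf.reduce_any(tf.not_equal(data, 0), axis=2), tf.int32), 1)

with tf.Session() as sess:
    print(sess.run(lengths))  # [2 1]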
layers.py (project: deepsleepnet, author: akaraspt)
def target_mask_op(data, pad_val=0):        # HangSheng: return tensor for mask, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))


# Dynamic RNN
layers.py (project: tensorlayer-chinese, author: shorxp)
def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
layers.py (project: tensorlayer-chinese, author: shorxp)
def target_mask_op(data, pad_val=0):        # HangSheng: return tensor for mask, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))


# Dynamic RNN
tensorflow_backend.py (project: keras, author: NVIDIA)
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).

    # Arguments
        x: input tensor.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
layers.py (project: dcgan, author: zsdonghao)
def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
layers.py (project: dcgan, author: zsdonghao)
def target_mask_op(data, pad_val=0):        # HangSheng: return tensor for mask, if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))


# Dynamic RNN
bytenet.py (project: master-thesis, author: AndreasMadsen)
def sample_inference_model(self,
                               source: tf.Tensor, length: tf.Tensor,
                               samples=1,
                               reuse: bool=False) -> tf.Tensor:
        x = tf.cast(source, tf.int32)

        logprops, labels = bytenet_sampling_translator(
            x,
            beam_size=samples,
            **self._parameters,
            name="bytenet-model",
            reuse=reuse
        )

        # check if <eos> exists in each sequence
        # eos_found.shape = (batch, beam)
        eos_found = tf.reduce_any(tf.equal(labels, 1), axis=2)
        # set probability to something very small if <eos> was not found
        # log(epsilon) = -1e9
        log_eps = tf.constant(-1e9, dtype=logprops.dtype)
        logprops = tf.where(eos_found,
                            logprops,
                            tf.fill(tf.shape(logprops), log_eps))

        # sort by logprops
        _, indices = tf.nn.top_k(logprops, k=samples, sorted=True)
        labels = batch_beam_gather(labels, indices)

        return tf.cast(labels, source.dtype)
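The <eos> masking step can be sketched on hypothetical beams (batch=1, beam=2, <eos> id 1): the beam that never emits <eos> gets its log-probability floored.

import tensorflow as tf

labels = tf.constant([[[4, 1, 0],     # beam 0 emits <eos>
                       [4, 5, 6]]])   # beam 1 never does
logprops = tf.constant([[-1.0, -0.5]])
eos_found = tf.reduce_any(tf.equal(labels, 1), axis=2)  # shape (batch, beam)
log_eps = tf.constant(-1e9)
logprops = tf.where(eos_found, logprops, tf.fill(tf.shape(logprops), log_eps))

with tf.Session() as sess:
    print(sess.run(logprops))  # [[-1.e+00 -1.e+09]]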
tensorflow_backend.py (project: keras_superpixel_pooling, author: parag2489)
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).

    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A bool tensor.
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    return tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
act_cell.py (project: Video-Captioning, author: hehefan)
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
      # define within-cell constants/counters used to control the while loop for ACTStep
      if self.state_is_tuple:
        state = array_ops.concat(1, state)

      self.batch_size = tf.shape(inputs)[0]
      self.one_minus_eps = tf.fill([self.batch_size], tf.constant(1.0 - self.epsilon, dtype=tf.float32))
      prob = tf.fill([self.batch_size], tf.constant(0.0, dtype=tf.float32), "prob")
      counter = tf.zeros_like(prob, tf.float32, name="counter")
      acc_outputs = tf.fill([self.batch_size, self.output_size], 0.0, name='output_accumulator')
      acc_states = tf.zeros_like(state, tf.float32, name="state_accumulator")
      flag = tf.fill([self.batch_size], True, name="flag")

      pred = lambda flag, prob, counter, state, inputs, acc_outputs, acc_states: tf.reduce_any(flag)

      _, probs, iterations, _, _, output, next_state = control_flow_ops.while_loop(pred, self.act_step, loop_vars=[flag, prob, counter, state, inputs, acc_outputs, acc_states])

    self.ACT_remainder.append(1 - probs)
    self.ACT_iterations.append(iterations)

    if self.state_is_tuple:
      next_c, next_h = array_ops.split(1, 2, next_state)
      next_state = rnn_cell._LSTMStateTuple(next_c, next_h)

    return output, next_state

