Python reduce_all() usage examples (source code)

metrics.py (project: keras-image-captioning, author: danieljl)
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
    # Actually discarding is not needed if the dummy is an all-zeros array
    # (It is indeed encoded in an all-zeros array by
    # CaptionPreprocessing.preprocess_batch)
    y_true = y_true[:, :-1, :]  # Discard the last timestep/word (dummy)
    y_pred = y_pred[:, :-1, :]  # Discard the last timestep/word (dummy)

    # Flatten the timestep dimension
    shape = tf.shape(y_true)
    y_true = tf.reshape(y_true, [-1, shape[-1]])
    y_pred = tf.reshape(y_pred, [-1, shape[-1]])

    # Discard rows that are all zeros as they represent dummy or padding words.
    is_zero_y_true = tf.equal(y_true, 0)
    is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
    y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
    y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
                                               tf.argmax(y_pred, axis=1)),
                                      dtype=tf.float32))
    return accuracy
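A minimal usage sketch of the metric above, assuming TensorFlow 1.x graph mode and a hypothetical toy batch (one caption, vocabulary of 4 words, 3 timesteps where the last one is the all-zeros dummy); the expected output follows from the logic above, not from the original project.

import tensorflow as tf

# One caption, 3 timesteps, vocabulary of 4 words; the last timestep is the
# all-zeros dummy that the metric discards.
y_true = tf.constant([[[0, 1, 0, 0],
                       [0, 0, 1, 0],
                       [0, 0, 0, 0]]], dtype=tf.float32)
y_pred = tf.constant([[[0.1, 0.7, 0.1, 0.1],
                       [0.1, 0.1, 0.7, 0.1],
                       [0.25, 0.25, 0.25, 0.25]]], dtype=tf.float32)
accuracy = categorical_accuracy_with_variable_timestep(y_true, y_pred)

with tf.Session() as sess:
    print(sess.run(accuracy))  # 1.0: both real words are predicted correctly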


utils.py (project: zhusuan, author: thu-ml)
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a bug in TensorFlow's static shape inference inside nested
    # tf.cond()'s, so instead of comparing the shapes of x and y directly we
    # compare their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
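A small sketch of how the helper behaves, assuming TF 1.x graph mode and placeholders of unknown rank (toy values, not taken from the zhusuan test suite).

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=None)  # rank unknown until runtime
y = tf.placeholder(tf.float32, shape=None)
same = is_same_dynamic_shape(x, y)

with tf.Session() as sess:
    print(sess.run(same, {x: [[1., 2.]], y: [[3., 4.]]}))  # True: both are (1, 2)
    print(sess.run(same, {x: [1., 2.], y: [[3., 4.]]}))    # False: ranks differ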
predictron.py (project: predictron, author: brendanator)
def preturn_network(rewards, discounts, values):
  # First reward must be zero, first discount must be one
  first_reward = tf.Assert(
      tf.reduce_all(tf.equal(rewards[:, 0, :], 0.0)), [rewards[:, 0, :]])
  first_discount = tf.Assert(
      tf.reduce_all(tf.equal(discounts[:, 0, :], 1.0)), [discounts[:, 0, :]])

  with tf.control_dependencies([first_reward, first_discount]):
    with tf.variable_scope('preturn'):
      accum_value_discounts = tf.cumprod(discounts, axis=1, exclusive=False)
      accum_reward_discounts = tf.cumprod(discounts, axis=1, exclusive=True)
      discounted_values = values * accum_value_discounts
      discounted_rewards = rewards * accum_reward_discounts
      cumulative_rewards = tf.cumsum(discounted_rewards, axis=1)
      preturns = cumulative_rewards + discounted_values

      util.activation_summary(preturns)
      return preturns
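A standalone sketch of the tf.Assert guard used above (toy tensor, TF 1.x); it only illustrates the "first reward must be zero" check, not the full predictron computation.

import tensorflow as tf

rewards = tf.zeros([2, 3, 1])  # batch of 2, 3 steps, 1 reward channel
first_reward_ok = tf.Assert(
    tf.reduce_all(tf.equal(rewards[:, 0, :], 0.0)), [rewards[:, 0, :]])

with tf.control_dependencies([first_reward_ok]):
    rewards = tf.identity(rewards)  # downstream ops now depend on the check

with tf.Session() as sess:
    sess.run(rewards)  # passes silently; a nonzero first reward would raise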
tensorflow_backend.py (project: keras, author: GeekLiB)
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).

    Returns a uint8 tensor.
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
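A minimal sketch of calling this backend function, assuming it is reachable as K.all through the Keras TensorFlow backend of that era (Keras 1.x/2.0 on TF 1.x).

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1, 0, 1],
                         [1, 1, 1]]))
print(K.eval(K.all(x, axis=1)))  # [0 1], dtype uint8: logical AND along axis 1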
renderer.py (project: tf.rasterizer, author: vahidk)
def barycentric(verts, p):
    ab = verts[2] - verts[0]
    ac = verts[1] - verts[0]
    pa = verts[0] - p
    u = utils.tri_cross(
        [ab[0], ac[0], pa[:, 0]],
        [ab[1], ac[1], pa[:, 1]])
    v = [u[0] / u[2], u[1] / u[2]]
    bc = [1. - v[0] - v[1], v[1], v[0]]
    valid = tf.logical_and(
        tf.abs(u[2]) >= 1.0,
        tf.reduce_all(tf.stack(bc, axis=1) >= 0, axis=1))
    return bc, valid
metrics.py (project: LiTeFlow, author: petrux)
def per_sentence_accuracy(targets, predictions, weights=None):
    """Computes the per-sentence accuracy.

    Given a set of ground truth values and a set of predicted labels as tensors of
    the same shape, it returns a tensor whose rank is the rank of the ground truth
    tensor minus 1, with value 1.0 where all the predicted values along the -1 axis
    are correct and 0.0 otherwise. So, if the ground truth is [[1, 2, 3], [0, 9, 23]]
    and the predicted labels are [[1, 2, 3], [9, 0, 23]], the result will be [1, 0].

    Arguments:
      targets: the ground truth values `Tensor`, with `tf.int32` as `dtype`. It has shape
        `[d_0, d_1, ..., d_{r-1}]` and its values are supposed to range between
        `0` and `num_classes - 1`, where `num_classes` is the number of possible classes.
      predictions: the predicted values `Tensor` with `tf.float32` as `dtype`. It can
        have shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and represent the probability
        distribution across the output classes generated by the model -- so that the
        predicted label is the one coming from the argmax over the last dimension.
        Alternatively, it can have the same shape, `dtype` and format as `targets`,
        in which case it is considered to hold the predicted labels directly.
      weights: coefficients for the metric. This must be a scalar or have the same rank
        as `targets`.

    Returns:
      values: a `Tensor` with `dtype=tf.float32` and shape `[d_0, d_1, ..., d_{r-2}]`
        representing the accuracy per sentence, i.e. across all the elements of the
        -1 axis, weighted according to the input argument `weights`.
      weights: a `Tensor` with `dtype=tf.float32` and the same shape as `values`,
        representing the weighting scheme for the streaming average on `values`; this
        is the same tensor as the input `weights` argument.
    """
    values, weights = accuracy(targets, predictions, weights)
    values = tf.cast(values, tf.bool)
    if weights is not None:
        weights = tf.cast(weights, tf.bool)
        values = ops.logical_impl(weights, values)
    values = tf.reduce_all(values, axis=-1)
    return tf.cast(values, tf.float32), tf.cast(tf.ones_like(values), tf.float32)
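A self-contained illustration of the per-sentence reduction only, using the example from the docstring; the LiTeFlow `accuracy` helper and the weighting machinery are not reproduced here.

import tensorflow as tf

targets = tf.constant([[1, 2, 3], [0, 9, 23]], dtype=tf.int32)
predicted = tf.constant([[1, 2, 3], [9, 0, 23]], dtype=tf.int32)
correct = tf.equal(targets, predicted)          # per-token correctness
per_sentence = tf.reduce_all(correct, axis=-1)  # True only if every token matches

with tf.Session() as sess:
    print(sess.run(tf.cast(per_sentence, tf.float32)))  # [1. 0.]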
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).

    # Arguments
        x: input tensor.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
hmc.py (project: GPflow, author: GPflow)
def _leapfrog_step(xs, ps, epsilon, max_iterations, logprob_grads_fn):
    def update_xs(ps_values):
        return _map(lambda x, p: x.assign_add(epsilon * p), xs, ps_values)

    def whether_proceed(grads):
        finits = _map(lambda grad: tf.reduce_all(tf.is_finite(grad)), grads)
        return tf.reduce_all(finits)

    def cond(i, proceed, _ps, _xs):
        return tf.logical_and(proceed, i < max_iterations)

    def body(i, _proceed, ps, _xs):
        xs_new = update_xs(ps)
        with tf.control_dependencies(xs_new):
            _, grads = logprob_grads_fn()
            proceed = whether_proceed(grads)
            def ps_step():
                with tf.control_dependencies(grads):
                    return _update_ps(ps, grads, epsilon)
            def ps_no_step():
                with tf.control_dependencies(grads):
                    return ps

            ps_new = tf.cond(proceed, ps_step, ps_no_step, strict=True)
            return i + 1, proceed, ps_new, xs_new

    result = _while_loop(cond, body, [0, True, ps, xs])

    _i, proceed_out, ps_out, xs_out = result
    deps = _flat([proceed_out], ps_out, xs_out)
    with tf.control_dependencies(deps):
        logprob_out, grads_out = logprob_grads_fn()
        return proceed_out, xs_out, ps_out, logprob_out, grads_out
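A standalone sketch of the "all gradients are finite" guard used in whether_proceed above (toy tensors; assumes TF 1.x, where tf.is_finite is available).

import tensorflow as tf

grads = [tf.constant([1.0, 2.0]), tf.constant([float('nan'), 3.0])]
finite_per_grad = [tf.reduce_all(tf.is_finite(g)) for g in grads]
proceed = tf.reduce_all(tf.stack(finite_per_grad))

with tf.Session() as sess:
    print(sess.run(proceed))  # False: the second gradient contains a NaN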
seq2seq_utils.py (project: tefla, author: openAGI)
def initialize(self, name=None):
        with tf.name_scope(name, "TrainingHelperInitialize"):
            finished = tf.equal(0, self._sequence_length)
            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: self._zero_inputs,
                lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
            return (finished, next_inputs)
seq2seq_utils.py (project: tefla, author: openAGI)
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
        """next_inputs_fn for TrainingHelper."""
        with tf.name_scope(name, "TrainingHelperNextInputs",
                           [time, outputs, state]):
            next_time = time + 1
            finished = (next_time >= self._sequence_length)
            all_finished = tf.reduce_all(finished)

            def read_from_ta(inp):
                return inp.read(next_time)
            next_inputs = tf.cond(
                all_finished, lambda: self._zero_inputs,
                lambda: nest.map_structure(read_from_ta, self._input_tas))
            return (finished, next_inputs, state)
seq2seq_utils.py (project: tefla, author: openAGI)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        with tf.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                           [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""
                where_sampling = tf.cast(
                    tf.where(sample_ids > -1), tf.int32)
                where_not_sampling = tf.cast(
                    tf.where(sample_ids <= -1), tf.int32)
                where_sampling_flat = tf.reshape(where_sampling, [-1])
                where_not_sampling_flat = tf.reshape(
                    where_not_sampling, [-1])
                sample_ids_sampling = tf.gather(
                    sample_ids, where_sampling_flat)
                inputs_not_sampling = tf.gather(
                    base_next_inputs, where_not_sampling_flat)
                sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
                base_shape = tf.shape(base_next_inputs)
                return (tf.scatter_nd(indices=where_sampling,
                                      updates=sampled_next_inputs,
                                      shape=base_shape)
                        + tf.scatter_nd(indices=where_not_sampling,
                                        updates=inputs_not_sampling,
                                        shape=base_shape))

            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
ops_test.py (project: lsdc, author: febert)
def test_name(self):
    result_lt = ops.reduce_all(self.bool_lt, {'channel'})
    self.assertIn('lt_reduce_all', result_lt.name)
ops_test.py (project: lsdc, author: febert)
def test(self):
    result_lt = ops.reduce_all(self.bool_lt, {'channel'})
    golden_lt = core.LabeledTensor(
        tf.reduce_all(self.bool_tensor, 1), [self.a0, self.a2, self.a3])
    self.assertLabeledTensorsEqual(result_lt, golden_lt)
tensorflow_backend.py (project: keras-customized, author: ambrite)
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).

    Returns a uint8 tensor.
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
pix2pix_model.py (project: sciencebeam-gym, author: elifesciences)
def colors_to_dimensions(image_tensor, colors):
  logger = get_logger()
  single_label_tensors = []
  for single_label_color in colors:
    is_color = tf.reduce_all(
      tf.equal(image_tensor, single_label_color),
      axis=-1
    )
    single_label_tensor = tf.where(
      is_color,
      tf.fill(is_color.shape, 1.0),
      tf.fill(is_color.shape, 0.0)
    )
    single_label_tensors.append(single_label_tensor)
  return tf.stack(single_label_tensors, axis=-1)
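A standalone illustration of the per-pixel colour match that drives the function above (hypothetical 2x2 toy image, TF 1.x); it only shows the tf.reduce_all over the channel axis, not the full multi-label stacking.

import tensorflow as tf

image = tf.constant([[[255, 0, 0], [0, 255, 0]],
                     [[255, 0, 0], [0, 0, 255]]], dtype=tf.int32)
is_red = tf.reduce_all(tf.equal(image, (255, 0, 0)), axis=-1)

with tf.Session() as sess:
    print(sess.run(is_red))  # [[ True False]
                             #  [ True False]]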
pix2pix_model.py (project: sciencebeam-gym, author: elifesciences)
def replace_black_with_white_color(image_tensor):
  is_black = tf.reduce_all(
    tf.equal(image_tensor, (0, 0, 0)),
    axis=-1
  )
  is_black = tf.stack([is_black] * 3, axis=-1)
  return tf.where(
    is_black,
    255 * tf.ones_like(image_tensor),
    image_tensor
  )
predictron.py (project: predictron, author: brendanator)
def lambda_preturn_network(preturns, lambdas):
  # Final lambda must be zero
  final_lambda = tf.Assert(
      tf.reduce_all(tf.equal(lambdas[:, -1, :], 0.0)), [lambdas[:, -1, :]])

  with tf.control_dependencies([final_lambda]):
    with tf.variable_scope('lambda_preturn'):
      accum_lambda = tf.cumprod(lambdas, axis=1, exclusive=True)
      lambda_bar = (1 - lambdas) * accum_lambda  # This should always sum to 1
      lambda_preturn = tf.reduce_sum(
          lambda_bar * preturns, reduction_indices=1)

      util.activation_summary(lambda_preturn)
      return lambda_preturn
tensorflow_backend.py (project: keras, author: NVIDIA)
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).

    # Arguments
        x: input tensor.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
tensorflow_backend.py (project: keras_superpixel_pooling, author: parag2489)
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).

    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    return tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
tensorflow_backend.py (project: InnerOuterRNN, author: Chemoinformatics)
def all(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical AND).

    Returns a uint8 tensor.
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
helpers.py (project: tacotron, author: keithito)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
      finished = tf.reduce_all(tf.equal(outputs, self._end_token), axis=1)
      # Feed last output frame as next input. outputs is [N, output_dim * r]
      next_inputs = outputs[:, -self._output_dim:]
      return (finished, next_inputs, state)
ops.py (project: tfdeploy, author: riga)
def test_All(self):
        t = tf.reduce_all(self.random(3, 4, 5), reduction_indices=[0, 1], keep_dims=True)
        self.check(t)
        if td._tf_version[:3] >= (0, 12, 0):
            t = tf.reduce_all(self.random(3, 4, 5), axis=[0, 1], keep_dims=True)
            self.check(t)
Seq2Seq_model_for_TextSummarizer-600L.py (project: text_summarizer, author: sayondutta)
def loop_fn_transition(time,previous_output,previous_state,previous_loop_state):
    #print time
    elements_finished = (time >= decoder_lengths)
    def next_input():
        prev_out_with_weights = tf.matmul(previous_output,w['score'])
        prev_out_with_weights = tf.reshape(prev_out_with_weights,[-1,final_hidden_units,1])
        score = tf.matmul(encoder_outputs,prev_out_with_weights)
        score = tf.reshape(score,[-1,num_steps])
        attention = tf.nn.softmax(score)
        attention = tf.reshape(attention,[-1,1,num_steps])
        ct = tf.matmul(attention,encoder_outputs)
        ct = tf.reshape(ct,[-1,final_hidden_units])
        ctht = tf.concat((ct,previous_output),1)
        ht_dash = tf.nn.tanh(tf.add(tf.matmul(ctht,w['hdash']),b['hdash']))
        pred = tf.nn.softmax(tf.add(tf.matmul(ctht,w['decoder']),b['decoder']))
        prediction = tf.argmax(pred,axis=1)
        inputn = tf.nn.embedding_lookup(embeddings,prediction)
        return inputn
    finished = tf.reduce_all(elements_finished)
    next_input = tf.cond(finished,lambda:pad_embedded,next_input)
    state = previous_state
    output = previous_output
    #print output.shape
    loop_state = None
    return (elements_finished,
            next_input,
            state,
            output,
            loop_state)


# In[31]:
captcha_model.py (project: CAPTCHA, author: zakizhou)
def evaluation(logits, labels):
    prediction = tf.argmax(logits, 2)
    prediction = tf.cast(prediction, tf.int32)
    equal = tf.equal(prediction, labels)
    equal_all = tf.reduce_all(equal, axis=1)
    accuracy = tf.reduce_mean(tf.cast(equal_all, tf.float32), name="accuracy")
    return accuracy
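A minimal usage sketch, assuming a hypothetical batch of 2 captchas of 4 characters each over 3 classes; the logits are built so that one captcha is fully correct and the other has a single wrong character.

import tensorflow as tf

labels = tf.constant([[0, 1, 2, 1],
                      [2, 2, 0, 1]], dtype=tf.int32)
logits = tf.one_hot([[0, 1, 2, 1],    # first captcha: every character correct
                     [2, 2, 0, 0]],   # second captcha: last character wrong
                    depth=3)
accuracy = evaluation(logits, labels)

with tf.Session() as sess:
    print(sess.run(accuracy))  # 0.5: only one of the two captchas is fully correct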
seq2seq.py (project: website-fingerprinting, author: AxelGoetz)
def _init_decoder(self):
        """
        Creates decoder attributes.
        We cannot simply use a dynamic_rnn since we are feeding the outputs of the
        decoder back into the inputs.
        Therefore we use a raw_rnn and emulate a dynamic_rnn with this behavior.
        (https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py)
        """
        # EOS token added
        self.decoder_inputs_length = self.encoder_inputs_length + 1

        def loop_fn_initial(time, cell_output, cell_state, loop_state):
            elements_finished = (time >= self.decoder_inputs_length)

            # EOS token (0 + self.EOS)
            initial_input = tf.zeros([self.batch_size, self.decoder_cell.output_size], dtype=tf.float32) + self.EOS
            initial_cell_state = self.encoder_final_state
            initial_loop_state = None  # we don't need to pass any additional information

            return (elements_finished,
                    initial_input,
                    initial_cell_state,
                    None,  # cell output is dummy here
                    initial_loop_state)

        def loop_fn(time, cell_output, cell_state, loop_state):
            if cell_output is None:  # time == 0
                return loop_fn_initial(time, cell_output, cell_state, loop_state)

            cell_output.set_shape([self.batch_size, self.decoder_cell.output_size])

            emit_output = cell_output

            next_cell_state = cell_state

            elements_finished = (time >= self.decoder_inputs_length)
            finished = tf.reduce_all(elements_finished)

            next_input = tf.cond(
                finished,
                lambda: tf.zeros([self.batch_size, self.decoder_cell.output_size], dtype=tf.float32), # self.PAD
                lambda: cell_output # Use the input from the previous cell
            )

            next_loop_state = None

            return (
                elements_finished,
                next_input,
                next_cell_state,
                emit_output,
                next_loop_state
            )

        decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(self.decoder_cell, loop_fn)
        self.decoder_outputs = decoder_outputs_ta.stack()
        self.decoder_outputs = tf.transpose(self.decoder_outputs, [1, 0, 2])

        with tf.variable_scope('DecoderOutputProjection') as scope:
            self.decoder_outputs = self.projection(self.decoder_outputs, self.seq_width, scope)
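A much smaller, self-contained raw_rnn sketch of the same "feed the previous output back as the next input" idea (toy sizes, TF 1.x); the EOS/PAD handling and the output projection from the class above are deliberately omitted.

import tensorflow as tf

batch_size, num_units = 2, 4
cell = tf.nn.rnn_cell.GRUCell(num_units)
lengths = tf.constant([3, 5], dtype=tf.int32)

def loop_fn(time, cell_output, cell_state, loop_state):
    if cell_output is None:  # time == 0: provide the initial input and state
        next_input = tf.zeros([batch_size, num_units])
        next_state = cell.zero_state(batch_size, tf.float32)
    else:                    # afterwards: the previous output becomes the next input
        next_input = cell_output
        next_state = cell_state
    elements_finished = (time >= lengths)
    return (elements_finished, next_input, next_state, cell_output, None)

outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn)
outputs = tf.transpose(outputs_ta.stack(), [1, 0, 2])  # [batch, time, num_units]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(outputs).shape)  # (2, 5, 4): padded to the longest sequence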
univariate.py (project: zhusuan, author: thu-ml)
def _log_prob(self, given):
        logits = self.logits

        def _broadcast(given, logits):
            # static shape has been checked in base class.
            ones_ = tf.ones(tf.shape(logits)[:-1], self.dtype)
            if logits.get_shape():
                ones_.set_shape(logits.get_shape()[:-1])
            given *= ones_
            logits *= tf.ones_like(tf.expand_dims(given, -1), self.param_dtype)
            return given, logits

        def _is_same_dynamic_shape(given, logits):
            return tf.cond(
                tf.equal(tf.rank(given), tf.rank(logits) - 1),
                lambda: tf.reduce_all(tf.equal(
                    tf.concat([tf.shape(given), tf.shape(logits)[:-1]], 0),
                    tf.concat([tf.shape(logits)[:-1], tf.shape(given)], 0))),
                lambda: tf.convert_to_tensor(False, tf.bool))

        if not (given.get_shape() and logits.get_shape()):
            given, logits = _broadcast(given, logits)
        else:
            if given.get_shape().ndims != logits.get_shape().ndims - 1:
                given, logits = _broadcast(given, logits)
            elif given.get_shape().is_fully_defined() and \
                    logits.get_shape()[:-1].is_fully_defined():
                if given.get_shape() != logits.get_shape()[:-1]:
                    given, logits = _broadcast(given, logits)
            else:
                # Below code seems to induce a BUG when this function is
                # called in HMC. Probably due to tensorflow's not supporting
                # control flow edge from an op inside the body to outside.
                # We should further fix this.
                #
                # given, logits = tf.cond(
                #     is_same_dynamic_shape(given, logits),
                #     lambda: (given, logits),
                #     lambda: _broadcast(given, logits, 'given', 'logits'))
                given, logits = _broadcast(given, logits)

        # `labels` type of `sparse_softmax_cross_entropy_with_logits` must be
        # int32 or int64
        if self.dtype == tf.float32:
            given = tf.cast(given, dtype=tf.int32)
        elif self.dtype == tf.float64:
            given = tf.cast(given, dtype=tf.int64)
        elif self.dtype not in [tf.int32, tf.int64]:
            given = tf.cast(given, tf.int32)
        log_p = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=given,
                                                                logits=logits)
        if given.get_shape() and logits.get_shape():
            log_p.set_shape(tf.broadcast_static_shape(given.get_shape(),
                                                      logits.get_shape()[:-1]))
        return log_p
saliency_map.py (project: tensorflow-adversarial, author: gongzhitaao)
def _jsma_impl(model, x, yind, epochs, eps, clip_min, clip_max, score_fn):

    def _cond(i, xadv):
        return tf.less(i, epochs)

    def _body(i, xadv):
        ybar = model(xadv)

        dy_dx = tf.gradients(ybar, xadv)[0]

        # gradients of target w.r.t input
        yt = tf.gather_nd(ybar, yind)
        dt_dx = tf.gradients(yt, xadv)[0]

        # gradients of non-targets w.r.t input
        do_dx = dy_dx - dt_dx

        c0 = tf.logical_or(eps < 0, xadv < clip_max)
        c1 = tf.logical_or(eps > 0, xadv > clip_min)
        cond = tf.reduce_all([dt_dx >= 0, do_dx <= 0, c0, c1], axis=0)
        cond = tf.to_float(cond)

        # saliency score for each pixel
        score = cond * score_fn(dt_dx, do_dx)

        shape = score.get_shape().as_list()
        dim = _prod(shape[1:])
        score = tf.reshape(score, [-1, dim])

        # find the pixel with the highest saliency score
        ind = tf.argmax(score, axis=1)
        dx = tf.one_hot(ind, dim, on_value=eps, off_value=0.0)
        dx = tf.reshape(dx, [-1] + shape[1:])

        xadv = tf.stop_gradient(xadv + dx)
        xadv = tf.clip_by_value(xadv, clip_min, clip_max)

        return i+1, xadv

    _, xadv = tf.while_loop(_cond, _body, (0, tf.identity(x)),
                            back_prop=False, name='_jsma_batch')

    return xadv
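A standalone illustration of how the line `cond = tf.reduce_all([dt_dx >= 0, do_dx <= 0, c0, c1], axis=0)` above combines several element-wise masks: the list of boolean tensors is stacked along a new leading axis and then AND-ed away (toy masks, not the adversarial model).

import tensorflow as tf

a = tf.constant([True, True, False])
b = tf.constant([True, False, False])
c = tf.constant([True, True, True])
combined = tf.reduce_all([a, b, c], axis=0)  # element-wise AND of the three masks

with tf.Session() as sess:
    print(sess.run(combined))  # [ True False False]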

