Python add_n() usage examples (source code)
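tf.add_n takes a list of tensors of identical shape and dtype and sums them element-wise; that is why it shows up wherever per-tower losses, per-distribution terms, or collected regularizers need to be combined. A minimal standalone sketch (TF 1.x graph mode, made-up values):

import tensorflow as tf

# Three tensors of identical shape and dtype; tf.add_n sums them element-wise.
a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.constant([5.0, 6.0])
total = tf.add_n([a, b, c])

with tf.Session() as sess:
    print(sess.run(total))  # [ 9. 12.]

The snippets below, collected from open-source projects, show the same call at different scales.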

trainer.py (project: bi-att-flow, author: allenai)
def __init__(self, config, models):
        model = models[0]
        assert isinstance(model, Model)
        self.config = config
        self.model = model
        self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
        self.var_list = model.get_var_list()
        self.global_step = model.get_global_step()
        self.summary = model.summary
        self.models = models
        losses = []
        grads_list = []
        for gpu_idx, model in enumerate(models):
            with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/gpu:{}".format(gpu_idx)):
                loss = model.get_loss()
                grads = self.opt.compute_gradients(loss, var_list=self.var_list)
                losses.append(loss)
                grads_list.append(grads)

        # Average of the per-tower losses, summed with tf.add_n.
        self.loss = tf.add_n(losses) / len(losses)
        self.grads = average_gradients(grads_list)
        self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
model.py (project: bi-att-flow, author: allenai)
def _build_loss(self):
        config = self.config
        JX = tf.shape(self.x)[2]
        M = tf.shape(self.x)[1]
        JQ = tf.shape(self.q)[1]
        loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
        # TF >= 1.0 requires keyword arguments here (the positional order changed).
        losses = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits, labels=tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
        ce_loss = tf.reduce_mean(loss_mask * losses)
        tf.add_to_collection('losses', ce_loss)
        ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
        tf.add_to_collection("losses", ce_loss2)

        self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
        tf.summary.scalar(self.loss.op.name, self.loss)  # tf.scalar_summary was renamed in TF 1.0
        tf.add_to_collection('ema/scalar', self.loss)
trainer.py (project: bi-att-flow, author: allenai), a variant of the constructor above that reads the device type from config instead of hard-coding GPUs
def __init__(self, config, models):
        model = models[0]
        assert isinstance(model, Model)
        self.config = config
        self.model = model
        self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
        self.var_list = model.get_var_list()
        self.global_step = model.get_global_step()
        self.summary = model.summary
        self.models = models
        losses = []
        grads_list = []
        for gpu_idx, model in enumerate(models):
            with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
                loss = model.get_loss()
                grads = self.opt.compute_gradients(loss, var_list=self.var_list)
                losses.append(loss)
                grads_list.append(grads)

        self.loss = tf.add_n(losses)/len(losses)
        self.grads = average_gradients(grads_list)
        self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
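Both trainer constructors call average_gradients(grads_list), which is defined elsewhere in the project. The following is a minimal sketch of what such a helper typically looks like in the multi-tower pattern, assuming every tower returns its (gradient, variable) pairs in the same variable order; it is an illustration, not bi-att-flow's actual implementation:

def average_gradients(grads_list):
    # grads_list: one entry per tower, each a list of (grad, var) pairs
    # in identical variable order, as returned by opt.compute_gradients.
    averaged = []
    for pairs in zip(*grads_list):  # pairs: the same variable across all towers
        grads = [g for g, _ in pairs if g is not None]
        var = pairs[0][1]
        if not grads:
            averaged.append((None, var))
            continue
        # Element-wise mean of the per-tower gradients, again via tf.add_n.
        averaged.append((tf.add_n(grads) / float(len(grads)), var))
    return averaged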
model.py (project: text-classification2, author: yuhui-lin)
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits,
        labels=labels,
        name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
yellowfin.py (project: YellowFin, author: JianGoForIt)
def grad_variance(self):
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._tvars, self._grads):
      if isinstance(g, ops.IndexedSlices):
        tensor_to_avg.append(
          tf.reshape(tf.unsorted_segment_sum(
            g.values, g.indices, g.dense_shape[0]),
            shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [
        self._moving_averager.average(val) for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
    self._grad_var = tf.maximum(
      tf.constant(EPS, dtype=self._grad_norm_squared_avg.dtype),
      self._grad_norm_squared_avg
      - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared] ) )
    if self._sparsity_debias:
      self._grad_var *= self._sparsity_avg
    return grad_var_ops
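In grad_variance above, _grad_norm_squared_avg tracks a moving average of E[||g||^2] while the tf.add_n call totals ||E[g]||^2 over every parameter, so _grad_var estimates tr(Var[g]) = E[||g||^2] - ||E[g]||^2, floored at EPS so numerical noise cannot drive it negative.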
resdeconv_model.py (project: traffic_video_analysis, author: polltooh)
def loss(infer, count_diff_infer, label):
    l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(infer - label), [1, 2, 3]), name='l2_loss')
    # Alternative: Huber loss on the summed density maps.
    # l2_loss = mf.huber_loss(tf.reduce_sum(infer, [1, 2, 3]), tf.reduce_sum(label, [1, 2, 3]), huber_epsilon, 'density_loss')

    huber_epsilon = 5.0
    c_lambda = 0.1
    count_infer = tf.add(tf.squeeze(count_diff_infer), tf.reduce_sum(infer, [1, 2, 3]), name="count_infer")
    # tf.mul was removed in TF 1.0; tf.multiply is the replacement.
    count_loss = tf.multiply(c_lambda, mf.huber_loss(count_infer, tf.reduce_sum(label, [1, 2, 3]), huber_epsilon, 'huber_loss'),
                             name='count_loss')
    # Alternative: squared-error count loss.
    # count_loss = tf.multiply(c_lambda, tf.reduce_mean(tf.square(count_infer - tf.reduce_sum(label, [1, 2, 3]))),
    #                          name='count_loss')

    tf.add_to_collection('losses', count_loss)
    tf.add_to_collection('losses', l2_loss)

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), count_infer
training_q.py (project: tefla, author: litan)
def _setup_classification_predictions_and_loss(self):
        self.inputs, self.target = self.input_queue.dequeue()
        self.num_batch_elems = tf.size(self.target)
        self.inputs = tf.reshape(self.inputs, self.input_shape)
        self.training_end_points = self.model(is_training=True, reuse=None, inputs=self.inputs)
        training_logits = self.training_end_points['logits']
        self.training_predictions = self.training_end_points['predictions']

        self.validation_end_points = self.model(is_training=False, reuse=True, inputs=self.inputs)
        validation_logits = self.validation_end_points['logits']
        self.validation_predictions = self.validation_end_points['predictions']
        with tf.name_scope('loss'):
            training_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=training_logits, labels=self.target))

            self.validation_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=validation_logits, labels=self.target))

            l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            self.regularized_training_loss = training_loss + l2_loss * self.cnf.get('l2_reg', 0.0)
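A caveat that applies to this and the three tefla snippets that follow: tf.add_n raises an error when given an empty list, so the pattern assumes at least one layer registered an L2 regularizer. A common defensive variant (a sketch, not tefla's code):

reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
l2_loss = tf.add_n(reg_losses) if reg_losses else tf.constant(0.0)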
training_q.py (project: tefla, author: litan)
def _setup_regression_predictions_and_loss(self):
        self.inputs, self.target = self.input_queue.dequeue()
        self.num_batch_elems = tf.size(self.target)
        self.inputs = tf.reshape(self.inputs, self.input_shape)
        self.training_end_points = self.model(is_training=True, reuse=None, inputs=self.inputs)
        self.training_predictions = self.training_end_points['predictions']
        self.validation_end_points = self.model(is_training=False, reuse=True, inputs=self.inputs)
        self.validation_predictions = self.validation_end_points['predictions']

        with tf.name_scope('loss'):
            training_loss = tf.reduce_mean(
                tf.square(tf.subtract(self.training_predictions, self.target)))

            self.validation_loss = tf.reduce_mean(
                tf.square(tf.subtract(self.validation_predictions, self.target)))

            l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            self.regularized_training_loss = training_loss + l2_loss * self.cnf.get('l2_reg', 0.0)
training.py (project: tefla, author: litan)
def _setup_classification_predictions_and_loss(self):
        self.training_end_points = self.model(is_training=True, reuse=None)
        self.inputs = self.training_end_points['inputs']
        training_logits = self.training_end_points['logits']
        self.training_predictions = self.training_end_points['predictions']
        self.validation_end_points = self.model(is_training=False, reuse=True)
        self.validation_inputs = self.validation_end_points['inputs']
        validation_logits = self.validation_end_points['logits']
        self.validation_predictions = self.validation_end_points['predictions']
        with tf.name_scope('predictions'):
            self.target = tf.placeholder(tf.int32, shape=(None,), name='target')
        with tf.name_scope('loss'):
            training_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=training_logits, labels=self.target))

            self.validation_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=validation_logits, labels=self.target))

            l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            self.regularized_training_loss = training_loss + l2_loss * self.cnf.get('l2_reg', 0.0)
training.py (project: tefla, author: litan)
def _setup_regression_predictions_and_loss(self):
        self.training_end_points = self.model(is_training=True, reuse=None)
        self.inputs = self.training_end_points['inputs']
        self.training_predictions = self.training_end_points['predictions']
        self.validation_end_points = self.model(is_training=False, reuse=True)
        self.validation_inputs = self.validation_end_points['inputs']
        self.validation_predictions = self.validation_end_points['predictions']
        with tf.name_scope('predictions'):
            self.target = tf.placeholder(tf.float32, shape=(None, 1), name='target')
        with tf.name_scope('loss'):
            training_loss = tf.reduce_mean(
                tf.square(tf.subtract(self.training_predictions, self.target)))

            self.validation_loss = tf.reduce_mean(
                tf.square(tf.subtract(self.validation_predictions, self.target)))

            l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            self.regularized_training_loss = training_loss + l2_loss * self.cnf.get('l2_reg', 0.0)
second_order.py (project: tensorflow-forward-ad, author: renmengye)
def fisher_vec_bk(ys, xs, vs):
  """Implements Fisher vector product using backward AD.

  Args:
    ys: Loss function, scalar.
    xs: Weights, list of tensors.
    vs: List of tensors to multiply, for each weight tensor.

  Returns:
    J'Jv: Fisher vector product.
  """
  # Validate the input.
  if isinstance(xs, list):
    if len(vs) != len(xs):
      raise ValueError("xs and vs must have the same length.")

  grads = tf.gradients(ys, xs, gate_gradients=True)
  gradsv = list(map(lambda x: tf.reduce_sum(x[0] * x[1]), zip(grads, vs)))
  jv = tf.add_n(gradsv)
  jjv = list(map(lambda x: x * jv, grads))
  return jjv
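A hypothetical usage sketch for fisher_vec_bk; the tiny model, loss, and direction vectors below are made up for illustration (assumes import tensorflow as tf):

w = tf.Variable([1.0, 2.0])
b = tf.Variable(0.5)
x_in = tf.constant([0.3, -0.7])
loss = tf.square(tf.reduce_sum(w * x_in) + b)  # scalar loss

vs = [tf.ones_like(w), tf.ones_like(b)]  # one direction vector per weight
fvp = fisher_vec_bk(loss, [w, b], vs)    # list of J'Jv tensors, one per weight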
losses.py (project: piecewisecrf, author: Vaan5)
def total_loss_sum(losses):
    '''Adds L2 regularization losses to the given list of losses.

    Parameters
    ----------
    losses : list
        List of loss tensors.

    Returns
    -------
    total_loss : Tensor
        Sum of the given losses and all collected regularization losses.
    '''
    # Total loss for the current tower: the given losses plus regularization.
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
    return total_loss
metrics.py (project: antgo, author: jianzfb)
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulated sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # 11-point interpolation: sample precision at recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
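For reference, the value computed above is the VOC07 11-point interpolated average precision: AP = (1/11) * sum over t in {0.0, 0.1, ..., 1.0} of max_{recall >= t} precision, with tf.add_n performing the final sum over the 11 per-threshold terms.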
hourglass_tiny.py (project: hourglasstensorlfow, author: wbenbihi)
def _attention_iter(self, inputs, lrnSize, itersize, name = 'attention_iter'):
        with tf.name_scope(name):
            numIn = inputs.get_shape().as_list()[3]
            padding = int(np.floor(lrnSize / 2))  # tf.pad requires integer paddings
            pad = tf.pad(inputs, np.array([[0,0],[1,1],[1,1],[0,0]]))
            U = self._conv(pad, filters=1, kernel_size=3, strides=1)
            pad_2 = tf.pad(U, np.array([[0,0],[padding,padding],[padding,padding],[0,0]]))
            sharedK = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([lrnSize,lrnSize, 1, 1]), name= 'shared_weights')
            Q = []
            C = []
            for i in range(itersize):
                if i == 0:
                    conv = tf.nn.conv2d(pad_2, sharedK, [1,1,1,1], padding='VALID', data_format='NHWC')
                else:
                    conv = tf.nn.conv2d(Q[i-1], sharedK, [1,1,1,1], padding='SAME', data_format='NHWC')
                C.append(conv)
                Q_tmp = tf.nn.sigmoid(tf.add_n([C[i], U]))
                Q.append(Q_tmp)
            stacks = []
            for i in range(numIn):
                stacks.append(Q[-1]) 
            pfeat = tf.multiply(inputs, tf.concat(stacks, axis=3))
        return pfeat
multiscale_cnn_lstm_model.py (project: youtube-8m, author: wangheda)
def create_model(self, model_input, vocab_size, num_frames, 
                   l2_penalty=1e-8, **unused_params):

    num_layers = FLAGS.multiscale_cnn_lstm_layers
    lstm_size = int(FLAGS.lstm_cells)
    pool_size = 2
    num_filters = [256, 256, 512]
    filter_sizes = [1, 2, 3]
    features_size = sum(num_filters)

    sub_predictions = []
    cnn_input = model_input

    cnn_max_frames = model_input.get_shape().as_list()[1]

    for layer in range(num_layers):
      cnn_output = self.cnn(cnn_input, num_filters=num_filters, filter_sizes=filter_sizes, sub_scope="cnn%d"%(layer+1))
      cnn_output_relu = tf.nn.relu(cnn_output)

      lstm_memory = self.rnn(cnn_output_relu, lstm_size, num_frames, sub_scope="rnn%d"%(layer+1))
      sub_prediction = self.moe(lstm_memory, vocab_size, scopename="moe%d"%(layer+1))
      sub_predictions.append(sub_prediction)

      cnn_max_frames //= pool_size  # integer division; the slice and reshape below need ints
      max_pooled_cnn_output = tf.reduce_max(
          tf.reshape(
              cnn_output_relu[:, :cnn_max_frames*2, :], 
              [-1, cnn_max_frames, pool_size, features_size]
          ), axis=2)

      # for the next cnn layer
      cnn_input = max_pooled_cnn_output
      num_frames = tf.maximum(num_frames // pool_size, 1)  # floor-divide to keep an integer tensor

    support_predictions = tf.concat(sub_predictions, axis=1)
    predictions = tf.add_n(sub_predictions) / len(sub_predictions)

    return {"predictions": predictions, 
            "support_predictions": support_predictions}
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def l2loss(params):
    if len(params) == 0:
        return tf.constant(0.0)
    else:
        # Reduce each parameter tensor to a scalar before summing across parameters.
        return tf.add_n([tf.reduce_sum(tf.square(p)) for p in params])
distributions.py (project: distributional_perspective_on_RL, author: Kiwoo)
def neglogp(self, x):
        return tf.add_n([
            p.neglogp(px) for p, px in zip(
                self.categoricals,
                tf.unstack(x - self.low, axis=len(x.get_shape()) - 1))])
distributions.py (project: distributional_perspective_on_RL, author: Kiwoo)
def kl(self, other):
        return tf.add_n([
                p.kl(q) for p, q in zip(self.categoricals, other.categoricals)
            ])
distributions.py (project: distributional_perspective_on_RL, author: Kiwoo)
def entropy(self):
        return tf.add_n([p.entropy() for p in self.categoricals])

