Python add_n() usage examples
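tf.add_n(inputs, name=None) takes a list of tensors with the same shape and dtype and returns their element-wise sum as a single tensor. In the examples below it is mostly used to fold a cross-entropy term together with weight-decay terms gathered in a graph collection. A minimal sketch of the op itself:

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.constant([5.0, 6.0])
total = tf.add_n([a, b, c])  # element-wise sum: [9.0, 12.0]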

Source: cifar10.py (project: ml, author: hohoins)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
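
The 'losses' collection read by tf.add_n above is filled elsewhere in inference(); the TensorFlow CIFAR-10 tutorial, which this file follows, does it with a helper along these lines (a sketch of that pattern, not necessarily this repo's exact code):

def _variable_with_weight_decay(name, shape, stddev, wd):
  """Create a variable and, if wd is set, add its L2 penalty to 'losses'."""
  var = tf.get_variable(
      name, shape,
      initializer=tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var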
Source: bbbc006.py (project: dcan-tensorflow, author: lisjin)
def loss(c_fuse, s_fuse, labels):
    """Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        c_fuse: Contours output map from inference().
        s_fuse: Segments output map from inference().
        labels: Labels from distorted_inputs or inputs().

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.

    # Split the labels tensor into contours and segments image tensors
    # Each has shape [FLAGS.batch_size, 696, 520, 1]
    contours_labels, segments_labels = tf.split(labels, 2, 3)

    _add_cross_entropy(contours_labels, c_fuse, 'c')
    _add_cross_entropy(segments_labels, s_fuse, 's')

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
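
The _add_cross_entropy helper is not shown in this excerpt. A plausible sketch of what it does, assuming per-pixel sigmoid cross-entropy pushed onto the same 'losses' collection (hypothetical signature and body):

def _add_cross_entropy(labels, logits, prefix):
    # Per-pixel sigmoid cross-entropy, averaged over the batch, contributed
    # to the 'losses' collection that tf.add_n sums above.
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.cast(labels, tf.float32), logits=logits)
    mean = tf.reduce_mean(cross_entropy, name=prefix + '_cross_entropy')
    tf.add_to_collection('losses', mean)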
Source: BaseUnet.py (project: kaggle-review, author: daxiongshu)
def _get_loss(self,labels):

        with tf.name_scope("Loss"):
            """
            with tf.name_scope("logloss"):
                logit = tf.squeeze(tf.nn.sigmoid(self.logit))
                self.loss = tf.reduce_mean(self._logloss(labels, logit))
            """
            with tf.name_scope("L2_loss"):
                if self.flags.lambdax:
                    lambdax = self.flags.lambdax
                else:
                    lambdax = 0
                self.l2loss = lambdax*tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            with tf.name_scope("dice_coef"):
                #yp_label = tf.cast(logit>self.flags.threshold, tf.float32)
                logit = tf.squeeze(self.logit)
                self.acc = tf.reduce_mean(self._dice_coef(labels,logit))
                self.metric = "dice_coef"
                self.loss = -self.acc

        with tf.name_scope("summary"):
            if self.flags.visualize:
                # Summary names may not contain spaces; note that SCALARS is a
                # project-specific collection key, not a stock tf.GraphKeys attribute.
                tf.summary.scalar(name='dice_coef', tensor=self.acc, collections=[tf.GraphKeys.SCALARS])
Source: a2_transformer_classification.py (project: text_classification, author: brightmart)
def loss(self, l2_lambda=0.0001):  # 0.001
        with tf.name_scope("loss"):
            # input: `logits`:[batch_size, num_classes], and `labels`:[batch_size]
            # output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.input_y_label, logits=self.logits)  # alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
            # print("1.sparse_softmax_cross_entropy_with_logits.losses:",losses) # shape=(?,)
            loss = tf.reduce_mean(losses)  # print("2.loss.loss:", loss) #shape=()
            l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if ('bias' not in v.name ) and ('alpha' not in v.name)]) * l2_lambda
            loss = loss + l2_losses
        return loss

    #def loss_seq2seq(self):
    #    with tf.variable_scope("loss"):
    #        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y_label, logits=self.logits);#losses:[batch_size,self.decoder_sent_length]
    #        loss_batch=tf.reduce_sum(losses,axis=1)/self.decoder_sent_length #loss_batch:[batch_size]
    #        loss=tf.reduce_mean(loss_batch)
    #        l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * self.l2_lambda
    #        loss = loss + l2_losses
    #        return loss
Source: p8_TextRNN_model.py (project: text_classification, author: brightmart)
def loss_nce(self,l2_lambda=0.0001): #0.0001-->0.001
        """calculate loss using (NCE)cross entropy here"""
        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        if self.is_training: #training
            #labels=tf.reshape(self.input_y,[-1])               #[batch_size,1]------>[batch_size,]
            labels=tf.expand_dims(self.input_y,1)                   #[batch_size,]----->[batch_size,1]
            loss = tf.reduce_mean( #inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
                tf.nn.nce_loss(weights=tf.transpose(self.W_projection),#[hidden_size*2, num_classes]--->[num_classes,hidden_size*2]. nce_weights:A `Tensor` of shape `[num_classes, dim].O.K.
                               biases=self.b_projection,                 #[label_size]. nce_biases:A `Tensor` of shape `[num_classes]`.
                               labels=labels,                 #[batch_size,1]. train_labels, # A `Tensor` of type `int64` and shape `[batch_size,num_true]`. The target classes.
                               inputs=self.output_rnn_last,# [batch_size,hidden_size*2] #A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
                               num_sampled=self.num_sampled,  #scalar. 100
                               num_classes=self.num_classes,partition_strategy="div"))  #scalar. 1999
        # NOTE: `loss` is only defined in the training branch above, so this
        # method assumes self.is_training is True when it is called.
        l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
        loss = loss + l2_losses
        return loss
Source: net_model.py (project: 3D_CNN_jonas, author: 2015ZxEE)
def loss(logits, label_batch):
    """
    Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits      -> logits from inference()
        label_batch -> 1D tensor of [batch_size]
    Rtns:
        total_loss  -> float tensor
    """
    # Calculate the average cross entropy loss across the batch.
    label_batch        = tf.cast(label_batch,tf.int64)
    cross_entropy      = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                labels=label_batch, logits=logits,
                                name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Source: autoencoder_net.py (project: unsupervised-2017-cvprw, author: imatge-upc)
def tower_loss(name_scope, autoencoder, clips):
    # calculate reconstruction loss
    rec_loss = tf.reduce_mean(tf.abs(clips-autoencoder.rec_vid))

    weight_decay_loss_list = tf.get_collection('losses', name_scope)
    weight_decay_loss = 0.0
    if len(weight_decay_loss_list) > 0:
        weight_decay_loss = tf.add_n(weight_decay_loss_list)

    tf.add_to_collection('losses', rec_loss)
    losses = tf.get_collection('losses', name_scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    return total_loss, rec_loss, weight_decay_loss
Source: mfb_dis_net.py (project: unsupervised-2017-cvprw, author: imatge-upc)
def tower_loss(name_scope, mfb, use_pretrained_encoder, encoder_gradient_ratio=1.0):
    # loss term exposed by the model, combined below with collected weight decay
    ac_loss = mfb.ac_loss

    weight_decay_loss_list = tf.get_collection('losses', name_scope)
    if use_pretrained_encoder:
        if encoder_gradient_ratio == 0.0:
            weight_decay_loss_list = [var for var in weight_decay_loss_list \
                                      if 'c3d' not in var.name and 'mapping' not in var.name]

    weight_decay_loss = 0.0
    if len(weight_decay_loss_list) > 0:
        weight_decay_loss = tf.add_n(weight_decay_loss_list)

    total_loss = weight_decay_loss * 100 + ac_loss

    return total_loss, ac_loss, weight_decay_loss
Source: clock_model.py (project: deep-time-reading, author: felixduvallet)
def _loss_shared(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Source: ocr.py (project: tf-cnn-lstm-ocr-captcha, author: Luonic)
def inference(images, batch_size, train):
  """Build the ocr model.

  Args:
    images: Images returned from distorted_inputs() or inputs().
    batch_size: Number of images per batch.
    train: Whether the graph is being built for training.

  Returns:
    Logits and the corresponding number of timesteps.
  """
  features, timesteps = convolutional_layers(images, batch_size, train)
  logits = get_lstm_layers(features, timesteps, batch_size)
  return logits, timesteps


  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  # return tf.add_n(tf.get_collection('losses'), name='total_loss')
Source: value_functions.py (project: baselines, author: openai)
def __init__(self, ob_dim, ac_dim): #pylint: disable=W0613
        X = tf.placeholder(tf.float32, shape=[None, ob_dim*2+ac_dim*2+2]) # batch of observations
        vtarg_n = tf.placeholder(tf.float32, shape=[None], name='vtarg')
        wd_dict = {}
        h1 = tf.nn.elu(dense(X, 64, "h1", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        h2 = tf.nn.elu(dense(h1, 64, "h2", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict))
        vpred_n = dense(h2, 1, "hfinal", weight_init=U.normc_initializer(1.0), bias_init=0, weight_loss_dict=wd_dict)[:,0]
        sample_vpred_n = vpred_n + tf.random_normal(tf.shape(vpred_n))
        wd_loss = tf.get_collection("vf_losses", None)
        loss = U.mean(tf.square(vpred_n - vtarg_n)) + tf.add_n(wd_loss)
        loss_sampled = U.mean(tf.square(vpred_n - tf.stop_gradient(sample_vpred_n)))
        self._predict = U.function([X], vpred_n)
        # NOTE: 'async' became a reserved keyword in Python 3.7, so this call
        # only parses on Python <= 3.6 unless the argument is renamed.
        optim = kfac.KfacOptimizer(learning_rate=0.001, cold_lr=0.001*(1-0.9), momentum=0.9,
                                   clip_kl=0.3, epsilon=0.1, stats_decay=0.95,
                                   async=1, kfac_update=2, cold_iter=50,
                                   weight_decay_dict=wd_dict, max_grad_norm=None)
        vf_var_list = []
        for var in tf.trainable_variables():
            if "vf" in var.name:
                vf_var_list.append(var)

        update_op, self.q_runner = optim.minimize(loss, loss_sampled, var_list=vf_var_list)
        self.do_update = U.function([X, vtarg_n], update_op) #pylint: disable=E1101
        U.initialize() # Initialize uninitialized TF variables
Source: SingleLayerCAE.py (project: dynamic-training-bench, author: galeone)
def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values.
        Add L2 weight decay term if any.
        Args:
            predictions: predicted values
            real_values: real values
        Returns:
            Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # 1/(2n) \sum_{i=1}^{n} (x_i - x'_i)^2
            mse = tf.divide(
                tf.reduce_mean(
                    tf.square(tf.subtract(predictions, real_values))),
                2.,
                name="mse")
            tf.add_to_collection(LOSSES, mse)

            # mse + weight_decay per layer
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: VGG.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: LeNetBN.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: VGGDirectDropout.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: VGGBN.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: LeNet.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')
        return error
Source: LeNetDirectDropout.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: LeNetDropout.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: VGGDropout.py (project: dynamic-training-bench, author: galeone)
def loss(self, logits, labels):
        """Add L2Loss to all the trainable variables.
        Args:
          logits: Logits from get().
          labels: Labels from train_inputs or inputs(). 1-D tensor
                  of shape [batch_size]

        Returns:
          Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # Calculate the average cross entropy loss across the batch.
            labels = tf.cast(labels, tf.int64)
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='cross_entropy_per_example')
            cross_entropy_mean = tf.reduce_mean(
                cross_entropy, name='cross_entropy')
            tf.add_to_collection(LOSSES, cross_entropy_mean)

            # The total loss is defined as the cross entropy loss plus all of the weight
            # decay terms (L2 loss).
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Source: SENN.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
        '''
        Loss definition.
        Only the speech-inference loss is defined here, and it works quite well.
        Add a VAD cross-entropy loss if you want.
        '''
        loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
        loss_o = loss_v1
        reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # ipdb.set_trace()
        loss_v = loss_o + tf.add_n(reg_loss)
        tf.summary.scalar('loss', loss_v)  # tf.scalar_summary was removed in TF >= 1.0
        # loss_merge = tf.cond(
        #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
        #     lambda: tf.scalar_summary('loss', loss_v))
        return loss_v, loss_o
        # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
Source: cifar10.py (project: facial-emotion-detection-dl, author: dllatas)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Source: cifar10_gtf.py (project: deep_learning_study, author: jowettcz)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Source: resnet_cifar10_multi_gpu.py (project: dlbench, author: hclhkbu)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
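
The expected layout of tower_grads, and the averaging itself, can be checked with a small self-contained sketch (hypothetical shapes and values):

# One inner list of (gradient, variable) pairs per tower; position i in
# every inner list must refer to the same shared variable.
v = tf.Variable([1.0, 2.0], name='w')
tower_grads = [
    [(tf.constant([0.2, 0.4]), v)],  # gradients from tower 0
    [(tf.constant([0.6, 0.8]), v)],  # gradients from tower 1
]
avg = average_gradients(tower_grads)  # [(tensor [0.4, 0.6], v)]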
Source: alexnet_cifar10_multi_gpu.py (project: dlbench, author: hclhkbu)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
Source: fcn5_mnist_multi_gpu.py (project: dlbench, author: hclhkbu)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
Source: model.py (project: web_page_classification, author: yuhui-lin)
def loss(logits, labels):
    """Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels,
        logits=logits,
        name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
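
A closing note on the common thread: tf.add_n([...]) sums an arbitrary list of tensors in one op, which is why every example above prefers it over a chain of + operators when folding a collection of loss terms into a total. The two forms are numerically equivalent:

losses = [tf.constant(0.5), tf.constant(0.25), tf.constant(0.125)]
total_a = tf.add_n(losses)                    # single n-ary add op
total_b = losses[0] + losses[1] + losses[2]   # chain of binary adds
# both evaluate to 0.875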

