Example source code for Python's reduce_mean()

character_rnn.py (project: deep-learning, author: ljanyst)
def get_training_tensors(self, learning_rate = 0.001, grad_clip = 5):
        #-----------------------------------------------------------------------
        # Build a loss function
        #-----------------------------------------------------------------------
        with tf.name_scope('targets-encode'):
            y_one_hot  = tf.one_hot(self.targets, self.n_classes)
            y_reshaped = tf.reshape(y_one_hot, self.logits.get_shape())

        with tf.name_scope('loss'):
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                           labels=y_reshaped)
            loss = tf.reduce_mean(loss)
            tf.summary.scalar('loss', loss)

        #-----------------------------------------------------------------------
        # Build the optimizer
        #-----------------------------------------------------------------------
        with tf.name_scope('optimizer'):
            tvars     = tf.trainable_variables()
            grads, _  = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                               grad_clip)
            train_op  = tf.train.AdamOptimizer(learning_rate)
            optimizer = train_op.apply_gradients(zip(grads, tvars))

        return loss, optimizer
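
tf.clip_by_global_norm rescales all gradients together whenever their joint norm exceeds grad_clip, the standard guard against exploding gradients when training RNNs. A minimal standalone sketch of the same clip-then-apply pattern (TF 1.x; the toy variable and loss below are made up, not from the repo):

import tensorflow as tf

x = tf.Variable([3.0, -4.0], name='toy_param')
loss = tf.reduce_mean(tf.square(x))

tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)
train_op = tf.train.AdamOptimizer(0.001).apply_gradients(zip(grads, tvars))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, l = sess.run([train_op, loss])
    print(l)  # 12.5 on the first step
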
network_resnet.py (project: HandDetection, author: YunqiuXu)
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = tf.abs(in_box_diff)
    smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
    in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                  + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = tf.reduce_mean(tf.reduce_sum(
      out_loss_box,
      axis=dim
    ))
    return loss_box
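
The expression above is the piecewise smooth-L1 penalty from Fast R-CNN: 0.5*sigma^2*x^2 where |x| < 1/sigma^2, and |x| - 0.5/sigma^2 elsewhere, with the stop_gradient'ed sign tensor selecting the branch. A quick NumPy sketch of the same piecewise function (illustrative names, not from the repo):

import numpy as np

def smooth_l1(x, sigma=1.0):
    s2 = sigma ** 2
    quadratic = np.abs(x) < 1.0 / s2
    return np.where(quadratic, 0.5 * s2 * x ** 2, np.abs(x) - 0.5 / s2)

print(smooth_l1(np.array([-2.0, -0.5, 0.0, 0.5, 2.0])))
# [1.5  0.125  0.  0.125  1.5] -- quadratic near zero, linear in the tails
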
model.py (project: Renewables_Scenario_Gen_GAN, author: chennnnnyize)
def batchnormalize(X, eps=1e-8, g=None, b=None):
    if X.get_shape().ndims == 4:
        mean = tf.reduce_mean(X, [0,1,2])
        std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )
        X = (X-mean) / tf.sqrt(std+eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1,1,1,-1])
            b = tf.reshape(b, [1,1,1,-1])
            X = X*g + b

    elif X.get_shape().ndims == 2:
        mean = tf.reduce_mean(X, 0)
        std = tf.reduce_mean(tf.square(X-mean), 0)
        X = (X-mean) / tf.sqrt(std+eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1,-1])
            b = tf.reshape(b, [1,-1])
            X = X*g + b

    else:
        raise NotImplementedError

    return X
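
A short usage sketch for the helper above (TF 1.x; the shapes are made up): a 4-D activation is normalized over the batch, height, and width axes, then rescaled per channel.

import numpy as np
import tensorflow as tf

X = tf.constant(np.random.randn(8, 4, 4, 16).astype(np.float32))
g = tf.ones([16])   # per-channel scale
b = tf.zeros([16])  # per-channel shift
X_norm = batchnormalize(X, g=g, b=b)

with tf.Session() as sess:
    print(sess.run(X_norm).mean(axis=(0, 1, 2)))  # per-channel means, all ~0
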
model.py (project: Renewables_Scenario_Gen_GAN, author: chennnnnyize)
def build_model(self):

        Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])
        Y = tf.placeholder(tf.float32, [self.batch_size, self.dim_y])

        image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)
        h4 = self.generate(Z,Y)
        #image_gen comes from sigmoid output of generator
        image_gen = tf.nn.sigmoid(h4)

        raw_real2 = self.discriminate(image_real, Y)
        #p_real = tf.nn.sigmoid(raw_real)
        p_real=tf.reduce_mean(raw_real2)

        raw_gen2 = self.discriminate(image_gen, Y)
        #p_gen = tf.nn.sigmoid(raw_gen)
        p_gen = tf.reduce_mean(raw_gen2)

        discrim_cost = tf.reduce_sum(raw_real2) - tf.reduce_sum(raw_gen2)
        gen_cost = -tf.reduce_mean(raw_gen2)

        return Z, Y, image_real, discrim_cost, gen_cost, p_real, p_gen
unet.py (project: lung-cancer-detector, author: YichenGong)
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
train.py (project: DeepWorks, author: daigo0927)
def _build_graph(self, image_size):

        self.image_size = image_size
        self.images = tf.placeholder(tf.float32,
                                     shape = (None, image_size, image_size, 3))
        images_mini = tf.image.resize_images(self.images,
                                             size = (int(image_size/4),
                                                     int(image_size/4)))
        self.images_blur = tf.image.resize_images(images_mini,
                                                  size = (image_size, image_size))

        self.net = U_Net(output_ch = 3, block_fn = 'origin')
        self.images_reconst = self.net(self.images_blur, reuse = False)
        # self.images_reconst can take values in (-inf, +inf), so clip before visualizing as images
        self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
        self.opt = tf.train.AdamOptimizer()\
                           .minimize(self.loss, var_list = self.net.vars)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
trainer.py (project: CausalGAN, author: mkocaoglu)
def build_model(self):
        Gen=GeneratorTypes[self.gan_type]
        config=self.config
        self.gen=Gen(config.batch_size,config.gen_hidden_size,config.gen_z_dim)

        with tf.variable_scope('Disc') as scope:
            self.D1 = Discriminator(self.data.X, config.disc_hidden_size)
            scope.reuse_variables()
            self.D2 = Discriminator(self.gen.X, config.disc_hidden_size)
            d_var = tf.contrib.framework.get_variables(scope)

        d_loss_real=tf.reduce_mean( sxe(self.D1,1) )
        d_loss_fake=tf.reduce_mean( sxe(self.D2,0) )
        self.loss_d =  d_loss_real  +  d_loss_fake
        self.loss_g = tf.reduce_mean( sxe(self.D2,1) )

        optimizer=tf.train.AdamOptimizer
        g_optimizer=optimizer(self.config.lr_gen)
        d_optimizer=optimizer(self.config.lr_disc)
        self.opt_d = d_optimizer.minimize(self.loss_d,var_list= d_var)
        self.opt_g = g_optimizer.minimize(self.loss_g,var_list= self.gen.tr_var,
                               global_step=self.gen.step)

        with tf.control_dependencies([self.inc_step]):
            self.train_op=tf.group(self.opt_d,self.opt_g)
models.py (project: CausalGAN, author: mkocaoglu)
def Grad_Penalty(real_data,fake_data,Discriminator,config):
    '''
    Implementation from "Improved Training of Wasserstein GANs".
    Interpolation-based estimate of the discriminator's gradient,
    used to penalize the derivative rather than explicitly enforce
    the Lipschitz constraint.
    '''
    batch_size=config.batch_size
    LAMBDA=config.lambda_W
    n_hidden=config.critic_hidden_size
    alpha = tf.random_uniform([batch_size,1],0.,1.)
    interpolates = alpha*real_data + ((1-alpha)*fake_data)  # could sample more interpolation points if batch_size were not fixed
    disc_interpolates = Discriminator(interpolates,batch_size,n_hidden=n_hidden,config=config, reuse=True)[1]  # logits
    gradients = tf.gradients(disc_interpolates,[interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),
                           reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1)**2)
    grad_cost = LAMBDA*gradient_penalty
    return grad_cost,slopes
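
The same interpolation trick in self-contained form, with a toy linear critic standing in for the repo's Discriminator (every name below is hypothetical):

import tensorflow as tf

batch_size, dim, LAMBDA = 16, 8, 10.0
real = tf.random_normal([batch_size, dim])
fake = tf.random_normal([batch_size, dim])
W = tf.get_variable('critic_w', [dim, 1])

def critic(x):
    return tf.matmul(x, W)  # stand-in for the actual critic network

alpha = tf.random_uniform([batch_size, 1], 0., 1.)
interpolates = alpha*real + (1. - alpha)*fake
grads = tf.gradients(critic(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1))
grad_cost = LAMBDA * tf.reduce_mean((slopes - 1.)**2)
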
BaseUnet.py (project: kaggle-review, author: daxiongshu)
def _get_loss(self,labels):

        with tf.name_scope("Loss"):
            """
            with tf.name_scope("logloss"):
                logit = tf.squeeze(tf.nn.sigmoid(self.logit))
                self.loss = tf.reduce_mean(self._logloss(labels, logit))
            """
            with tf.name_scope("L2_loss"):
                if self.flags.lambdax:
                    lambdax = self.flags.lambdax
                else:
                    lambdax = 0
                self.l2loss = lambdax*tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

            with tf.name_scope("dice_coef"):
                #yp_label = tf.cast(logit>self.flags.threshold, tf.float32)
                logit = tf.squeeze(self.logit)
                self.acc = tf.reduce_mean(self._dice_coef(labels,logit))
                self.metric = "dice_coef"
                self.loss = -self.acc

        with tf.name_scope("summary"):
            if self.flags.visualize:
                tf.summary.scalar(name='dice_coef', tensor=self.acc, collections=[tf.GraphKeys.SCALARS])
losses.py (project: Tensormodels, author: asheshjain399)
def sparse_cross_entropy_loss(logits, labels,
                       weight=1.0, scope=None):
  """Define a Cross Entropy loss using sparse_softmax_cross_entropy_with_logits.

  It can scale the loss by a weight factor.

  Args:
    logits: [batch_size, num_classes] logits output of the network.
    labels: [batch_size,] target labels.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    A tensor with the softmax_cross_entropy loss.
  """
  with tf.op_scope([logits, labels], scope, 'SparseCrossEntropyLoss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits,labels,name='xentropy')
    weight = tf.convert_to_tensor(weight,
                                    dtype=logits.dtype.base_dtype,
                                    name='loss_weight')

    loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
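
Calling it looks roughly like this (toy shapes; note the snippet targets an old TF API in which tf.op_scope and tf.mul still exist, and it assumes the module defines LOSSES_COLLECTION):

logits = tf.random_normal([32, 10])                          # [batch_size, num_classes]
labels = tf.random_uniform([32], maxval=10, dtype=tf.int64)  # [batch_size]
loss = sparse_cross_entropy_loss(logits, labels, weight=0.5)
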
train.py (project: comprehend, author: Fenugreek)
def get_label_costs(coder, dataset, labels, batch_size=100):
    """
    Return average cross entropy loss and class error rate on
    dataset by coder object with its current weights.
    """

    n_batches = dataset.shape[0] // batch_size
    error = 0.
    cost = 0.
    for index in range(n_batches):
        batch = dataset[index * batch_size : (index+1) * batch_size]
        labels_batch = labels[index * batch_size : (index+1) * batch_size]
        predicted = coder.get_hidden_values(batch)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=predicted,
                                                              labels=labels_batch)
        cost += tf.reduce_mean(loss).eval()

        bad_prediction = tf.not_equal(tf.argmax(predicted , 1), labels_batch)
        error += tf.reduce_mean(tf.cast(bad_prediction, tf.float32)).eval()

    return (cost / n_batches, error / n_batches)
networks.py (project: comprehend, author: Fenugreek)
def recode_cost(self, inputs, variation, eps=1e-5, **kwargs):
        """
        Cost for given input batch of samples, under current params.
        """
        h = self.get_h_inputs(inputs)
        z_mu = tf.matmul(h, self.params['Mhz']) + self.params['bMhz']
        z_sig = tf.matmul(h, self.params['Shz']) + self.params['bShz']

        # KL divergence between latent space induced by encoder and ...
        lat_loss = -tf.reduce_sum(1 + z_sig - z_mu**2 - tf.exp(z_sig), 1)

        z = z_mu + tf.sqrt(tf.exp(z_sig)) * variation
        h = self.get_h_latents(z)
        x_mu = self.decoding(tf.matmul(h, self.params['Mhx']) + self.params['bMhx'])
        x_sig = self.decoding(tf.matmul(h, self.params['Shx']) + self.params['bShx'])
#        x_sig = tf.clip_by_value(x_mu * (1 - x_mu), .05, 1)

        # decoding likelihood term
        like_loss = tf.reduce_sum(tf.log(x_sig + eps) +
                                  (inputs - x_mu)**2 / x_sig, 1)

#        # Mean cross entropy between input and encode-decoded input.
#        like_loss = 2 * tf.reduce_sum(functions.cross_entropy(inputs, x_mu), 1)

        return .5 * tf.reduce_mean(like_loss + lat_loss)
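
For reference, with z_sig playing the role of log(sigma^2), the lat_loss line is twice the standard Gaussian KL term

    KL(N(mu, sigma^2) || N(0, 1)) = -1/2 * sum_j (1 + log sigma_j^2 - mu_j^2 - sigma_j^2),

and the trailing .5 * tf.reduce_mean(...) restores the usual scale.
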
transfer_cifar10_softmax_b1.py (project: deligan, author: val-iisc)
def add_evaluation_step(graph, final_tensor_name, ground_truth_tensor_name):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
      graph: Container for the existing model's Graph.
      final_tensor_name: Name string for the new final node that produces results.
      ground_truth_tensor_name: Name string for the node we feed ground truth data
      into.
    Returns:
      The op that computes classification accuracy.
    """
    result_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        final_tensor_name))
    ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        ground_truth_tensor_name))
    correct_prediction = tf.equal(
        tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    return evaluation_step
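
The argmax / equal / cast / reduce_mean chain is the standard one-hot accuracy idiom; a tiny self-contained check (made-up values):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0], [0.2, 0.9], [3.0, 0.1]])
labels = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

with tf.Session() as sess:
    print(sess.run(accuracy))  # 2 of 3 predictions match -> ~0.667
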
a2_transformer_classification.py (project: text_classification, author: brightmart)
def loss(self, l2_lambda=0.0001):  # 0.001
        with tf.name_scope("loss"):
            # input: `logits`:[batch_size, num_classes], and `labels`:[batch_size]
            # output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y_label, logits=self.logits)
            # alternative: losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
            # print("1.sparse_softmax_cross_entropy_with_logits.losses:",losses) # shape=(?,)
            loss = tf.reduce_mean(losses)  # print("2.loss.loss:", loss) #shape=()
            l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if ('bias' not in v.name ) and ('alpha' not in v.name)]) * l2_lambda
            loss = loss + l2_losses
        return loss

    #def loss_seq2seq(self):
    #    with tf.variable_scope("loss"):
    #        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y_label, logits=self.logits);#losses:[batch_size,self.decoder_sent_length]
    #        loss_batch=tf.reduce_sum(losses,axis=1)/self.decoder_sent_length #loss_batch:[batch_size]
    #        loss=tf.reduce_mean(loss_batch)
    #        l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * self.l2_lambda
    #        loss = loss + l2_losses
    #        return loss
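
The l2_losses line in loss() above is the usual weight-decay pattern: sum tf.nn.l2_loss over every trainable variable except biases (and, here, alpha parameters), then scale by the regularization constant. In isolation (hypothetical variables):

w = tf.get_variable('w', [10, 10])
b = tf.get_variable('bias_b', [10])
l2 = 0.0001 * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()
                        if 'bias' not in v.name and 'alpha' not in v.name])
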
a2_layer_norm_residual_conn.py (project: text_classification, author: brightmart)
def layer_normalization(self,x):
        """
        x should be:[batch_size,sequence_length,d_model]
        :return:
        """
        filter=x.get_shape()[-1] #last dimension of x. e.g. 512
        print("layer_normalization:==================>variable_scope:","layer_normalization"+str(self.layer_index)+self.type)
        with tf.variable_scope("layer_normalization"+str(self.layer_index)+self.type):
            # 1. normalize input by using  mean and variance according to last dimension
            mean=tf.reduce_mean(x,axis=-1,keep_dims=True) #[batch_size,sequence_length,1]
            variance=tf.reduce_mean(tf.square(x-mean),axis=-1,keep_dims=True) #[batch_size,sequence_length,1]
            norm_x=(x-mean)*tf.rsqrt(variance+1e-6) #[batch_size,sequence_length,d_model]
            # 2. re-scale normalized input back
            scale=tf.get_variable("layer_norm_scale",[filter],initializer=tf.ones_initializer) #[filter]
            bias=tf.get_variable("layer_norm_bias",[filter],initializer=tf.zeros_initializer) # bias starts at zero by convention
            output=norm_x*scale+bias #[batch_size,sequence_length,d_model]
            return output #[batch_size,sequence_length,d_model]
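
A standalone check of the normalization step above on a toy tensor (TF 1.x; shapes made up): before the scale and bias are applied, every [batch, position] slice has mean ~0 and variance ~1 across the last axis.

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(2, 5, 512).astype(np.float32))
mean = tf.reduce_mean(x, axis=-1, keep_dims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=-1, keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + 1e-6)

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(norm_x, axis=-1)))  # ~0 everywhere
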
a3C.py (project: A3C, author: go2sea)
def critic_loss(self):
        return tf.reduce_mean(tf.square(self.TD_loss))
a3C.py (project: A3C, author: go2sea)
def actor_loss(self):
        if self.config.mode == 'discrete':
            log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
                                     axis=1, keep_dims=True)
            # use entropy to encourage exploration
            exp_v = log_prob * self.TD_loss
            entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
            exp_v = self.config.ENTROPY_BETA * entropy + exp_v
            return tf.reduce_mean(-exp_v)  # negate so that minimizing maximizes the entropy-regularized, TD-weighted log-probability
        elif self.config.mode == 'continuous':
            log_prob = self.action_normal_dist.log_prob(self.action_input)
            exp_v = log_prob * self.TD_loss
            # use entropy to encourage exploration
            exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
            return tf.reduce_mean(-exp_v)
gen_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32, initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)

    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:,0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)

    predict_word = minibatch[:,1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss
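
Driving the graph above with a couple of (center, context) index pairs (toy data; the embeddings matrix here is random, just to give the right shape):

import numpy as np
import tensorflow as tf

vectors, minibatch, loss = make_skipgram_softmax_loss(
    np.random.randn(1000, 50).astype(np.float32), 1000, 50)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    pairs = np.array([[3, 17], [42, 7]])  # rows of (center_id, context_id)
    _, l = sess.run([train_op, loss], feed_dict={minibatch: pairs})
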
eval_output_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def pca_fit(X, n_components):
    mean = tf.reduce_mean(X, axis=0)
    centered_X = X - mean
    S, U, V = tf.svd(centered_X)

    # tf.svd returns V itself (not V^T as in NumPy), so the principal axes are its columns
    return tf.transpose(V)[:n_components], mean
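
Projecting data onto the fitted axes then looks like this (toy shapes; components come back as one row per principal axis):

import numpy as np
import tensorflow as tf

X = tf.constant(np.random.randn(100, 20).astype(np.float32))
components, mean = pca_fit(X, n_components=5)               # [5, 20], [20]
X_proj = tf.matmul(X - mean, components, transpose_b=True)  # [100, 5]
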
facenet.py (project: facerecognition, author: guoxiaolu)
def decov_loss(xs):
    """Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf
    'Reducing Overfitting In Deep Networks by Decorrelating Representation'
    """
    x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    m = tf.reduce_mean(x, 0, True)
    z = tf.expand_dims(x-m, 2)
    corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0,2,1])), 0)
    corr_frob_sqr = tf.reduce_sum(tf.square(corr))
    corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
    loss = 0.5*(corr_frob_sqr - corr_diag_sqr)
    return loss
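
Applied to a batch of hidden activations (made-up shape), the penalty is half the squared Frobenius norm of the batch covariance with its diagonal removed, so perfectly decorrelated features give zero loss:

import numpy as np
import tensorflow as tf

xs = tf.constant(np.random.randn(32, 64).astype(np.float32))
loss = decov_loss(xs)

with tf.Session() as sess:
    print(sess.run(loss))
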

