Python divide() example source code

StackedDenoisingCAE.py (project: dynamic-training-bench, author: galeone)
def _mse(self, input_x, output_x):
        # MSE with a 1/2 factor: 1/(2n) \sum_{i=1}^{n} (x_i - x'_i)^2
        return tf.divide(
            tf.reduce_mean(tf.square(tf.subtract(input_x, output_x))),
            2.,
            name="mse")
metrics.py (project: tflearn, author: tflearn)
def r2_op(predictions, targets):
    """ r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = r2_op(y_pred, y_true)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.

    Returns:
        `Float`. The standard error.

    """
    with tf.name_scope('StandardError'):
        a = tf.reduce_sum(tf.square(predictions))
        b = tf.reduce_sum(tf.square(targets))
        return tf.divide(a, b)
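(Note: despite the name, this op does not compute the textbook R² = 1 − SS_res/SS_tot; as written it returns the ratio Σ predictions² / Σ targets².)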

metrics.py (project: tflearn, author: tflearn)
def weighted_r2_op(predictions, targets, inputs):
    """ weighted_r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = weighted_r2_op(y_pred, y_true, input_data)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.
        inputs: `Tensor`.

    Returns:
        `Float`. The standard error.

    """
    with tf.name_scope('WeightedStandardError'):
        if hasattr(inputs, '__len__'):
            inputs = tf.add_n(inputs)
        if inputs.get_shape().as_list() != targets.get_shape().as_list():
            raise Exception("Weighted R2 metric requires Inputs and Targets to "
                            "have same shape.")
        a = tf.reduce_sum(tf.square(predictions - inputs))
        b = tf.reduce_sum(tf.square(targets - inputs))
        return tf.divide(a, b)

net.py (project: DenseHumanBodyCorrespondences, author: halimacc)
def _loss(self, labels, logits):
        float_labels = tf.cast(labels, tf.float32)

        epsilon = tf.constant(value=1e-4)
        softmax = tf.nn.softmax(logits) + epsilon
        cross_entropy = -tf.reduce_sum(float_labels * tf.log(softmax), reduction_indices=[-1])
        cross_entropy_mean = tf.reduce_mean(cross_entropy)

        total_pixels = tf.constant(value=conf.width * conf.height, dtype=tf.float32)
        valid_pixels = tf.reduce_sum(float_labels)
        loss = tf.divide(tf.multiply(cross_entropy_mean, total_pixels), valid_pixels)

        return loss
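The final rescaling is worth spelling out: the cross-entropy mean is taken over every pixel, so multiplying by total_pixels / valid_pixels renormalizes it to the labeled pixels only. A minimal arithmetic sketch with made-up values:

```python
# Illustrative values only, not from the repo
cross_entropy_mean = 0.3   # mean over all pixels; unlabeled pixels contribute ~0
total_pixels = 1000.0
valid_pixels = 250.0       # sum of the one-hot labels = number of labeled pixels

loss = cross_entropy_mean * total_pixels / valid_pixels
print(loss)  # 1.2 — the effective mean over the 250 labeled pixels
```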
evaluation_y.py (project: spoofnet-tensorflow, author: yomna-safaa)
def get_eval_ops(logits, labels, one_hot=False, scope='', calc_accuracy=True):
    """Evaluate the quality of the logits at predicting the label.
      Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in the
          range [0, NUM_CLASSES).
      Returns:
        A scalar int32 tensor with the number of examples (out of batch_size)
        that were predicted correctly.
      """
    print('Evaluation Ops..')
    with tf.name_scope(scope):
        # For a classifier model, we can use the in_top_k Op.
        # It returns a bool tensor with shape [batch_size] that is true for
        # the examples where the label was in the top k (here k=1)
        # of all logits for that example.
        # labels = tf.cast(labels, tf.int64)
        if one_hot:
            labels = tf.argmax(labels, 1)
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        num_correct = tf.reduce_sum(tf.cast(top_1_op, tf.float32))

        if calc_accuracy:
            acc_percent = tf.divide(num_correct, labels.shape[0].value)
        else:
            acc_percent = tf.constant(0.0)

        # =============
        y_const = tf.constant(-1, dtype=labels.dtype)
        y_greater = tf.greater(labels, y_const)
        n_all = tf.reduce_sum(tf.cast(y_greater, tf.float32))

        return top_1_op, acc_percent * 100.0, num_correct, n_all, labels


########################################################################
fwgrad.py (project: tensorflow-forward-ad, author: renmengye)
def Div_FwGrad(op, dx, dy, _op_table=None, _grad_table=None):
  # Forward-mode derivative of z = x / y (quotient rule): dz = (y*dx - x*dy) / y^2.
  x = op.inputs[0]
  y = op.inputs[1]
  if dx is None and dy is None:
    return None
  elif dx is not None and dy is None:
    return tf.divide(dx, y)            # dy = 0, so dz = dx / y
  elif dy is not None and dx is None:
    return -tf.divide(x * dy, y**2)    # dx = 0, so dz = -x*dy / y^2
  else:
    return tf.divide(y * dx - x * dy, y**2)
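A quick numeric check of the general branch against a finite difference (plain Python, not part of the original repo):

```python
x, y = 3.0, 2.0
dx, dy = 1.0, 0.5                  # input tangents
fw = (y * dx - x * dy) / y ** 2    # forward-mode derivative of x / y

eps = 1e-6                         # finite-difference comparison
num = ((x + eps * dx) / (y + eps * dy) - x / y) / eps
print(fw, num)                     # both ≈ 0.125
```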
hourglass_tiny.py (project: hourglasstensorlfow, author: wbenbihi)
def _compute_err(self, u, v):
        """ Given 2 tensors compute the euclidean distance (L2) between maxima locations
        Args:
            u       : 2D - Tensor (Height x Width : 64x64 )
            v       : 2D - Tensor (Height x Width : 64x64 )
        Returns:
            (float) : Distance (in [0,1])
        """
        u_x, u_y = self._argmax(u)
        v_x, v_y = self._argmax(v)
        # 91 ≈ sqrt(64^2 + 64^2), the diagonal of the 64x64 map, so the result lies in [0, 1]
        return tf.divide(tf.sqrt(tf.square(tf.to_float(u_x - v_x)) + tf.square(tf.to_float(u_y - v_y))), tf.to_float(91))
humancritic_tensorflow.py (project: LearningFromHumanPreferences, author: ZachisGit)
def init_tf(self):
        tf.reset_default_graph()
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.initializer = tf.truncated_normal_initializer(stddev=0.3)

            self.input_o0 = tf.placeholder(shape=[None,self.obs_size],dtype=tf.float32)
            self.input_o1 = tf.placeholder(shape=[None,self.obs_size],dtype=tf.float32)
            self.preference_distribution = tf.placeholder(shape=[2],dtype=tf.float32)
            self.model_o0 = self.create_model(self.input_o0)
            self.model_o1 = self.create_model(self.input_o1,reuse=True)
            self.batch_sizes = tf.placeholder(shape=[2],dtype=tf.float32)
            #'''
            self.model_o0_sum = tf.exp(tf.divide(tf.reduce_sum(self.model_o0),self.batch_sizes[0]))
            self.model_o1_sum = tf.exp(tf.divide(tf.reduce_sum(self.model_o1),self.batch_sizes[1]))
            #self.model_o1_sum = tf.exp(tf.reduce_sum(self.model_o1))
            self.p_o0_o1 = tf.divide(self.model_o0_sum,tf.add(self.model_o0_sum,self.model_o1_sum))
            self.p_o1_o0 = tf.divide(self.model_o1_sum,tf.add(self.model_o1_sum,self.model_o0_sum))
            self.loss = -tf.add(tf.multiply(self.preference_distribution[0],tf.log(self.p_o0_o1)), \
                    tf.multiply(self.preference_distribution[1],tf.log(self.p_o1_o0)))

            '''
            self.model_o0_sum = tf.exp(tf.reduce_sum(self.model_o0))
            self.model_o1_sum = tf.exp(tf.reduce_sum(self.model_o1))
            self.p_o0_o1 = tf.add(1e-5,tf.divide(self.model_o0_sum,tf.add(1e-5,tf.add(self.model_o0_sum,self.model_o1_sum))))
            self.p_o1_o0 = tf.add(1e-5,tf.divide(self.model_o1_sum,tf.add(1e-5,tf.add(self.model_o1_sum,self.model_o0_sum))))
            self.loss = tf.add(1e-5,-tf.add(tf.multiply(self.preference_distribution[0],tf.log(self.p_o0_o1)), \
                    tf.multiply(self.preference_distribution[1],tf.log(self.p_o1_o0))))
            #'''
            self.train_step = tf.train.AdamOptimizer(learning_rate=self.LEARNING_RATE).minimize(self.loss)
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())

            self.saver = tf.train.Saver(tf.global_variables())
            self.checkpoint_path = "./human_critic/hc_model/"+self.datetime_str+"/hc_model_"+self.datetime_str+".ckpt"
classifier.py (project: chicksexer, author: kensk8er)
def _mean_pool(self, rnn_outputs, batch_size, max_char_len, max_word_len, char_lens):
        """
        Perform mean-pooling after the character-RNN layer.

        :param rnn_outputs: hidden states of all the time steps after the character-RNN layer
        :return: mean of the hidden states over every time step
        """
        # perform mean pooling over characters
        rnn_outputs = tf.reduce_mean(rnn_outputs, reduction_indices=1)

        # To keep the zero padding from skewing the mean, multiply by `n / m`, where `n` is
        # `max_char_len` and `m` is `char_lens`
        rnn_outputs = tf.multiply(rnn_outputs, tf.cast(max_char_len, tf.float32))  # multiply by `n`

        # swap the dimensions in order to divide by an appropriate value for each time step
        rnn_outputs = tf.transpose(rnn_outputs)

        rnn_outputs = tf.divide(rnn_outputs, tf.cast(char_lens, tf.float32))  # divide by `m`
        rnn_outputs = tf.transpose(rnn_outputs)  # shape back to the original shape

        # batch and word-len dimensions were merged before running character-RNN so shape it back
        rnn_outputs = tf.reshape(rnn_outputs, [batch_size, max_word_len, self._char_rnn_size * 2])

        # there are NaN due to padded words (with char_len=0) so convert those NaN to 0
        rnn_outputs = tf.where(tf.is_nan(rnn_outputs), tf.zeros_like(rnn_outputs), rnn_outputs)

        return rnn_outputs
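The `n / m` correction can be checked in isolation. A NumPy sketch with one batch entry of three time steps, the last being zero padding (names and shapes here are illustrative):

```python
import numpy as np

outputs = np.array([[[1.0], [3.0], [0.0]]])  # [batch, time, dim]; last step is padding
char_lens = np.array([2.0])                  # only 2 valid time steps
max_char_len = 3

pooled = outputs.mean(axis=1) * max_char_len / char_lens[:, None]
print(pooled)  # [[2.]] — the mean over the 2 valid steps, padding ignored
```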
model_utils.py (project: Y8M, author: mpekalski)
def SampleSpacedFrames(model_input, num_frames, num_samples, header=2):
  batch_size = tf.shape(model_input)[0]

  sequence_float = tf.divide(tf.range(header, num_samples + header, dtype=tf.float32),  
                             tf.cast(header * 2 + num_samples, tf.float32) )
  sequence_mat = tf.reshape(sequence_float, [1, -1])

  frame_index = tf.cast( tf.matmul( tf.cast(num_frames, tf.float32), sequence_mat), tf.int32)
  frame_index = tf.minimum(frame_index, tf.cast(num_frames - 1, tf.int32))

  batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
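The sampling positions are easiest to see numerically. With header=2 and num_samples=4, the fractions skip `header` slots at each end of the clip (NumPy sketch, illustrative values):

```python
import numpy as np

header, num_samples, num_frames = 2, 4, 100
fracs = np.arange(header, num_samples + header) / float(header * 2 + num_samples)
print(fracs)                              # [0.25  0.375 0.5   0.625]
print((num_frames * fracs).astype(int))   # frames 25, 37, 50, 62
```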
normalizations.py (project: tensorflow-layer-library, author: bioinf-jku)
def max_norm_all_tensors(tensor_list, clip: bool = True):
    """Normalization of list of tensors by maximum of tensors"""
    maxima = [tf.reduce_max(tf.abs(tensor)) for tensor in tensor_list]
    maxima = tf.stack(maxima)
    if clip:
        maximum = tf.reduce_max(maxima) + 1e-16
    else:
        maximum = tf.reduce_max(maxima)
    return [tf.divide(tensor, maximum) for tensor in tensor_list]
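A hedged usage sketch (assuming the function above is in scope and the TensorFlow 1.x session API): every tensor is divided by the one global maximum, so relative magnitudes across the list are preserved:

```python
import tensorflow as tf  # assumes TensorFlow 1.x

tensors = [tf.constant([1.0, -4.0]), tf.constant([2.0, 0.5])]
normed = max_norm_all_tensors(tensors)
with tf.Session() as sess:
    print(sess.run(normed))  # [array([ 0.25, -1.  ]), array([0.5  , 0.125])]
```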
distributions.py (project: wide-deep-cnn, author: DaniUPC)
def _gaussian_pdf(self, x, mixings, sigma, mean):
        """ Wrapper for Gaussian PDF """
        variance = tf.square(sigma)
        output_size = tf.cast(tf.shape(mean)[1], tf.float32)
        # Normalization: 1 / ((2*pi)^(d/2) * sigma^d), where d = output_size. [N, K]
        left = tf.reciprocal(tf.pow(2*np.pi, output_size/2.0) *
                             tf.pow(sigma, output_size))
        # Exponent: e^[-(x-mu)^2/(2var)]. [N, K]
        right = tf.exp(-tf.divide(tf.square(x - mean), 2 * variance))
        return tf.multiply(left, right)
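For d = output_size = 1 the normalization term reduces to the familiar 1/(√(2π)·σ), which allows a quick consistency check (plain NumPy, illustrative values):

```python
import numpy as np

sigma, d = 2.0, 1
left = 1.0 / ((2 * np.pi) ** (d / 2.0) * sigma ** d)
print(left, 1.0 / (np.sqrt(2 * np.pi) * sigma))  # identical for d = 1
```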
lstm.py (project: yaset, author: jtourille)
def _get_weight(in_size, out_size):
        """
        Weight matrix initialization following Xavier initialization
        :param in_size: input size
        :param out_size: output size
        :return: weight matrix
        """

        min_val = -np.divide(np.sqrt(6), np.sqrt(np.add(in_size, out_size)))
        max_val = np.divide(np.sqrt(6), np.sqrt(np.add(in_size, out_size)))

        return tf.random_uniform([in_size, out_size], minval=min_val, maxval=max_val)
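This is the Glorot/Xavier uniform bound ±√(6 / (fan_in + fan_out)); for example:

```python
import numpy as np

in_size, out_size = 256, 128
limit = np.sqrt(6.0 / (in_size + out_size))
print(limit)  # 0.125 — weights are drawn uniformly from [-0.125, 0.125]
```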
learningv2.py (project: tefla, author: openAGI)
def _process_towers_loss(self, dataset, opt, model, is_training=False, reuse=True, is_classification=True, loss_type='cross_entropy'):
        tower_loss = []
        predictions = []
        validation_metric = []
        validation_metric_tmp = [[] for _, _ in self.validation_metrics_def]
        for i in xrange(self.cnf.get('num_gpus', 1)):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (self.cnf.get('TOWER_NAME', 'tower'), i)) as scope:
                    images, labels = inputs(dataset, self.cnf['tfrecords_im_size'], self.cnf.get(
                        'crop_size'), batch_size=self.cnf['batch_size_test'], num_preprocess_threads=32, num_readers=8, image_preprocessing=self.preprocessor.preprocess_image)
                    labels = self._adjust_ground_truth(labels)
                    loss_pred = self._tower_loss(
                        scope, model, images, labels, is_training=is_training, reuse=reuse, is_classification=is_classification, loss_type=loss_type)
                    tower_loss.append(loss_pred['loss'])
                    predictions.append(loss_pred['predictions'])
                    if self.loss_type == 'kappa_log':
                        labels = tf.argmax(labels, axis=1)
                    # separate index `j` so the GPU loop variable `i` is not shadowed
                    for j, (_, metric_function) in enumerate(self.validation_metrics_def):
                        metric_score = metric_function(
                            labels, tf.argmax(loss_pred['predictions'], 1))
                        validation_metric_tmp[j].append(metric_score)
        predictions = tf.convert_to_tensor(predictions)
        predictions = tf.reshape(predictions, [-1, self.num_classes])
        for i, (_, _) in enumerate(self.validation_metrics_def):
            validation_metric.append(
                tf.divide(sum(validation_metric_tmp[i]), self.cnf.get('num_gpus')))
        return sum(tower_loss), predictions, validation_metric
base.py (project: tefla, author: openAGI)
def _compute_weights(self, labels):
        log.debug('Computing weights from batch labels')
        labels = tf.cast(labels, dtype=tf.float32)
        lshape = tf.cast(tf.shape(labels), dtype=tf.float32)
        weights = tf.divide(tf.reduce_sum(
            labels, axis=0, keep_dims=True), lshape[0])
        return tf.tile(weights, [tf.shape(labels)[0], 1])
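In effect each weight is the per-class frequency within the batch, tiled to one row per example. A NumPy sketch with made-up one-hot labels:

```python
import numpy as np

labels = np.array([[1, 0], [1, 0], [0, 1]], dtype=np.float32)
weights = labels.sum(axis=0, keepdims=True) / labels.shape[0]
print(weights)  # [[0.667 0.333]] — then tiled to shape [3, 2] by the op above
```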
standardizer.py (project: tefla, author: openAGI)
def __call__(self, img, is_training):
        if self.channel_wise:
            img_mean = img.mean(axis=(1, 2))
            img_std = img.std(axis=(1, 2))
            np.subtract(img, img_mean.reshape(3, 1, 1), out=img)
            np.divide(img, (img_std + 1e-4).reshape(3, 1, 1), out=img)
        else:
            img_mean = img.mean()
            img_std = img.std()
            np.subtract(img, img_mean, out=img)
            np.divide(img, img_std + 1e-4, out=img)

        np.clip(img, -self.clip, self.clip, out=img)
        return img
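A minimal NumPy equivalent of the channel-wise branch (assuming [channels, height, width] layout, as the `reshape(3, 1, 1)` implies):

```python
import numpy as np

img = np.random.rand(3, 32, 32).astype(np.float32)
mean = img.mean(axis=(1, 2)).reshape(3, 1, 1)
std = img.std(axis=(1, 2)).reshape(3, 1, 1)
img = (img - mean) / (std + 1e-4)  # each channel: zero mean, ~unit variance
```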
standardizer.py (project: tefla, author: openAGI)
def __call__(self, img, is_training):
        np.subtract(img, self.mean[:, np.newaxis, np.newaxis], out=img)
        np.divide(img, self.std[:, np.newaxis, np.newaxis], out=img)
        if is_training:
            img = self.augment_color(img, sigma=self.sigma)
        else:
            # tta (test time augmentation)
            img = self.augment_color(img, color_vec=self.color_vec)
        return img
models.py (project: learning-rank-public, author: andreweskeclarke)
def ranknet(x, relevance_labels, learning_rate, n_hidden, build_vars_fn, score_with_batchnorm_update_fn, score_fn):
    n_out = 1
    sigma = 1
    n_data = tf.shape(x)[0]

    print('USING SIGMA = %f' % sigma)
    params = build_vars_fn()
    predicted_scores, bn_params = score_with_batchnorm_update_fn(x, params)
    S_ij = tf.maximum(tf.minimum(1., relevance_labels - tf.transpose(relevance_labels)), -1.)
    real_scores = (1/2)*(1+S_ij)
    pairwise_predicted_scores = predicted_scores - tf.transpose(predicted_scores)
    lambdas = sigma*(1/2)*(1-S_ij) - sigma*tf.divide(1, (1 + tf.exp(sigma*pairwise_predicted_scores)))

    non_updating_predicted_scores = score_fn(x, bn_params, params)
    non_updating_S_ij = tf.maximum(tf.minimum(1., relevance_labels - tf.transpose(relevance_labels)), -1.)
    non_updating_real_scores = (1/2)*(1+non_updating_S_ij)
    non_updating_pairwise_predicted_scores = non_updating_predicted_scores - tf.transpose(non_updating_predicted_scores)
    non_updating_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=non_updating_pairwise_predicted_scores, labels=non_updating_real_scores))

    def get_derivative(W_k):
        dsi_dWk = tf.map_fn(lambda x_i: tf.squeeze(tf.gradients(score_fn(tf.expand_dims(x_i, 0), bn_params, params), [W_k])[0]), x)
        dsi_dWk_minus_dsj_dWk = tf.expand_dims(dsi_dWk, 1) - tf.expand_dims(dsi_dWk, 0)
        desired_lambdas_shape = tf.concat([tf.shape(lambdas), tf.ones([tf.rank(dsi_dWk_minus_dsj_dWk) - tf.rank(lambdas)], dtype=tf.int32)], axis=0)
        return tf.reduce_mean(tf.reshape(lambdas, desired_lambdas_shape)*dsi_dWk_minus_dsj_dWk, axis=[0,1])

    flat_params = [Wk for pk in params for Wk in pk]
    grads = [get_derivative(Wk) for Wk in flat_params]
    adam = tf.train.AdamOptimizer(learning_rate=learning_rate)
    adam_op = adam.apply_gradients([(tf.reshape(grad, tf.shape(param)), param) for grad, param in zip(grads, flat_params)])

    def optimizer(sess, feed_dict):
        sess.run(adam_op, feed_dict=feed_dict)

    def get_score(sess, feed_dict):
        return sess.run(non_updating_predicted_scores, feed_dict=feed_dict)

    return non_updating_cost, optimizer, get_score
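The `lambdas` line is the RankNet pairwise gradient λ_ij = σ·(½(1 − S_ij) − 1/(1 + e^{σ(s_i − s_j)})). A numeric sketch (plain NumPy, made-up scores):

```python
import numpy as np

sigma = 1.0
S_ij = 1.0        # document i is more relevant than j
s_diff = 2.0      # model already scores i two units above j
lam = sigma * (0.5 * (1 - S_ij) - 1.0 / (1 + np.exp(sigma * s_diff)))
print(lam)        # ≈ -0.119: a small corrective push, the pair is mostly ordered
```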
ops.py (project: sText2Image, author: elliottwu)
def kl_divergence(p, q):
    tf.assert_rank(p,2)
    tf.assert_rank(q,2)

    p_shape = tf.shape(p)
    q_shape = tf.shape(q)
    tf.assert_equal(p_shape, q_shape)

    # normalize sum to 1
    p_ = tf.divide(p, tf.tile(tf.expand_dims(tf.reduce_sum(p,axis=1), 1), [1,p_shape[1]]))
    q_ = tf.divide(q, tf.tile(tf.expand_dims(tf.reduce_sum(q,axis=1), 1), [1,p_shape[1]]))

    return tf.reduce_sum(tf.multiply(p_, tf.log(tf.divide(p_, q_))), axis=1)
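A quick NumPy check of the row-wise KL on already-normalized inputs (illustrative values):

```python
import numpy as np

p = np.array([[0.5, 0.5]])
q = np.array([[0.9, 0.1]])
kl = np.sum(p * np.log(p / q), axis=1)
print(kl)  # ≈ [0.5108]
```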

