Python square() usage examples (source code)

test_utils.py (project: zhusuan, author: thu-ml)
def testGetBackwardOpsChain(self):
        # a -> b -> c
        a = tf.placeholder(tf.float32)
        b = tf.sqrt(a)
        c = tf.square(b)
        for n in range(4):
            for seed_tensors in permutations([a, b, c], n):
                if c in seed_tensors:
                    truth = [a.op, b.op, c.op]
                elif b in seed_tensors:
                    truth = [a.op, b.op]
                elif a in seed_tensors:
                    truth = [a.op]
                else:
                    truth = []
                self.assertEqual(get_backward_ops(seed_tensors), truth)

        self.assertEqual(get_backward_ops([c], treat_as_inputs=[b]), [c.op])
        self.assertEqual(
            get_backward_ops([b, c], treat_as_inputs=[b]), [c.op])
        self.assertEqual(
            get_backward_ops([a, c], treat_as_inputs=[b]), [a.op, c.op])
vars.py (project: luminoth, author: tryolabs)
def variable_summaries(var, name, collections=None):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor from which we want to log summaries.
        - name: Variable name.
        - collections: List of collections to save the summary to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
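A minimal usage sketch, assuming TF 1.x (the variable name, shapes, and log directory are illustrative): create a variable, attach the summaries defined above, then merge and write them for TensorBoard.

import tensorflow as tf

weights = tf.Variable(tf.random_normal([128, 64]), name='weights')
variable_summaries(weights, 'fc1_weights')  # attach the summaries defined above

merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)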
effects.py (project: py-noisemaker, author: aayars)
def pop(tensor, shape):
    """
    Pop art filter

    :param Tensor tensor:
    :param list[int] shape:
    """

    images = []

    freq = random.randint(1, 3) * 2

    ref = _downsample(resample(tensor, shape), shape, [int(shape[0] / (freq * 2)), int(shape[1] / (freq * 2)), shape[2]])

    for i in range(freq * freq):
        image = posterize(ref, random.randint(3, 6))
        image = image % tf.random_normal([3], mean=.5, stddev=.25)
        images.append(image)

    x, y = point_cloud(freq, distrib=PointDistribution.square, shape=shape, corners=True)

    out = voronoi(None, shape, diagram_type=VoronoiDiagramType.collage, xy=(x, y, len(x)), nth=random.randint(0, 3), collage_images=images, image_count=4)

    return outline(out, shape, sobel_func=1)
SingleLayerCAE.py (project: dynamic-training-bench, author: galeone)
def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values.
        Add L2 weight decay term if any.
        Args:
            predictions: predicted values
            real_values: real values
        Returns:
            Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # \frac{1}{2n} \sum_{i=1}^{n} (x_i - x'_i)^2
            mse = tf.divide(
                tf.reduce_mean(
                    tf.square(tf.subtract(predictions, real_values))),
                2.,
                name="mse")
            tf.add_to_collection(LOSSES, mse)

            # mse + weight_decay per layer
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
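The weight decay terms are expected to land in the same LOSSES collection when the layers are built, so that tf.add_n above sums them together with the MSE. A hedged sketch of that layer-side registration (the variable shape and wd value are illustrative):

# Hypothetical layer-construction code: register an L2 penalty in the
# same LOSSES collection that loss() aggregates with tf.add_n.
weights = tf.get_variable('weights', shape=[5, 5, 3, 32])
wd = 1e-5  # illustrative weight decay coefficient
l2_term = tf.multiply(tf.nn.l2_loss(weights), wd, name='l2_loss')
tf.add_to_collection(LOSSES, l2_term)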
objectives.py (project: tflearn, author: tflearn)
def contrastive_loss(y_pred, y_true, margin = 1.0):
    """ Contrastive Loss.

        Computes the contrastive loss between y_pred (logits) and
        y_true (labels).

        http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf
        Sumit Chopra, Raia Hadsell and Yann LeCun (2005).
        Learning a Similarity Metric Discriminatively, with Application to Face Verification.

        Arguments:
            y_pred: `Tensor`. Predicted values.
            y_true: `Tensor`. Targets (labels).
            margin: `float`. A user-set margin enforcing a minimum distance between features of different identities. Defaults to 1.0.
    """

    with tf.name_scope("ContrastiveLoss"):
        dis1 = y_true * tf.square(y_pred)
        dis2 = (1 - y_true) * tf.square(tf.maximum((margin - y_pred), 0))
        return tf.reduce_sum(dis1 + dis2) / 2.
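In practice y_pred is the pairwise distance between two embeddings rather than a raw logit. A minimal usage sketch (the embedding size and placeholders are illustrative):

emb_a = tf.placeholder(tf.float32, [None, 128])
emb_b = tf.placeholder(tf.float32, [None, 128])
labels = tf.placeholder(tf.float32, [None])  # 1 = same identity, 0 = different

distance = tf.sqrt(tf.reduce_sum(tf.square(emb_a - emb_b), axis=1))
loss = contrastive_loss(distance, labels, margin=1.0)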
basic_autoencoder.py (project: tensorflow-roadmap, author: quanhua92)
def __init__(self, input_dim, hidden_dim, epoch=250, learning_rate = 0.001):
        self.epoch = epoch
        self.learning_rate = learning_rate

        x = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])

        with tf.name_scope("encode"):
            weights = tf.Variable(tf.random_normal([input_dim, hidden_dim], dtype=tf.float32), name="weights")
            biases = tf.Variable(tf.zeros([hidden_dim]), name="biases")
            encoded = tf.nn.tanh(tf.matmul(x, weights) + biases)

            with tf.name_scope("decode"):
                weights = tf.Variable(tf.random_normal([hidden_dim, input_dim], dtype=tf.float32), name="weights")
                biases = tf.Variable(tf.zeros([input_dim]), name="biases")
                decoded = tf.matmul(encoded, weights) + biases

        self.x = x
        self.encoded = encoded
        self.decoded = decoded

        self.loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.x, self.decoded))))
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver()
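A minimal training sketch, assuming the class is named Autoencoder (an assumption; the name is not shown in the snippet) and the training data is a NumPy array of shape [num_samples, input_dim] (batching omitted):

import numpy as np
import tensorflow as tf

data = np.random.rand(1000, 784).astype(np.float32)  # stand-in for real data
ae = Autoencoder(input_dim=784, hidden_dim=64)  # assumed class name
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(ae.epoch):
        loss_val, _ = sess.run([ae.loss, ae.train_op], feed_dict={ae.x: data})
        if i % 50 == 0:
            print('epoch {}: loss = {:.4f}'.format(i, loss_val))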
predict_stock.py (project: tensorflow-roadmap, author: quanhua92)
def __init__(self, actions, input_dim):
        self.epsilon = 0.9
        self.gamma = 0.01
        self.actions = actions
        output_dim = len(actions)
        h1_dim = 200

        self.x = tf.placeholder(tf.float32, [None, input_dim])
        self.y = tf.placeholder(tf.float32, [output_dim])
        W1 = tf.Variable(tf.random_normal([input_dim, h1_dim]))
        b1 = tf.Variable(tf.constant(0.1, shape=[h1_dim]))
        h1 = tf.nn.relu(tf.matmul(self.x, W1) + b1)
        W2 = tf.Variable(tf.random_normal([h1_dim, output_dim]))
        b2 = tf.Variable(tf.constant(0.1, shape=[output_dim]))
        self.q = tf.nn.relu(tf.matmul(h1, W2) + b2)

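        # note: this loss is a vector over actions; tf.gradients implicitly
        # sums a non-scalar loss, so this minimizes the summed squared error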
        loss = tf.square(self.y - self.q)
        self.train_op = tf.train.AdagradOptimizer(0.01).minimize(loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
nvdm.py (project: variational-text-tensorflow, author: carpedm20)
def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name="x_idx")

    self.build_encoder()
    self.build_generator()

    # Kullback Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))

    # Log likelihood
    self.g_loss = -tf.reduce_sum(tf.log(tf.gather(self.p_x_i, self.x_idx) + 1e-10))

    self.loss = self.e_loss + self.g_loss

    self.encoder_var_list, self.generator_var_list = [], []
    for var in tf.trainable_variables():
      if "encoder" in var.name:
        self.encoder_var_list.append(var)
      elif "generator" in var.name:
        self.generator_var_list.append(var)

    # optimizer for alternating updates
    self.optim_e = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.e_loss, global_step=self.step, var_list=self.encoder_var_list)
    self.optim_g = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.g_loss, global_step=self.step, var_list=self.generator_var_list)

    # optimizer for one-shot (joint) update
    self.optim = tf.train.AdamOptimizer(learning_rate=self.lr) \
                         .minimize(self.loss, global_step=self.step)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("generator loss", self.g_loss)
    _ = tf.scalar_summary("total loss", self.loss)
losses.py (project: youtube-8m, author: wangheda)
def calculate_mseloss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_mse"):
      float_labels = tf.cast(labels, tf.float32)
      mse_loss = tf.square(predictions-float_labels)
      return tf.reduce_mean(tf.reduce_sum(mse_loss, 1))
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      mse_loss = tf.square(float_labels - predictions)
      return tf.reduce_mean(tf.reduce_sum(mse_loss, 1))
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, support_predictions, labels, **unused_params):
    """ 
    support_predictions batch_size x num_models x num_classes
    predictions = tf.reduce_mean(support_predictions, axis=1)
    """
    model_count = tf.shape(support_predictions)[1]
    vocab_size = tf.shape(support_predictions)[2]

    mean_predictions = tf.reduce_mean(support_predictions, axis=1, keep_dims=True)
    support_labels = tf.tile(tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=1), multiples=[1,model_count,1])
    support_means = tf.stop_gradient(tf.tile(mean_predictions, multiples=[1,model_count,1]))

    support_predictions = tf.reshape(support_predictions, shape=[-1,model_count*vocab_size])
    support_labels = tf.reshape(support_labels, shape=[-1,model_count*vocab_size])
    support_means = tf.reshape(support_means, shape=[-1,model_count*vocab_size])

    ce_loss_fn = CrossEntropyLoss()
    # The cross entropy between predictions and ground truth
    cross_entropy_loss = ce_loss_fn.calculate_loss(support_predictions, support_labels, **unused_params)

    mse_loss_fn = MeanSquareErrorLoss()
    # The square error between predictions and mean predictions
    divergence = mse_loss_fn.calculate_loss(support_predictions, support_means, **unused_params)

    loss = cross_entropy_loss * (1.0 - FLAGS.support_loss_percent) - divergence * FLAGS.support_loss_percent
    return loss
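In equation form, with p = FLAGS.support_loss_percent, the divergence term is subtracted, so minimizing the loss pushes each model away from the (stop-gradient) ensemble mean \bar{y}:

    \mathcal{L} = (1 - p)\,\mathcal{L}_{\mathrm{CE}}(\hat{y}, y) - p\,\mathcal{L}_{\mathrm{MSE}}(\hat{y}, \bar{y})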
cnn_lstm_memory_normalization_model.py (project: youtube-8m, author: wangheda)
def layer_normalize(self, input_raw, epsilon=1e-8):
    feature_dim = len(input_raw.get_shape()) - 1
    mean_input = tf.reduce_mean(input_raw, axis=feature_dim, keep_dims=True)
    std_input = tf.sqrt(tf.reduce_mean(tf.square(input_raw-mean_input), axis=feature_dim, keep_dims=True))
    std_input = tf.maximum(std_input, epsilon)
    output = (input_raw - mean_input) / std_input
    return output
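In equation form, with the mean \mu and standard deviation \sigma taken over the feature (last) axis:

    \mathrm{output} = \frac{x - \mu}{\max(\sigma, \epsilon)}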
check_video_id_match.py (project: youtube-8m, author: wangheda)
def build_graph(all_readers,
                input_reader,
                input_data_pattern,
                all_eval_data_patterns,
                batch_size=256):

  original_video_id, original_input, unused_labels_batch, unused_num_frames = (
      get_input_evaluation_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))

  video_id_notequal_tensors = []
  model_input_tensor = None
  input_distance_tensors = []
  for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
    video_id, model_input_raw, labels_batch, unused_num_frames = (
        get_input_evaluation_tensors(
            reader,
            data_pattern,
            batch_size=batch_size))
    video_id_notequal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
    if model_input_tensor is None:
      model_input_tensor = model_input_raw
    input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))

  video_id_mismatch_tensor = tf.stack(video_id_notequal_tensors)
  input_distance_tensor = tf.stack(input_distance_tensors)
  actual_batch_size = tf.shape(original_video_id)[0]

  tf.add_to_collection("video_id_mismatch", video_id_mismatch_tensor)
  tf.add_to_collection("input_distance", input_distance_tensor)
  tf.add_to_collection("actual_batch_size", actual_batch_size)
check_video_id.py (project: youtube-8m, author: wangheda)
def build_graph(all_readers,
                input_reader,
                input_data_pattern,
                all_eval_data_patterns,
                batch_size=256):

  original_video_id, original_input, unused_labels_batch, unused_num_frames = (
      get_input_evaluation_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))

  video_id_equal_tensors = []
  model_input_tensor = None
  input_distance_tensors = []
  for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
    video_id, model_input_raw, labels_batch, unused_num_frames = (
        get_input_evaluation_tensors(
            reader,
            data_pattern,
            batch_size=batch_size))
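    # tf.not_equal counts mismatched ids here; a sum of zero below means the
    # video ids agree, despite the "equal" naming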
    video_id_equal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
    if model_input_tensor is None:
      model_input_tensor = model_input_raw
    input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))

  video_id_equal_tensor = tf.stack(video_id_equal_tensors)
  input_distance_tensor = tf.stack(input_distance_tensors)

  tf.add_to_collection("video_id_equal", video_id_equal_tensor)
  tf.add_to_collection("input_distance", input_distance_tensor)
mmd_vae_eval.py (project: MMD-Variational-Autoencoder, author: ShengjiaZhao)
def compute_kernel(x, y):
    x_size = tf.shape(x)[0]
    y_size = tf.shape(y)[0]
    dim = tf.shape(x)[1]
    tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
    tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
    return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
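This kernel is the building block of the MMD estimate; a sketch of the standard estimator (the original project defines a companion compute_mmd along these lines):

def compute_mmd(x, y):
    # MMD^2 = E[k(x, x')] + E[k(y, y')] - 2 E[k(x, y)]
    x_kernel = compute_kernel(x, x)
    y_kernel = compute_kernel(y, y)
    xy_kernel = compute_kernel(x, y)
    return (tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel)
            - 2 * tf.reduce_mean(xy_kernel))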
TensorFlow_Test.py (project: TensorFlow-World, author: astorfi)
def testSquare(self):
    with self.test_session():
      x = tf.square([2, 3])
      self.assertAllEqual(x.eval(), [4, 9])
multiclass_svm.py (project: TensorFlow-World, author: astorfi)
def kernel_pred(x_data, prediction_grid):
    A = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    B = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    square_distance = tf.add(tf.subtract(A, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))),
                             tf.transpose(B))
    return tf.exp(tf.multiply(gamma, tf.abs(square_distance)))
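Note that gamma is a module-level constant defined elsewhere in the script; for exp(gamma * |d|) to behave as a Gaussian RBF kernel it must be negative. A hedged usage sketch (the value -10.0 and the 2-D shapes are illustrative assumptions):

gamma = tf.constant(-10.0)  # assumed value; negative so the kernel decays
x_data = tf.placeholder(tf.float32, [None, 2])
prediction_grid = tf.placeholder(tf.float32, [None, 2])
pred_kernel = kernel_pred(x_data, prediction_grid)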
rnn_ops.py (project: skiprnn-2017-telecombcn, author: imatge-upc)
def layer_norm(x, axes=1, initial_bias_value=0.0, epsilon=1e-3, name="var"):
    """
    Apply layer normalization to x
    Args:
        x: input variable.
        initial_bias_value: initial value for the LN bias.
        epsilon: small constant value to avoid division by zero.
        scope: scope or name for the LN op.
    Returns:
        LN(x) with same shape as x
    """
    if not isinstance(axes, list):
        axes = [axes]

    scope = tf.get_variable_scope()
    with tf.variable_scope(scope):
        with tf.variable_scope(name):
            mean = tf.reduce_mean(x, axes, keep_dims=True)
            variance = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True))

            with tf.device('/cpu:0'):
                gain = tf.get_variable('gain', x.get_shape().as_list()[1:],
                                       initializer=tf.constant_initializer(1.0))
                bias = tf.get_variable('bias', x.get_shape().as_list()[1:],
                                       initializer=tf.constant_initializer(initial_bias_value))

            return gain * (x - mean) / (variance + epsilon) + bias
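In equation form (note that the tensor named `variance` actually holds the standard deviation):

    \mathrm{LN}(x) = g \odot \frac{x - \mu}{\sigma + \epsilon} + b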
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def preproc(self, X):
        return np.concatenate([np.ones([X.shape[0], 1]), X, np.square(X)/2.0], axis=1)
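A worked example of the feature map [1, X, X²/2]:

X = np.array([[1., 2.],
              [3., 4.]])
# preproc(X) -> [[1., 1., 2., 0.5, 2. ],
#                [1., 3., 4., 4.5, 8. ]]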
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def gauss_prob(mu, logstd, x):
    std = tf.exp(logstd)
    var = tf.square(std)
    gp = tf.exp(-tf.square(x - mu) / (2 * var)) / ((2 * np.pi) ** .5 * std)
    return tf.reduce_prod(gp, [1])
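For reference, the per-dimension Gaussian density evaluated above, with the per-dimension probabilities multiplied together via tf.reduce_prod:

    \mathcal{N}(x; \mu, \sigma^2) = \frac{1}{\sqrt{2\pi}\,\sigma} \exp\!\left(-\frac{(x - \mu)^2}{2\sigma^2}\right)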

