Python squared_difference() usage examples (source code)
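tf.squared_difference(x, y) computes the element-wise squared difference (x - y)^2, broadcasting its arguments like other binary TensorFlow ops. A minimal sketch of the semantics, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([3.0, 2.0, 1.0])
sq = tf.squared_difference(x, y)  # element-wise (x - y)^2

with tf.Session() as sess:
    print(sess.run(sq))  # [4. 0. 4.]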

human_pose_nn.py (project: gait-recognition, author: marian-margeta)
def euclidean_distance(self):
        # Peak coordinates of each joint heatmap: collapse one spatial axis
        # with reduce_max, then take the argmax along the other.
        x = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 1), 1)
        y = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 2), 1)

        x = tf.cast(x, tf.float32)
        y = tf.cast(y, tf.float32)

        dy = tf.squeeze(self.desired_points[:, 0, :])
        dx = tf.squeeze(self.desired_points[:, 1, :])

        sx = tf.squared_difference(x, dx)
        sy = tf.squared_difference(y, dy)

        # Per-joint Euclidean distance between estimated and desired points
        l2_dist = tf.sqrt(sx + sy)

        return l2_dist
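The snippet above recovers 2-D peak coordinates from joint heatmaps by collapsing one spatial axis with reduce_max and taking the argmax along the other. A numpy sketch of the same trick on a single hypothetical [H, W] heatmap:

import numpy as np

heatmap = np.zeros((8, 8))
heatmap[3, 5] = 1.0  # peak at row y=3, column x=5

x = np.argmax(heatmap.max(axis=0))  # collapse rows, locate the column
y = np.argmax(heatmap.max(axis=1))  # collapse columns, locate the row
print(x, y)  # 5 3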
bmf.py (project: DeepFM, author: dwt0317)
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    # Prediction: dot product of the bias-augmented embeddings plus the global mean mu.
    preds = tf.add(tf.reduce_sum(tf.multiply(user_feature, item_feature), 1), mu)

    # Note: despite its name, square_error is the RMSE of the predictions.
    square_error = tf.sqrt(tf.reduce_mean(tf.squared_difference(preds, ratings)))
    loss = square_error + lamb * (tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)   # tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)    #

    return train_step, square_error, loss, merged_summary
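A hedged usage sketch for build_model above; the placeholder shapes, hyperparameters, and feed values are illustrative, and the original file is assumed to import tensorflow as tf and math:

import math
import tensorflow as tf

user_indices = tf.placeholder(tf.int32, [None])
item_indices = tf.placeholder(tf.int32, [None])
ratings = tf.placeholder(tf.float32, [None])

train_step, square_error, loss, merged_summary = build_model(
    user_indices, item_indices, rank=16, ratings=ratings,
    user_cnt=1000, item_cnt=500, lr=0.01, lamb=0.1, mu=3.5, init_value=1.0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # feed integer ids and observed ratings per mini-batch
    _, rmse = sess.run([train_step, square_error],
                       feed_dict={user_indices: [0, 1], item_indices: [2, 3], ratings: [4.0, 3.0]})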
main.py (project: unreal-implementation, author: 404akhan)
def _build_vr_network(self):
        self.vr_states = tf.placeholder(shape=[None, 80, 80, 4], dtype=tf.float32)
        self.vr_value_targets = tf.placeholder(shape=[None], dtype=tf.float32)

        with tf.variable_scope("shared", reuse=True):
            conv2 = self.build_shared_network(self.vr_states)

        fc1 = tf.contrib.layers.fully_connected(
            inputs=tf.contrib.layers.flatten(conv2),
            num_outputs=256,
            scope="fc1",
            reuse=True)

        self.vr_value = tf.contrib.layers.fully_connected(
            inputs=fc1,
            num_outputs=1,
            activation_fn=None,
            scope='logits_value',
            reuse=True)

        self.vr_value = tf.squeeze(self.vr_value, axis=[1])

        self.vr_losses = tf.squared_difference(self.vr_value, self.vr_value_targets)
        self.vr_loss = tf.reduce_sum(self.vr_losses)
        self.vr_loss = self.pc_vr_lambda * self.vr_loss
a3c.py (project: Safe-RL-Benchmark, author: befelix)
def __init__(self, policy, rate, train=True):
        self.rate = rate

        with tf.variable_scope('value_estimator'):
            self.X = tf.placeholder(policy.dtype,
                                    shape=policy.X.shape,
                                    name='X')
            self.V = tf.placeholder(policy.dtype,
                                    shape=[None],
                                    name='V')

            self.W = policy.init_weights((policy.layers[0], 1))

            # Squeeze to shape [batch]: otherwise squared_difference would
            # broadcast [batch, 1] against [batch] into a [batch, batch] matrix.
            self.V_est = tf.squeeze(tf.matmul(self.X, self.W), axis=[1])

            self.losses = tf.squared_difference(self.V_est, self.V)
            self.loss = tf.reduce_sum(self.losses, name='loss')

            if train:
                self.opt = tf.train.RMSPropOptimizer(rate, 0.99, 0.0, 1e-6)
                self.grads_and_vars = self.opt.compute_gradients(self.loss)
                self.grads_and_vars = [(g, v) for g, v in self.grads_and_vars
                                       if g is not None]
                self.update = self.opt.apply_gradients(self.grads_and_vars)
bmf.py (project: OpenLearning4DeepRecsys, author: Leavingseason)
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    # Prediction: dot product of the bias-augmented embeddings plus the global mean mu.
    preds = tf.add(tf.reduce_sum(tf.multiply(user_feature, item_feature), 1), mu)

    # Note: despite its name, square_error is the RMSE of the predictions.
    square_error = tf.sqrt(tf.reduce_mean(tf.squared_difference(preds, ratings)))
    loss = square_error + lamb * (tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)   # tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)    #

    return train_step, square_error, loss, merged_summary
train.py (project: CIKM2017, author: heliarmk)
def combind_loss(logits, labels, reg_preds, reg_labels):
    alpha = 1
    beta = 0.025
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cem = tf.reduce_mean(cross_entropy, name='cross_entropy')
    w_cem = cem * alpha
    tf.add_to_collection("losses", w_cem)
    reg_labels = tf.reshape(reg_labels, (-1, 1))
    # rmse = tf.sqrt(tf.losses.mean_squared_error(reg_labels, reg_preds, loss_collection=None))
    rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(reg_labels, reg_preds)))
    w_rmse = rmse * beta
    tf.add_to_collection("losses", w_rmse)

    return tf.add_n(tf.get_collection("losses"), name='combinded_loss'), cem, rmse
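combind_loss accumulates its weighted terms in the 'losses' graph collection and sums them with tf.add_n. A minimal sketch of that collection pattern, assuming TensorFlow 1.x:

import tensorflow as tf

tf.add_to_collection("losses", tf.constant(1.0))
tf.add_to_collection("losses", tf.constant(0.5))
total = tf.add_n(tf.get_collection("losses"), name="combined")

with tf.Session() as sess:
    print(sess.run(total))  # 1.5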
core_test.py (project: lsdc, author: febert)
def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()

    self.ops = [
        ('igamma', None, tf.igamma, core.igamma),
        ('igammac', None, tf.igammac, core.igammac),
        ('zeta', None, tf.zeta, core.zeta),
        ('polygamma', None, tf.polygamma, core.polygamma),
        ('maximum', None, tf.maximum, core.maximum),
        ('minimum', None, tf.minimum, core.minimum),
        ('squared_difference', None, tf.squared_difference,
         core.squared_difference),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
test_boundary_optimization.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance(self, c, s):
  """Creates a soft-min distance of the centers to the points"""
  c_shape = c.get_shape().as_list();        
  s_shape = s.get_shape().as_list();

  #expand matrices
  cc = tf.reshape(c, [c_shape[0], c_shape[1], 1]);    
  ss = tf.reshape(s, [s_shape[0], s_shape[1], 1]);
  ss = tf.transpose(ss, perm = [0,2,1]);
  cc = tf.tile(cc, [1, 1, s_shape[0]]);
  ss = tf.tile(ss, [c_shape[0], 1, 1]);

  #pairwise distances
  dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 1));
  dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

  #softmin
  return tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 0);
machine_vision_b.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance(c, s, k = 2.0):
  """Creates a soft-min distance of the centers to the points"""
  c_shape = c.get_shape().as_list();        
  s_shape = s.get_shape().as_list();

  #expand matrices
  cc = tf.reshape(c, [c_shape[0], c_shape[1], 1]);    
  ss = tf.reshape(s, [s_shape[0], s_shape[1], 1]);
  ss = tf.transpose(ss, perm = [2,1,0]);
  cc = tf.tile(cc, [1, 1, s_shape[0]]);
  ss = tf.tile(ss, [c_shape[0], 1, 1]);
  #cc = tf.transpose(cc, perm = [2,1,0]);
  #cc = tf.tile(cc, [s_shape[0], 1, 1]);
  #ss = tf.tile(ss, [1, 1, c_shape[0]]); 

  #pairwise distances
  dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 1));

  #softmin
  softmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-k,"float32"), dist2)), dist2), reduction_indices = 1);

  return tf.reduce_mean(softmin);
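The soft-min above weights every pairwise distance by softmax(-k * d), so the weighted sum approaches the true minimum as the sharpness k grows. A numpy sketch of the scalar version (values are illustrative):

import numpy as np

def softmin(d, k):
    w = np.exp(-k * d)
    w /= w.sum()
    return (w * d).sum()

d = np.array([0.5, 2.0, 3.0])
print(softmin(d, 2.0))   # ~0.59: already close to min(d) = 0.5
print(softmin(d, 20.0))  # ~0.5: sharper k, tighter approximation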
machine_vision_c.py (project: CElegansBehaviour, author: ChristophKirst)
def create_pair_wise_distances(x, y):
  x_shape = x.get_shape().as_list();        
  y_shape = y.get_shape().as_list();

  #expand matrices
  xx = tf.reshape(x, [x_shape[0], x_shape[1], 1]);    
  yy = tf.reshape(y, [y_shape[0], y_shape[1], 1]);
  yy = tf.transpose(yy, perm = [2,1,0]);
  xx = tf.tile(xx, [1, 1, y_shape[0]]);
  yy = tf.tile(yy, [x_shape[0], 1, 1]);
  #cc = tf.transpose(cc, perm = [2,1,0]);
  #cc = tf.tile(cc, [s_shape[0], 1, 1]);
  #ss = tf.tile(ss, [1, 1, c_shape[0]]); 

  #pairwise distances
  dist = tf.sqrt(tf.reduce_sum(tf.squared_difference(xx,yy), reduction_indices = 1));
  return dist;
machine_vision_c.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_aligned_distance(x,y,nx,ny, k = 2.0, gamma = 1.0):
  d = create_pair_wise_distances(x, y);
  a = create_pair_wise_dots(nx, ny);
  a = tf.scalar_mul(-0.5, tf.add(a, -1.0)); # [0,1] 0 = aligned
  return tf.reduce_mean(create_aligned_distance(d, a, k = k, gamma = gamma));


#def create_cost_spacing(c, length, normalized = True):
#  c_shape = c.get_shape().as_list();
#  c1 = tf.slice(c, [1,0], [-1,-1]);
#  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
#  d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
#  if normalized:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
#  else:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
machine_vision_d.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_aligned_distance(x,y,nx,ny, k = 2.0, gamma = 1.0):
  d = create_pair_wise_distances(x, y);
  a = create_pair_wise_dots(nx, ny);
  a = tf.scalar_mul(-0.5, tf.add(a, -1.0)); # [0,1] 0 = aligned
  return tf.reduce_mean(create_aligned_distance(d, a, k = k, gamma = gamma));


#def create_cost_spacing(c, length, normalized = True):
#  c_shape = c.get_shape().as_list();
#  c1 = tf.slice(c, [1,0], [-1,-1]);
#  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
#  d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
#  if normalized:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
#  else:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
machine_vision_2.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);
    return tf.reduce_mean(distmin);
machine_vision_2.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance_valid(self, c, s, v):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    mm = tf.reduce_max(v); #hack for batch size = 1
    ss = tf.slice(s, [0,0,0], [-1,mm,-1]);
    ss = tf.reshape(ss, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);
    return tf.reduce_mean(distmin);
machine_vision_3.py (project: CElegansBehaviour, author: ChristophKirst)
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);
    return tf.reduce_mean(distmin);
cost.py (project: deepsleepnet, author: akaraspt)
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the per-sample loss, otherwise use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse
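A short usage sketch of the two reduction modes, assuming the function above is in scope (tensor values are illustrative):

import tensorflow as tf

output = tf.constant([[1.0, 2.0], [3.0, 4.0]])
target = tf.constant([[0.0, 0.0], [0.0, 0.0]])

with tf.Session() as sess:
    # per-sample sums (5.0 and 25.0), averaged over the batch
    print(sess.run(mean_squared_error(output, target)))               # 15.0
    # per-sample means (2.5 and 12.5), averaged over the batch
    print(sess.run(mean_squared_error(output, target, is_mean=True))) # 7.5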
cost.py (project: tensorlayer-chinese, author: shorxp)
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-square-error of two distributions.

    Parameters
    ----------
    output : 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, w, h] or [batch_size, w, h, c].
    target : 2D, 3D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 3:   # [batch_size, w, h]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2]))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse
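The quantity computed above is the batch mean of ||output - target||_2 / ||target||_2, i.e. a relative L2 error rather than a squared error. A numpy check with illustrative values:

import numpy as np

output = np.array([[1.0, 2.0]])
target = np.array([[2.0, 2.0]])

nmse_a = np.sqrt(((output - target) ** 2).sum(axis=1))
nmse_b = np.sqrt((target ** 2).sum(axis=1))
print((nmse_a / nmse_b).mean())  # 1 / sqrt(8) ~ 0.354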
predictron.py (project: predictron, author: brendanator)
def loss(preturns, lambda_preturn, labels):
  with tf.variable_scope('loss'):
    preturns_loss = tf.reduce_mean(
        tf.squared_difference(preturns, tf.expand_dims(labels, 1)))

    lambda_preturn_loss = tf.reduce_mean(
        tf.squared_difference(lambda_preturn, labels))

    consistency_loss = tf.reduce_mean(
        tf.squared_difference(
            preturns, tf.stop_gradient(tf.expand_dims(lambda_preturn, 1))))

    # tf.get_collection returns a list of tensors; sum it before adding.
    l2_loss = tf.add_n(tf.get_collection('losses'))

    total_loss = preturns_loss + lambda_preturn_loss + consistency_loss
    consistency_loss += l2_loss
    return total_loss, consistency_loss
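The tf.stop_gradient in the consistency term trains the individual preturns toward the lambda-preturn while leaving the lambda-preturn itself untouched by this loss. A minimal sketch of that asymmetry, assuming TensorFlow 1.x:

import tensorflow as tf

a = tf.Variable(1.0)
b = tf.Variable(3.0)

loss = tf.squared_difference(a, tf.stop_gradient(b))
print(tf.gradients(loss, [a, b]))  # [<Tensor>, None]: no gradient reaches b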
predictron.py (project: predictron, author: brendanator)
def loss(preturns, lambda_preturn, labels):
  with tf.variable_scope('loss'):
    preturns_loss = tf.reduce_mean(
        tf.squared_difference(preturns, tf.expand_dims(labels, 1)))

    lambda_preturn_loss = tf.reduce_mean(
        tf.squared_difference(lambda_preturn, labels))

    consistency_loss = tf.reduce_mean(
        tf.squared_difference(
            preturns, tf.stop_gradient(tf.expand_dims(lambda_preturn, 1))))

    # tf.get_collection returns a list of tensors; sum it before adding.
    l2_loss = tf.add_n(tf.get_collection('losses'))

    total_loss = preturns_loss + lambda_preturn_loss + consistency_loss
    consistency_loss += l2_loss
    return total_loss, consistency_loss
cost.py (project: dcgan, author: zsdonghao)
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the per-sample loss, otherwise use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse
preprocessor_test.py (project: tensorflow, author: luyishisi)
def testRandomFlipBoxes(self):
    boxes = self.createTestBoxes()

    # Case where the boxes are flipped.
    boxes_expected1 = self.expectedBoxesAfterMirroring()

    # Case where the boxes are not flipped.
    boxes_expected2 = boxes

    # After elementwise multiplication, the result should be all-zero since one
    # of them is all-zero.
    boxes_diff = tf.multiply(
        tf.squared_difference(boxes, boxes_expected1),
        tf.squared_difference(boxes, boxes_expected2))
    expected_result = tf.zeros_like(boxes_diff)

    with self.test_session() as sess:
      (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
      self.assertAllEqual(boxes_diff, expected_result)
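The product of the two squared differences above is all-zero exactly when every element of boxes matches the corresponding element of at least one candidate, so the assertion accepts either flip outcome. A numpy sketch with illustrative box coordinates:

import numpy as np

boxes = np.array([[0.1, 0.2, 0.3, 0.4]])
candidate1 = np.array([[0.1, 0.6, 0.3, 0.8]])  # e.g. the mirrored boxes
candidate2 = boxes                             # e.g. the unflipped boxes

diff = (boxes - candidate1) ** 2 * (boxes - candidate2) ** 2
print(np.all(diff == 0))  # True: boxes equals one candidate element-wise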
networks.py (project: comprehend, author: Fenugreek)
def target_cost(self, inputs, targets, function=tf.squared_difference, **kwargs):
        """
        For mapping problems, the mean of <function> (squared difference by default)
        between hidden values and targets, i.e. the cost for a given input batch
        of samples under the current params.
        """
        hidden = self.get_hidden_values(inputs, **kwargs)
        return tf.reduce_mean(function(hidden, targets))
networks.py (project: comprehend, author: Fenugreek)
def rms_loss(self, inputs, **kwargs):
        """
        Root-mean-squared difference between <inputs> and encoded-decoded output.
        """
        loss = tf.squared_difference(inputs, self.recode(inputs, **kwargs))
        return tf.reduce_mean(
                   tf.reduce_mean(loss, axis=range(1, self.input_dims)) ** .5)
LocaliseNet.py (project: LocaliseNet, author: najeeb97khan)
def _create_squared_loss(self, prev_layer, layer_name):

        with tf.variable_scope(layer_name) as scope:
            input_tensor, class_tensor, box_tensor = self.placeholder
            loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(box_tensor,prev_layer),reduction_indices=[1]))
            return loss
localisation.py (project: LocaliseNet, author: najeeb97khan)
def _create_squared_loss(self, prev_layer, layer_name):

        with tf.variable_scope(layer_name) as scope:
            input_tensor, class_tensor, box_tensor = self.placeholder
            loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(box_tensor,prev_layer),reduction_indices=[1]))
            return loss
human_pose_nn.py (project: gait-recognition, author: marian-margeta)
def _loss_mse(self):
        sq = tf.squared_difference(self.sigm_network, self.desired_heatmap)
        loss = self._adjust_loss(sq)

        return loss
vae.py (project: TensorFlow-ADGM, author: dancsalo)
def gaussian_log_density(x, mu, sigma2):
        c = - 0.5 * math.log(2 * math.pi)
        density = c - tf.log(sigma2) / 2 - tf.squared_difference(x, mu) / (2 * sigma2)
        # return -tf.reduce_mean(tf.reduce_sum(density, axis=-1), axis=(1, 2))
        return density
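The expression above is the log-density of a univariate Gaussian, log N(x; mu, sigma^2) = -log(2*pi)/2 - log(sigma^2)/2 - (x - mu)^2 / (2*sigma^2). A scipy check of the identity with illustrative scalars:

import math
from scipy.stats import norm

x, mu, sigma2 = 1.3, 0.5, 2.0
density = -0.5 * math.log(2 * math.pi) - math.log(sigma2) / 2 - (x - mu) ** 2 / (2 * sigma2)
print(density, norm.logpdf(x, loc=mu, scale=math.sqrt(sigma2)))  # the two values agree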
adgm.py (project: TensorFlow-ADGM, author: dancsalo)
def gaussian_log_density(x, mu, sigma2):
        c = - 0.5 * math.log(2 * math.pi)
        density = c - tf.log(sigma2) / 2 - tf.squared_difference(x, mu) / (2 * sigma2)
        return -tf.reduce_mean(tf.reduce_sum(density, axis=-1), axis=(1, 2))
evals.py (project: urnn, author: Rand0mUsername)
def train_urnn_for_timestep_idx(self, idx):
        print('Initializing and training URNNs for one timestep...')

        # CM

        tf.reset_default_graph()
        self.cm_urnn=TFRNN(
            name="cm_urnn",
            num_in=1,
            num_hidden=128,
            num_out=10,
            num_target=1,
            single_output=False,
            rnn_cell=URNNCell,
            activation_hidden=None, # modReLU
            activation_out=tf.identity,
            optimizer=tf.train.RMSPropOptimizer(learning_rate=glob_learning_rate, decay=glob_decay),
            loss_function=tf.nn.sparse_softmax_cross_entropy_with_logits)
        self.train_network(self.cm_urnn, self.cm_data[idx], 
                           self.cm_batch_size, self.cm_epochs)

        # AP

        tf.reset_default_graph()
        self.ap_urnn=TFRNN(
            name="ap_urnn",
            num_in=2,
            num_hidden=512,
            num_out=1,
            num_target=1,
            single_output=True,
            rnn_cell=URNNCell,
            activation_hidden=None, # modReLU
            activation_out=tf.identity,
            optimizer=tf.train.RMSPropOptimizer(learning_rate=glob_learning_rate, decay=glob_decay),
            loss_function=tf.squared_difference)
        self.train_network(self.ap_urnn, self.ap_data[idx], 
                           self.ap_batch_size, self.ap_epochs)

        print('Init and training URNNs for one timestep done.')
TFANN.py (project: pythonml, author: nicholastoddsmith)
def _GetLossFn(name):
    '''
    Helper function for selecting loss function
    name:   The name of the loss function
    return:     A handle for a loss function LF(YH, Y)
    '''
    return {'cos': lambda YH, Y : tf.losses.cosine_distance(Y, YH), 'hinge': lambda YH, Y : tf.losses.hinge_loss(Y, YH),
            'l1': lambda YH, Y : tf.losses.absolute_difference(Y, YH), 'l2': lambda YH, Y : tf.squared_difference(Y, YH),
            'log': lambda YH, Y : tf.losses.log_loss(Y, YH), 
            'sgce': lambda YH, Y : tf.nn.sigmoid_cross_entropy_with_logits(labels = Y, logits = YH), 
            'smce': lambda YH, Y : tf.nn.softmax_cross_entropy_with_logits(labels = Y, logits = YH)}.get(name)
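A hedged usage sketch for _GetLossFn; note that the 'l2' entry returns element-wise squared differences, so a reduction such as tf.reduce_mean is still needed (placeholder shapes are illustrative):

import tensorflow as tf

Y = tf.placeholder(tf.float32, [None, 1])
YH = tf.placeholder(tf.float32, [None, 1])

LF = _GetLossFn('l2')             # handle with signature LF(YH, Y)
loss = tf.reduce_mean(LF(YH, Y))  # reduce element-wise values to a scalar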

