Python mul() example source code
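All snippets below come from pre-1.0 TensorFlow code: tf.mul, tf.sub, and tf.neg were removed in TensorFlow 1.0 in favor of tf.multiply, tf.subtract, and tf.negative. A minimal compatibility shim for running these excerpts on a newer release (a sketch, not an official API):

import tensorflow as tf

# Pre-1.0 op names; rebind them if this TF build no longer has them.
if not hasattr(tf, 'mul'):
    tf.mul = tf.multiply
    tf.sub = tf.subtract
    tf.neg = tf.negative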

model_func.py (project: traffic_video_analysis, author: polltooh)
def _variable_with_weight_decay(name, shape, wd = 0.0):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with Xavier initialization.
    A weight decay is added only if one is specified.

    Args:
            name: name of the variable
            shape: list of ints
            wd: add L2Loss weight decay multiplied by this float. If 0.0 (the
                    default), weight decay is not added for this Variable.

    Returns:
            Variable Tensor
    """
    var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
    # print("change var")
    # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)
    if wd != 0.0:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
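A hypothetical call site (the helper _variable_on_cpu is project code not shown here, assumed to create the variable on /cpu:0):

# Hypothetical usage: a 3x3x64x64 conv kernel with weight decay 1e-4.
kernel = _variable_with_weight_decay('weights', [3, 3, 64, 64], wd=1e-4)
# The scaled L2 term now sits in the 'losses' collection and is summed
# into the total loss via tf.add_n(tf.get_collection('losses')).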
resdeconv_model.py (project: traffic_video_analysis, author: polltooh)
def loss(infer, count_diff_infer, label):
    l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(infer - label), [1,2,3]), name = 'l2_loss')
    #l2_loss = mf.huber_loss(tf.reduce_sum(infer, [1,2,3]), tf.reduce_sum(label, [1,2,3]), huber_epsilon, 'density_loss')

    huber_epsilon = 5.0
    c_lambda = 0.1
    count_infer = tf.add(tf.squeeze(count_diff_infer), tf.reduce_sum(infer, [1,2,3]), name = "count_infer")
    count_loss = tf.mul(c_lambda, mf.huber_loss(count_infer, tf.reduce_sum(label, [1,2,3]), huber_epsilon, 'huber_loss'),
                name = 'count_loss')
    #count_loss = tf.mul(c_lambda, tf.reduce_mean(tf.square(count_infer - tf.reduce_sum(label, [1,2,3]))),
                    #name = 'count_loss')

    tf.add_to_collection('losses', count_loss)
    tf.add_to_collection('losses', l2_loss)

    return tf.add_n(tf.get_collection('losses'), name = 'total_loss'), count_infer
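mf.huber_loss is project code not shown here; the standard Huber penalty it presumably implements (an assumption) is quadratic up to a threshold epsilon and linear beyond it. A sketch in that spirit:

import tensorflow as tf

def huber_loss_sketch(pred, target, epsilon):
    # Quadratic for |d| <= epsilon, linear for |d| > epsilon; a common
    # formulation, not necessarily identical to mf.huber_loss.
    d = tf.abs(pred - target)
    quadratic = 0.5 * tf.square(d)
    linear = epsilon * (d - 0.5 * epsilon)
    return tf.reduce_mean(tf.where(d <= epsilon, quadratic, linear))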
losses.py (project: piecewisecrf, author: Vaan5)
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
losses.py (project: piecewisecrf, author: Vaan5)
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
losses.py (project: piecewisecrf, author: Vaan5)
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
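All three factories return a closure mapping a tensor to a scalar op; a hypothetical call site:

# Hypothetical usage: attach an L1 penalty to a weight matrix.
weights = tf.Variable(tf.zeros([256, 128]), name='W')
reg_fn = l1_regularizer(weight=1e-5)
reg_term = reg_fn(weights)  # scalar op: 1e-5 * sum(|W|)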
losses.py (project: piecewisecrf, author: Vaan5)
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
losses.py (project: piecewisecrf, author: Vaan5)
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
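Note that tf.nn.l2_loss(t) computes sum(t ** 2) / 2 (no square root), so the op added to the collection is weight * sum(t ** 2) / 2. A quick numpy check of the convention:

import numpy as np

t = np.array([3.0, 4.0])
assert np.sum(t ** 2) / 2 == 12.5  # tf.nn.l2_loss([3., 4.]) evaluates to 12.5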
general.py (project: Chinese-QA, author: distantJing)
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
anomaly.py (project: kboc, author: vmonaco)
def _create_network(self):
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"],
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1,
                               dtype=tf.float32, seed=np.random.randint(0, 1e9))
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean,
                        tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
nnlib.py (project: rec-attend-public, author: renmengye)
def weight_variable(shape,
                    initializer=None,
                    init_val=None,
                    wd=None,
                    name=None,
                    trainable=True):
  """Initialize weights.
  Args:
    shape: shape of the weights, list of int
    wd: weight decay
  """
  log = logger.get()
  if initializer is None:
    initializer = tf.truncated_normal_initializer(stddev=0.01)
  if init_val is None:
    var = tf.Variable(initializer(shape), name=name, trainable=trainable)
  else:
    var = tf.Variable(init_val, name=name, trainable=trainable)
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
hermitian.py (project: factorix, author: gbouchar)
def sparse_hermitian_product(emb, tuples):
    """
    Compute the Hermitian inner product between selected complex embeddings.
    This corresponds to the usual dot product applied to the conjugate of the first vector: <conj(x), y>,
    where conj is the complex conjugate (obtained by negating the imaginary part).
    We consider that the embedding dimension is twice the rank, where the real part is in embeddings[:, :rk] and
    the imaginary part is in embeddings[:, rk:].
    It computes
     S[i] = <conj(E[I[i, 0]]), E[I[i, 1]]>
    Usage:
    S = sparse_hermitian_product(E, I)
    :param emb: embedding matrix of size [n_emb, 2 * r] containing float numbers where r is the complex rank
    :param tuples: tuple matrix of size [n_t, 2] containing integers that correspond to the indices of the embeddings
    :return: a pair containing the real and imaginary parts of the Hermitian dot products
    """
    rk = emb.get_shape()[1].value // 2
    emb_re = emb[:, :rk]
    emb_im = emb[:, rk:]
    emb_sel_a_re = tf.gather(emb_re, tuples[:, 0])
    emb_sel_a_im = tf.gather(emb_im, tuples[:, 0])
    emb_sel_b_re = tf.gather(emb_re, tuples[:, 1])
    emb_sel_b_im = tf.gather(emb_im, tuples[:, 1])
    pred_re = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_re) + tf.mul(emb_sel_a_im, emb_sel_b_im), 1)
    pred_im = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_im) - tf.mul(emb_sel_a_im, emb_sel_b_re), 1)
    return pred_re, pred_im
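A numpy check of the decomposition used above: for a = a_re + i*a_im and b = b_re + i*b_im, the Hermitian product <conj(a), b> has real part sum(a_re*b_re + a_im*b_im) and imaginary part sum(a_re*b_im - a_im*b_re):

import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal(4) + 1j * rng.standard_normal(4)
b = rng.standard_normal(4) + 1j * rng.standard_normal(4)
h = np.sum(np.conj(a) * b)
re = np.sum(a.real * b.real + a.imag * b.imag)
im = np.sum(a.real * b.imag - a.imag * b.real)
assert np.allclose(h, re + 1j * im)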
cdq.py (project: aaai17-cdq, author: caoyue10)
def loss_functions(self):
        with tf.device(self.device):
            ### Loss Function
            ### O = L + \lambda (Q^x + Q^y)
            ### L = sum_{ij} (log (1 + exp(alpha * <u_i,v_j>)) - alpha * s_ij * <u_i, v_j>)
            ### Q^x = || u - C * b_x ||
            ### Q^y = || v - C * b_y ||
            ### InnerProduct Value \in [-15, 15]
            InnerProduct = tf.clip_by_value(tf.mul(self.alpha, tf.matmul(self.img_last_layer, tf.transpose(self.txt_last_layer))), -1.5e1, 1.5e1)
            Sim = tf.clip_by_value(tf.matmul(self.img_label, tf.transpose(self.txt_label)), 0.0, 1.0)
            t_ones = tf.ones([tf.shape(self.img_last_layer)[0], tf.shape(self.txt_last_layer)[0]])

            self.cross_entropy_loss = tf.reduce_mean(tf.sub(tf.log(tf.add(t_ones, tf.exp(InnerProduct))), tf.mul(Sim, InnerProduct)))

            self.cq_loss_img = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(self.img_last_layer, tf.matmul(self.b_img, self.C))), 1))
            self.cq_loss_txt = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(self.txt_last_layer, tf.matmul(self.b_txt, self.C))), 1))
            self.q_lambda = tf.Variable(self.cq_lambda, name='lambda')
            self.cq_loss = tf.mul(self.q_lambda, tf.add(self.cq_loss_img, self.cq_loss_txt))
            self.total_loss = tf.add(self.cross_entropy_loss, self.cq_loss)
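A numpy rendering of a single (i, j) term of the L comment above, with the same clipping that keeps exp() finite (a sketch under those assumptions):

import numpy as np

def pairwise_term(u, v, s, alpha):
    # log(1 + exp(alpha * <u, v>)) - alpha * s_ij * <u, v>, clipped as above.
    ip = np.clip(alpha * np.dot(u, v), -15.0, 15.0)
    return np.log1p(np.exp(ip)) - s * ip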
face_point_with_decay.py (project: Face_Point, author: EllenSimith)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
tf.py (project: keras-mdn, author: yanji84)
def get_mixture_coef(output, KMIX=24, OUTPUTDIM=1):
  # NOTE: the three placeholders below are dead code; the names are
  # immediately rebound to the split results a few lines down.
  out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX*OUTPUTDIM], name="mixparam")
  splits = tf.split(1, 2 + OUTPUTDIM, output)
  out_pi = splits[0]
  out_sigma = splits[1]
  out_mu = tf.pack(splits[2:], axis=2)
  out_mu = tf.transpose(out_mu, [1,0,2])
  # use softmax to normalize pi into prob distribution
  max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
  out_pi = tf.sub(out_pi, max_pi)
  out_pi = tf.exp(out_pi)
  normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))
  out_pi = tf.mul(normalize_pi, out_pi)
  # use exponential to make sure sigma is positive
  out_sigma = tf.exp(out_sigma)
  return out_pi, out_sigma, out_mu
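The pi branch above is a hand-written numerically stable softmax: subtract the row max, exponentiate, normalize. In numpy the same three steps are:

import numpy as np

def stable_softmax(x):
    # Subtract the row max before exp() so large logits cannot overflow;
    # equivalent to the out_pi normalization above.
    z = np.exp(x - x.max(axis=1, keepdims=True))
    return z / z.sum(axis=1, keepdims=True)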
cifar10.py (project: SLAM, author: sanjeevkumar42)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
layers.py (project: tefla, author: openAGI)
def leaky_relu(x, alpha=0.01, name='leaky_relu', outputs_collections=None, **unused):
    """
    Computes leaky relu

    Args:
        x: a `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`.
        alpha: the constant for scaling the activation
        name: an optional scope/name of the layer
        outputs_collections: The collections to which the outputs are added.

    Returns:
        A `Tensor` representing the results of the activation operation.
    """
    _check_unused(unused, name)
    with tf.name_scope(name):
        try:
            output = tf.nn.relu(x) + tf.mul(alpha, (x - tf.abs(x))) * 0.5
        except Exception:
            output = tf.nn.relu(x) + tf.multiply(alpha, (x - tf.abs(x))) * 0.5
        return _collect_named_outputs(outputs_collections, name, output)
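The branchless expression relu(x) + alpha * (x - |x|) * 0.5 equals the usual piecewise leaky ReLU: for x >= 0 the second term vanishes, and for x < 0 it reduces to alpha * x. A numpy check:

import numpy as np

x = np.linspace(-3.0, 3.0, 7)
alpha = 0.01
branchless = np.maximum(x, 0.0) + alpha * (x - np.abs(x)) * 0.5
reference = np.where(x >= 0, x, alpha * x)
assert np.allclose(branchless, reference)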
model_cifar.py (project: deep_separation_contraction, author: edouardoyallon)
def loss(logits, labels,n_class, scope='loss'):
  with tf.variable_scope(scope):
    # entropy loss
    targets = one_hot_embedding(labels, n_class)
    entropy_loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, targets),
      name='entropy_loss')
    tf.add_to_collection('losses', entropy_loss)
    # weight l2 decay loss
    weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
    weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
      name='weight_decay_loss')
    tf.add_to_collection('losses', weight_decay_loss)
  for var in tf.get_collection('losses'):
    tf.scalar_summary('losses/' + var.op.name, var)
  # total loss
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
model_cifar_contract.py (project: deep_separation_contraction, author: edouardoyallon)
def block(x, n_in, n_out, subsample,  phase_train, scope='res_block'):
  with tf.variable_scope(scope):
    if subsample:
      y = conv2d(x, n_in, n_out, 3, 2, 'SAME', False,phase_train, scope='conv_1')
    else:
      y = conv2d(x, n_in, n_out, 3, 1, 'SAME', False,phase_train, scope='conv_1')

    y = \
      batch_norm(y, n_out, phase_train, scope='bn_1')

    y = tf.mul(tf.sign(y),tf.sqrt(tf.abs(y)+1e-5) + 0.1)

    y = conv2d(y, n_out, n_out, 3, 1, 'SAME', False, phase_train, scope='conv_2')
    y = batch_norm(y, n_out, phase_train, scope='bn_2')

    y = tf.mul(tf.sign(y), tf.sqrt(tf.abs(y)+1e-5) + 0.1)
    return y
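In place of a ReLU, this block applies a signed square root, y -> sign(y) * (sqrt(|y| + 1e-5) + 0.1), which compresses large activations while preserving sign. A numpy version for reference:

import numpy as np

def signed_sqrt(y, eps=1e-5, bias=0.1):
    # sign(y) * (sqrt(|y| + eps) + bias); note sign(0) == 0, so 0 maps to 0.
    return np.sign(y) * (np.sqrt(np.abs(y) + eps) + bias)

print(signed_sqrt(np.array([-4.0, 0.0, 4.0])))  # ~[-2.1, 0.0, 2.1]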
model_cifar_contract.py (project: deep_separation_contraction, author: edouardoyallon)
def loss(logits, labels,n_class, scope='loss'):
  with tf.variable_scope(scope):
    # entropy loss
    targets = one_hot_embedding(labels, n_class)
    entropy_loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, targets),
      name='entropy_loss')
    tf.add_to_collection('losses', entropy_loss)
    # weight l2 decay loss
    weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
    weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
      name='weight_decay_loss')
    tf.add_to_collection('losses', weight_decay_loss)
  for var in tf.get_collection('losses'):
    tf.scalar_summary('losses/' + var.op.name, var)
  # total loss
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
facenet.py (project: real_time_face_recognition, author: shanren7)
def lppool(inpOp, pnorm, kH, kW, dH, dW, padding):
  global pool_counter
  global parameters
  name = 'pool' + str(pool_counter)
  pool_counter += 1

  with tf.name_scope('lppool'):
    if pnorm == 2:
      pwr = tf.square(inpOp)
    else:
      pwr = tf.pow(inpOp, pnorm)

    subsamp = tf.nn.avg_pool(pwr,
                          ksize=[1, kH, kW, 1],
                          strides=[1, dH, dW, 1],
                          padding=padding,
                          name=name)
    subsamp_sum = tf.mul(subsamp, kH*kW)

    if pnorm == 2:
      out = tf.sqrt(subsamp_sum)
    else:
      out = tf.pow(subsamp_sum, 1/pnorm)

  return out
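Since avg_pool averages over the kH*kW window, multiplying by kH*kW recovers the window sum, so the whole function computes the Lp pool (sum over the window of x^p)^(1/p). A one-window numpy check for p = 2:

import numpy as np

window = np.array([1.0, 2.0, 2.0])      # one pooling window, p = 2
lp = np.sum(window ** 2) ** 0.5         # sqrt(1 + 4 + 4) = 3.0
avg_trick = np.sqrt(np.mean(window ** 2) * window.size)
assert np.allclose(lp, avg_trick)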

