Python sign() example source code

very_tiny_yolo.py (project: YOLOv1_tensorflow, author: Nielsyang)
def loss_func_yolo(output, exp):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(exp[i][j+2]) * (tf.square(output[i][j] - exp[i][j]) + tf.square(output[i][j+1]-exp[i][j+1]) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+2])) - tf.sqrt(exp[i][j+2])) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+3])) - tf.sqrt(exp[i][j+3])))

      res += tf.sign(exp[i][j+2]) * (tf.square(output[i][j+4] - exp[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(exp[i][j])) * (tf.square(output[i][j+4] - exp[i][j+4]))

      res += COORD_W * tf.sign(exp[i][j+7]) * (tf.square(output[i][j+5] - exp[i][j+5]) + tf.square(output[i][j+6]-exp[i][j+6]) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+7])) - tf.sqrt(exp[i][j+7])) + 
                                               tf.square(tf.sqrt(tf.abs(output[i][j+8])) - tf.sqrt(exp[i][j+8])))

      res += tf.sign(exp[i][j+7]) * (tf.square(output[i][j+9] - exp[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(exp[i][j+5])) * (tf.square(output[i][j+9] - exp[i][j+9]))

      res += tf.sign(exp[i][j+7]) * (tf.square(output[i][j+10] - exp[i][j+10]) + tf.square(output[i][j+11] - exp[i][j+11]))

  return res
attacks.py (project: FeatureSqueezing, author: uvasrg)
def get_gradient_sign_tf(x, predictions):
    """
    TensorFlow implementation of calculting signed gradient with respect to x.
    :param x: the input placeholder
    :param predictions: the model's output tensor
    :return: a tensor for the adversarial example
    """

    # Compute loss
    y = tf.to_float(tf.equal(predictions, tf.reduce_max(predictions, 1, keep_dims=True)))
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    loss = utils_tf.tf_model_loss(y, predictions, mean=False)

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)

    # Take sign of gradient
    signed_grad = tf.sign(grad)
    signed_grad = tf.stop_gradient(signed_grad)
    return signed_grad
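A common way to consume this signed gradient is a fast gradient sign step. A minimal sketch, assuming `x` is the input placeholder above and `eps` is an illustrative budget (neither is defined in this file):

eps = 0.3  # hypothetical perturbation budget
signed_grad = get_gradient_sign_tf(x, predictions)
# Step along the signed gradient and keep pixels in the valid [0, 1] range.
adv_x = tf.clip_by_value(x + eps * signed_grad, 0.0, 1.0)
adv_x = tf.stop_gradient(adv_x)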
diet.py (project: tensor2tensor, author: tensorflow)
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
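For reference, a sketch of the matching dequantization, inferred from the bitcast layout above (an assumption, not copied from the file):

def _dequantize(q, params):
  """Invert _quantize: recover the int16 payload and undo the scaling."""
  if not params.quantize:
    return q
  return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale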
HAMModel.py (project: DeeplearningForTextClassification, author: zldeng)
def getSequenceRealLength(sequences):
    '''
    Compute the real (unpadded) length of each sequence in `sequences`.
    input: an [a_size, b_size, c_size] tensor in which, along the b_size axis,
    padded positions are all-zero c_size vectors.
    return: a vector giving each sequence's real length along b_size.
    '''
    abs_sequences = tf.abs(sequences)

    # For padded positions the max over the feature axis is 0.
    abs_max_seq = tf.reduce_max(abs_sequences, reduction_indices = 2)

    max_seq_sign = tf.sign(abs_max_seq)

    # Summing the 0/1 indicators over the time axis gives the real length.
    real_len = tf.reduce_sum(max_seq_sign, reduction_indices = 1)

    return tf.cast(real_len, tf.int32)
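A minimal usage sketch (the batch below is illustrative): two sequences padded with zero vectors along the time axis yield lengths [2, 3].

batch = tf.constant([[[1., 2.], [3., 4.], [0., 0.]],   # real length 2
                     [[5., 6.], [7., 8.], [9., 0.]]])  # real length 3
seq_len = getSequenceRealLength(batch)
with tf.Session() as sess:
    print(sess.run(seq_len))  # -> [2 3]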
model.py (project: incremental-sequence-learning, author: edwin-de-jong)
def get_class_loss( self, args, z_classvars, z_classpred, targetdata_classvars ):
    self.mask = tf.sign( tf.abs( tf.reduce_max( targetdata_classvars, reduction_indices = 1 ) ) )

    self.result4 = tf.zeros( 1, dtype = tf.float32, name = None )
    if args.nrClassOutputVars > 0 and args.classweightfactor > 0:
      self.crossentropy = tf.nn.softmax_cross_entropy_with_logits( logits = z_classvars, labels = targetdata_classvars )
      self.result4 = args.classweightfactor *  self.crossentropy 
      self.result4 = tf.multiply( self.mask, self.result4 )
      self.targetdata_classvars = targetdata_classvars

    self.result = self.result4

    self.result_before_mask = self.result
    self.result *= self.mask #checked EdJ Sept 2: correctly only measures loss up to last point of actual sequence.
    self.lossvector = self.result

    self.lossnrpoints = tf.reduce_sum( self.mask )

    classloss = tf.reduce_sum( self.result  ) / self.lossnrpoints
    return classloss
ternary.py (project: ternarynet, author: czhu95)
def p_ternarize(x, p):

    x = tf.tanh(x)
    shape = x.get_shape()

    thre = tf.get_variable('T', trainable=False, collections=[tf.GraphKeys.VARIABLES, 'thresholds'],
            initializer=0.05)
    flat_x = tf.reshape(x, [-1])
    k = int(flat_x.get_shape().dims[0].value * (1 - p))
    topK, _ = tf.nn.top_k(tf.abs(flat_x), k)
    update_thre = thre.assign(topK[-1])
    tf.add_to_collection('update_thre_op', update_thre)

    mask = tf.zeros(shape)
    mask = tf.select((x > thre) | (x < -thre), tf.ones(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask)

    tf.histogram_summary(w.name, w)
    return w
ternary.py (project: ternarynet, author: czhu95)
def tw_ternarize(x, thre):

    shape = x.get_shape()

    thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thre)

    w_p = tf.get_variable('Wp', collections=[tf.GraphKeys.VARIABLES, 'positives'], initializer=1.0)
    w_n = tf.get_variable('Wn', collections=[tf.GraphKeys.VARIABLES, 'negatives'], initializer=1.0)

    tf.scalar_summary(w_p.name, w_p)
    tf.scalar_summary(w_n.name, w_n)

    mask = tf.ones(shape)
    mask_p = tf.select(x > thre_x, tf.ones(shape) * w_p, mask)
    mask_np = tf.select(x < -thre_x, tf.ones(shape) * w_n, mask_p)
    mask_z = tf.select((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask_z)

    w = w * mask_np

    tf.histogram_summary(w.name, w)
    return w
ugrnn.py (project: InnerOuterRNN, author: Chemoinformatics)
def add_training_ops(self):
        def apply_gradient_clipping(gradient):
            if gradient is not None:
                return tf.mul(tf.clip_by_value(tf.abs(gradient), 0.1, 1.),
                              tf.sign(gradient))
            else:
                return None

        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                           beta1=0.9, beta2=0.999,
                                           epsilon=1e-08,
                                           use_locking=False, name='Adam')

        loss_op = self.loss_op + config.weight_decay_factor * tf.add_n(
            [tf.nn.l2_loss(v) for v in tf.get_collection('weights_decay')])

        gvs = optimizer.compute_gradients(loss_op)

        if self._clip_gradients:
            gvs = [(apply_gradient_clipping(grad), var) for grad, var in gvs]

        train_op = optimizer.apply_gradients(gvs)

        return train_op
BGAN.py (project: BGAN, author: htconquer)
def loss(x64, x_tilde, z_x_log_sigma_sq1, z_x_meanx1, d_x, d_x_p, l_x, l_x_tilde,ss_ ):

    SSE_loss = tf.reduce_mean(tf.square(x64 - x_tilde))


    pair_loss=tf.reduce_mean(tf.square(tf.matmul(z_x_meanx1, tf.transpose(z_x_meanx1))- ss_)) +\
              tf.reduce_mean(tf.square(z_x_meanx1 - tf.sign(z_x_meanx1)))

    KL_loss = tf.reduce_sum(-0.5 * tf.reduce_sum(1 + tf.clip_by_value(z_x_log_sigma_sq1, -10.0, 10.0)
                                                 - tf.square(tf.clip_by_value(z_x_meanx1, -10.0, 10.0))
                                                 - tf.exp(tf.clip_by_value(z_x_log_sigma_sq1, -10.0, 10.0)),
                                                 1)) / 64/64/3

    D_loss = tf.reduce_mean(-1. * (tf.log(tf.clip_by_value(d_x, 1e-5, 1.0)) +
                                   tf.log(tf.clip_by_value(1.0 - d_x_p, 1e-5, 1.0))))
    G_loss = tf.reduce_mean(-1. * (tf.log(tf.clip_by_value(d_x_p, 1e-5, 1.0))))
    LL_loss = tf.reduce_sum(tf.square(l_x - l_x_tilde)) / 64/64./3.
    return SSE_loss, KL_loss, D_loss, G_loss, LL_loss,pair_loss
util.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def sample_bernoulli(probs):
    return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))
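For context, a sketch of how such Bernoulli sampling is typically used in an RBM Gibbs step; the names `v`, `W`, and `b_h` are hypothetical, not from this project:

# Hypothetical RBM step: sample hidden units given visible units v.
hidden_probs = tf.nn.sigmoid(tf.matmul(v, W) + b_h)
hidden_sample = sample_bernoulli(hidden_probs)  # one 0/1 draw per unit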
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type=="class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq,dtype=tf.int32),FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels,tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="frequent":
        float_classes = float_labels[:,0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
          float_classes = tf.nn.xw_plus_b(float_classes,weight_i,bias_i)
          if i<FLAGS.encoder_layers-1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            #float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class,float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers-1):
          float_classes = tf.concat((float_classes,float_labels),axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
labels_rbm.py (project: youtube-8m, author: wangheda)
def sample_prob(self,probs):
        return tf.nn.relu(tf.sign(probs - tf.random_uniform(probs.get_shape())))
linear_svm.py (project: TensorFlow-World, author: astorfi)
def inference_fn(W,b,x_data,y_target):
    prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    return accuracy
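A minimal usage sketch (shapes are illustrative assumptions): labels are expected in {-1, +1} so the tf.sign of the decision function can be compared to them directly.

x_data = tf.placeholder(tf.float32, shape=[None, 2])
y_target = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
accuracy = inference_fn(W, b, x_data, y_target)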
layers.py (project: vae-npvc, author: JeremyCCHsu)
def mu_law_decode_nonlinear(output, quantization_channels=256):
    '''
    Uncompress the waveform amplitudes using the mu-law non-linearity.
    NOTE: here mu-law expansion is applied purely as a non-linear function;
    the input is assumed to already lie in [-1, 1] (the remapping is commented out).
    '''
    with tf.name_scope('decode'):
        mu = quantization_channels - 1
        # Map values back to [-1, 1].
        # signal = 2 * (tf.to_float(output) / mu) - 1
        signal = output
        # Perform inverse of mu-law transformation.
        magnitude = (1 / mu) * ((1 + mu)**abs(signal) - 1)
        return tf.sign(signal) * magnitude
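For reference, a sketch of the standard forward mu-law companding that this function inverts; it is not part of the original file:

def mu_law_encode_nonlinear(signal, quantization_channels=256):
    '''Compress amplitudes in [-1, 1] with the mu-law non-linearity.'''
    with tf.name_scope('encode'):
        mu = quantization_channels - 1
        magnitude = tf.log1p(mu * tf.abs(signal)) / tf.log1p(float(mu))
        return tf.sign(signal) * magnitude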
model_ops.py (project: recurrent-entity-networks, author: jimfleming)
def get_sequence_length(sequence, scope=None):
    "Determine the length of a sequence that has been padded with zeros."
    with tf.variable_scope(scope, 'SequenceLength'):
        used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=[-1]))
        length = tf.cast(tf.reduce_sum(used, reduction_indices=[-1]), tf.int32)
        return length
networks.py (project: identifiera-sarkasm, author: risnejunior)
def calc_seqlenth(input):
    # This code is copied from TFLearn's retrieve_seq_length method. Credit to its creator @aymericdamien
    with tf.name_scope('GetLength'):
        used = tf.sign(tf.reduce_max(tf.abs(input), reduction_indices=2))
        length = tf.reduce_sum(used, reduction_indices=1)
        length = tf.cast(length, tf.int32)
    return length
# This code is copied from TFLearn's advanced_indexing_op() method. Credit to its creator @aymericdamien
shrinkage.py (project: onsager_deep_learning, author: mborgerding)
def simple_soft_threshold(r_, lam_):
    "implement a soft threshold function y=sign(r)*max(0,abs(r)-lam)"
    lam_ = tf.maximum(lam_, 0)
    return tf.sign(r_) * tf.maximum(tf.abs(r_) - lam_, 0)
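A minimal numeric sketch of the thresholding behaviour (values chosen for illustration):

r = tf.constant([-3.0, -0.5, 0.0, 0.5, 3.0])
y = simple_soft_threshold(r, 1.0)
with tf.Session() as sess:
    print(sess.run(y))  # -> [-2.  0.  0.  0.  2.]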
shrinkage.py (project: onsager_deep_learning, author: mborgerding)
def shrink_piecwise_linear(r,rvar,theta):
    """Implement the piecewise linear shrinkage function.
        With minor modifications and variance normalization.
        theta[...,0] : abscissa of first vertex, scaled by sqrt(rvar)
        theta[...,1] : abscissa of second vertex, scaled by sqrt(rvar)
        theta[...,2] : slope from origin to first vertex
        theta[...,3] : slope from first vertex to second vertex
        theta[...,4] : slope after second vertex
    """
    ab0 = theta[...,0]
    ab1 = theta[...,1]
    sl0 = theta[...,2]
    sl1 = theta[...,3]
    sl2 = theta[...,4]

    # scale each column by sqrt(rvar)
    scale_out = tf.sqrt(rvar)
    scale_in = 1/scale_out
    rs = tf.sign(r*scale_in)
    ra = tf.abs(r*scale_in)

    # split the piecewise linear function into regions
    rgn0 = tf.to_float( ra<ab0)
    rgn1 = tf.to_float( ra<ab1) - rgn0
    rgn2 = tf.to_float( ra>=ab1)
    xhat = scale_out * rs*(
            rgn0*sl0*ra +
            rgn1*(sl1*(ra - ab0) + sl0*ab0 ) +
            rgn2*(sl2*(ra - ab1) +  sl0*ab0 + sl1*(ab1-ab0) )
            )
    dxdr =  sl0*rgn0 + sl1*rgn1 + sl2*rgn2
    dxdr = tf.reduce_mean(dxdr,0)
    return (xhat,dxdr)
attack_random_noise.py (project: cleverhans, author: tensorflow)
def main(_):
  eps = FLAGS.max_epsilon / 255.0
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]

  with tf.Graph().as_default():
    x_input = tf.placeholder(tf.float32, shape=batch_shape)
    noisy_images = x_input + eps * tf.sign(tf.random_normal(batch_shape))
    x_output = tf.clip_by_value(noisy_images, 0.0, 1.0)

    with tf.Session(FLAGS.master) as sess:
      for filenames, images in load_images(FLAGS.input_dir, batch_shape):
        out_images = sess.run(x_output, feed_dict={x_input: images})
        save_images(out_images, filenames, FLAGS.output_dir)
rbm.py (project: rbm-ae-tf, author: Cospel)
def sample_prob(self, probs):
        return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))

