Example source code for Python's sign()
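
tf.sign(x) returns the element-wise sign of a tensor: -1 for negative entries, 0 for zeros, and +1 for positive entries. The snippets collected below show it in real projects: mu-law companding, soft-thresholding, fast-gradient-sign adversarial steps, gradient ternarization and binarization, and padding masks built from token ids. As a quick reference, here is a minimal sketch of the basic behavior (TensorFlow 1.x graph mode, matching the API style of the snippets; not taken from any of the projects):

import tensorflow as tf

# Element-wise: -1 where x < 0, 0 where x == 0, +1 where x > 0.
x = tf.constant([-3.2, 0.0, 0.5, 7.0])
y = tf.sign(x)

with tf.Session() as sess:
    print(sess.run(y))  # [-1.  0.  1.  1.]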

layers.py (project: vae-npvc, author: JeremyCCHsu)
def mu_law_encode_nonlinear(audio, quantization_channels=256):
    '''
    Compress the waveform amplitudes using mu-law non-linearity.
    NOTE: This applies mu-law as a non-linear companding transform only,
          as opposed to quantization.
    '''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.multiply(tf.sign(audio), magnitude, name='mulaw')
        # Quantize signal to the specified number of levels.
        # return tf.to_int32((signal + 1) / 2 * mu + 0.5)
        return signal
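
The transform above is F(x) = sign(x) * log(1 + mu*|x|) / log(1 + mu), which maps [-1, 1] onto [-1, 1] without quantizing. A minimal usage sketch for the function defined above; the placeholder shape, batch size, and session setup are assumptions for illustration, not part of vae-npvc:

import numpy as np
import tensorflow as tf

audio = tf.placeholder(tf.float32, shape=[None, 16000])   # assumed waveform batch in [-1, 1]
encoded = mu_law_encode_nonlinear(audio, quantization_channels=256)

with tf.Session() as sess:
    wav = np.random.uniform(-1.0, 1.0, size=(2, 16000)).astype(np.float32)
    out = sess.run(encoded, feed_dict={audio: wav})        # output stays within [-1, 1]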
querysum_model.py (project: querysum, author: helmertz)
def _score(self, prev_decoder_state, prev_embedding):
        # Returns scores in a tensor of shape [batch_size, input_sequence_length]

        if self.mode == 'decode':
            query_part = self.query_attention_partial_score_placeholder
            encoder_part = self.encoder_state_attention_partial_scores_placeholder
        else:
            query_part = self.query_attention_partial_score
            encoder_part = self.encoder_state_attention_partial_scores

        embedding_part = tf.matmul(prev_embedding, self.attention_w_e)

        output = tf.matmul(prev_decoder_state,
                           self.attention_w) + embedding_part + query_part + encoder_part + self.attention_b
        output = tf.tanh(output)
        output = tf.reduce_sum(self.attention_v * output, axis=2)
        output = tf.transpose(output, [1, 0])

        # Handle input document padding by giving a large penalty, eliminating it from the weighted average
        padding_penalty = -1e20 * tf.to_float(1 - tf.sign(self.documents_placeholder))
        masked = output + padding_penalty

        return masked
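
The penalty term relies on padded token ids being 0: tf.sign maps real ids (> 0) to 1 and padding to 0, so 1 - tf.sign(ids) selects exactly the padded positions, and adding -1e20 there drives their weights to zero after a softmax. A small sketch of just that masking step, with made-up tensors (not the querysum graph):

import tensorflow as tf

doc_ids = tf.constant([[4, 9, 2, 0, 0]])             # 0 marks padding (assumed convention)
scores = tf.constant([[0.3, 1.2, -0.4, 0.9, 0.1]])   # raw attention scores

penalty = -1e20 * tf.to_float(1 - tf.sign(doc_ids))  # 0 for real tokens, -1e20 for padding
weights = tf.nn.softmax(scores + penalty)            # padded positions get ~0 weight

with tf.Session() as sess:
    print(sess.run(weights))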
shrinkage.py (project: onsager_deep_learning, author: mborgerding)
def shrink_soft_threshold(r,rvar,theta):
    """
    soft threshold function
        y=sign(x)*max(0,abs(x)-theta[0]*sqrt(rvar) )*scaling
    where scaling is theta[1] (default=1)
    in other words, if theta has length 1, the standard (unscaled) soft threshold is applied
    """
    if len(theta.get_shape())>0 and theta.get_shape() != (1,):
        lam = theta[0] * tf.sqrt(rvar)
        scale=theta[1]
    else:
        lam  = theta * tf.sqrt(rvar)
        scale = None
    lam = tf.maximum(lam,0)
    arml = tf.abs(r) - lam
    xhat = tf.sign(r) * tf.maximum(arml,0)
    dxdr = tf.reduce_mean(tf.to_float(arml>0),0)
    if scale is not None:
        xhat = xhat*scale
        dxdr = dxdr*scale
    return (xhat,dxdr)
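
This is the standard soft-threshold operator eta(r) = sign(r) * max(|r| - lambda, 0) with lambda = theta[0] * sqrt(rvar) and an optional output scaling theta[1]; dxdr is the column-wise mean of its derivative (1 where |r| > lambda, else 0). A NumPy reference of the same operator, handy for sanity checks; this helper is an illustration, not part of onsager_deep_learning:

import numpy as np

def soft_threshold_np(r, rvar, theta0, theta1=1.0):
    lam = np.maximum(theta0 * np.sqrt(rvar), 0.0)
    arml = np.abs(r) - lam
    xhat = np.sign(r) * np.maximum(arml, 0.0) * theta1
    dxdr = np.mean((arml > 0).astype(np.float32), axis=0) * theta1
    return xhat, dxdr

r = np.array([[1.5, -0.2], [-2.0, 0.6]])
print(soft_threshold_np(r, rvar=1.0, theta0=0.5))   # shrinks toward zero by lambda = 0.5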
shrinkage.py (project: onsager_deep_learning, author: mborgerding)
def shrink_spline(r,rvar,theta):
    """ Spline-based shrinkage function
    """
    scale = theta[0]*tf.sqrt(rvar)
    rs = tf.sign(r)
    ar = tf.abs(r/scale)
    ar2 = tf.square(ar)
    ar3 = ar*ar2
    reg1 = tf.to_float(ar<1)
    reg2 = tf.to_float(ar<2)-reg1
    ar_m2 = 2-ar
    ar_m2_p2 = tf.square(ar_m2)
    ar_m2_p3 = ar_m2*ar_m2_p2
    beta3 = ( (2./3 - ar2  + .5*ar3)*reg1 + (1./6*(ar_m2_p3))*reg2 )
    xhat = r*(theta[1] + theta[2]*beta3)
    return (xhat,auto_gradients(xhat,r))
attacks.py (project: cleverhans, author: tensorflow)
def attack_single_step(self, x, eta, y):
        """
        Given the original image and the perturbation computed so far, computes
        a new perturbation.

        :param x: A tensor with the original input.
        :param eta: A tensor the same shape as x that holds the perturbation.
        :param y: A tensor with the target labels or ground-truth labels.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import model_loss, clip_eta

        adv_x = x + eta
        preds = self.model.get_probs(adv_x)
        loss = model_loss(y, preds)
        if self.targeted:
            loss = -loss
        grad, = tf.gradients(loss, adv_x)
        scaled_signed_grad = self.eps_iter * tf.sign(grad)
        adv_x = adv_x + scaled_signed_grad
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
        eta = adv_x - x
        eta = clip_eta(eta, self.ord, self.eps)
        return x, eta
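
The heart of this step is the fast gradient sign update: move the adversarial example by eps_iter in the direction sign(grad_x loss), clip to the valid pixel range, and re-project the accumulated perturbation eta into the eps-ball with clip_eta. A stripped-down, self-contained sketch of one sign-based step for a generic classifier; model_fn and the cross-entropy loss here are placeholders, not the cleverhans API:

import tensorflow as tf

def fgsm_step(x, y, model_fn, eps=0.01, clip_min=0.0, clip_max=1.0):
    # model_fn is assumed to map images to logits.
    logits = model_fn(x)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    grad, = tf.gradients(loss, x)
    adv_x = x + eps * tf.sign(grad)   # step in the direction that increases the loss
    return tf.clip_by_value(adv_x, clip_min, clip_max)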
utils.py (project: magenta, author: tensorflow)
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded int8 data.
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out
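
Ignoring the floor and int8 cast (which are lossy), the inverse of the companding step is x = sign(y) * ((1 + mu)^|y| - 1) / mu for y in [-1, 1]. Magenta ships its own inverse; the sketch below only illustrates that formula under the same *128 scaling convention and is an assumption, not the library function:

import tensorflow as tf

def mu_law_decode_approx(encoded, mu=255):
    # Undo the *128 scaling, then expand; approximate inverse of mu_law above.
    y = tf.cast(encoded, tf.float32) / 128.0
    magnitude = (tf.pow(1.0 + mu, tf.abs(y)) - 1.0) / mu
    return tf.sign(y) * magnitude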
model.py (project: NER-LSTM-CRF, author: liu-nlper)
def compute_loss(self):
        """
        Compute the loss.

        Return:
            loss: scalar
        """
        if not self._use_crf:
            labels = tf.reshape(
                tf.contrib.layers.one_hot_encoding(
                    tf.reshape(self.input_label_ph, [-1]), num_classes=self._nb_classes),
                shape=[-1, self._sequence_length, self._nb_classes])
            cross_entropy = -tf.reduce_sum(labels * tf.log(self.logits), axis=2)
            mask = tf.sign(tf.reduce_max(tf.abs(labels), axis=2))
            cross_entropy_masked = tf.reduce_sum(
                cross_entropy*mask, axis=1) / tf.cast(self.sequence_actual_length, tf.float32)
            return tf.reduce_mean(cross_entropy_masked)
        else:
            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
                self.logits, self.input_label_ph, self.sequence_actual_length)
            return tf.reduce_mean(-log_likelihood)
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples
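
This is inverse-CDF sampling of a Laplace(loc, scale) distribution: for U uniform on (-1, 1), loc - scale * sign(U) * log(1 - |U|) is Laplace-distributed, and drawing U strictly above -1 (via np.nextafter) avoids log(0). A NumPy sketch of the same construction for a quick distributional check, purely illustrative:

import numpy as np

rng = np.random.RandomState(0)
loc, scale = 0.0, 2.0
u = rng.uniform(low=np.nextafter(-1.0, 0.0), high=1.0, size=100000)
samples = loc - scale * np.sign(u) * np.log1p(-np.abs(u))

# Laplace(0, 2): mean ~ 0, variance ~ 2 * scale**2 = 8.
print(samples.mean(), samples.var())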
rnn.py (project: hyperchamber, author: 255BITS)
def create_rnn(config, x, scope='rnn'):
    with tf.variable_scope(scope):
        memory=config['rnn_size']
        cell = rnn_cell.BasicLSTMCell(memory)
        state = cell.zero_state(batch_size=config['batch_size'], dtype=tf.float32)
        x, state = rnn.rnn(cell, [tf.cast(x,tf.float32)], initial_state=state, dtype=tf.float32)
        x = x[-1]
        #w = tf.get_variable('w', [hc.get('rnn_size'),4])
        #b = tf.get_variable('b', [4])
        #x = tf.nn.xw_plus_b(x, w, b)
        x=tf.sign(x)
        return x, state

# Each step of the graph we have:
# x is [BATCH_SIZE, 4], where the data is a one-hot binary vector of the form:
# [start_token end_token a b]
#
# y is [BATCH_SIZE, 4], a binary vector of the chance that each character is correct
#
layers.py (project: aboleth, author: data61)
def __init__(self, n_features, lenscale=1.0, p=1, variational=False,
                 lenscale_posterior=None):
        """Create an instance of an arc cosine kernel layer."""
        # Setup random weights
        if variational:
            kern = RBFVariational(lenscale=lenscale,
                                  lenscale_posterior=lenscale_posterior)
        else:
            kern = RBF(lenscale=lenscale)
        super().__init__(n_features=n_features, kernel=kern)

        # Kernel order
        assert isinstance(p, int) and p >= 0
        if p == 0:
            self.pfunc = tf.sign
        elif p == 1:
            self.pfunc = lambda x: x
        else:
            self.pfunc = lambda x: tf.pow(x, p)
yellowfin.py (project: YellowFin, author: JianGoForIt)
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._grad_var) ), [self._grad_var,]),
    #   tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._grad_var) ), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x
bingrad_common.py (project: terngrad, author: wenwei202)
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a,4) # assume the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
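
Each uint8 packs four ternary values a_i in {0, 1, 2} as a1 + 4*a2 + 16*a3 + 64*a4, so decoding is repeated floor-division and modulo by 4 followed by shifting back to {-1, 0, 1}. terngrad has its own decoder elsewhere; the sketch below is only the arithmetic inverse (it does not strip the padding added above) and should be read as an assumption:

import tensorflow as tf

def ternary_decoder_sketch(encoded):
    a = tf.cast(encoded, tf.int32)
    a_split1 = tf.mod(a, 4)                    # value packed in the lowest 2 bits
    a_split2 = tf.mod(tf.floordiv(a, 4), 4)
    a_split3 = tf.mod(tf.floordiv(a, 16), 4)
    a_split4 = tf.floordiv(a, 64)
    decoded = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
    return tf.cast(decoded, tf.float32) - 1.0  # shift {0,1,2} back to {-1,0,1}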
bingrad_common.py (project: terngrad, author: wenwei202)
def stochastical_binarize_gradients(grads_and_vars, scalers):
  """Stochastically binarize gradients."""
  gradients, variables = zip(*grads_and_vars)
  binarized_gradients = []
  for gradient, scaler in zip(gradients, scalers):
    if gradient is None:
      binarized_gradients.append(None)
      continue
    if isinstance(gradient, tf.IndexedSlices):
      gradient_shape = gradient.dense_shape
    else:
      gradient_shape = gradient.get_shape()

    zeros = tf.zeros(gradient_shape)
    abs_gradient = tf.abs(gradient)
    sign_gradient = tf.sign( gradient )
    rnd_sample = tf.random_uniform(gradient_shape,0,scaler)
    where_cond = tf.less(rnd_sample, abs_gradient)
    binarized_gradient = tf.cond(tf.size(gradient) < FLAGS.size_to_binarize,
                               lambda: gradient,
                               lambda: tf.where(where_cond, sign_gradient * scaler, zeros))

    binarized_gradients.append(binarized_gradient)
  return list(zip(binarized_gradients, variables))
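
The randomization keeps the gradient unbiased: each component becomes scaler * sign(g) with probability |g| / scaler and 0 otherwise, so its expectation is sign(g) * |g| = g (assuming scaler >= max|g|). A quick NumPy check of that property, purely illustrative:

import numpy as np

rng = np.random.RandomState(0)
g = np.array([0.3, -0.7, 0.05])
scaler = 1.0
trials = 200000

samples = np.where(rng.uniform(0, scaler, size=(trials, g.size)) < np.abs(g),
                   np.sign(g) * scaler, 0.0)
print(samples.mean(axis=0))   # approaches g = [0.3, -0.7, 0.05]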
test_ternary_encoder_decoder.py (project: terngrad, author: wenwei202)
def ternary_encoder(input_data):
  """Encoding and compressing the signs """
  a = tf.sign(input_data) # -1, 0, 1
  a = tf.add(a,1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
  a = tf.reshape(a,[-1])
  pad_size = 4 - tf.mod(tf.size(a), 4)
  pad = tf.range(0.0, pad_size)
  a = tf.concat([a, pad], 0)
  a_split1, a_split2, a_split3, a_split4 = tf.split(a,4) # assume the size is divisible by 4

  # encode 4 grads into 1 Byte
  sum_1 = tf.add(a_split1, a_split2*4)
  sum_2 = tf.add(a_split3*16, a_split4*64)
  sum_all = tf.add(sum_1, sum_2)
  encoded = tf.cast(sum_all, tf.uint8)
  return encoded
Bidirectionnet_GMM_softmaxloss.py (project: image-text-matching, author: llltttppp)
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        tf.summary.scalar('loss/sparse_loss',self.sparse_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Bidirectionnet_GMM_softmaxloss.py (project: image-text-matching, author: llltttppp)
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag)*tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Bidirectionnet_GMM_better_topK.py (project: image-text-matching, author: llltttppp)
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Bidirectionnet_GMM_better_topK.py (project: image-text-matching, author: llltttppp)
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)), K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d)*tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Bidirectionnet_GMM_better_topK_9000feat.py (project: image-text-matching, author: llltttppp)
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Bidirectionnet_GMM9000feat_softmaxloss.py (project: image-text-matching, author: llltttppp)
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/softmax_loss',self.softmaxloss)
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Bidirectionnet_GMM9000feat_softmaxloss.py (project: image-text-matching, author: llltttppp)
def top_K_loss_margin(self,sentence,image,K=50,margin=0.3):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * d *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
        image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Bidirectionnet_GMM_sigmod9000feat.py (project: image-text-matching, author: llltttppp)
def build_summary(self):
        tf.summary.scalar('loss/reg_loss', tf.add_n(self.reg_loss))
        tf.summary.scalar('loss/total_loss', self.total_loss)
        if self.is_skip:
            tf.summary.histogram('activation/image_fc2',self.image_fc2)
        if not self.is_TopKloss:
            tf.summary.histogram('data_similarity/imsim',tf.sign(tf.nn.relu(self.image_margin-self.im_similarity)))
            tf.summary.histogram('data_similarity/sensim',tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity)))
        tf.summary.scalar('msic/dneg', self.d_neg)
        tf.summary.scalar('msic/dpos', self.d_pos)        
        for name, tensor in self.endpoint.items():
            tf.summary.histogram('activation/' + name, tensor)

        t_var = tf.trainable_variables()
        watch_list = ['s_fc1', 's_fc2']
        if not self.is_skip:
            watch_list += ['i_fc1', 'i_fc2']        
        for watch_scope in watch_list:
            watch_var = [var for var in t_var if watch_scope+'/weights' in var.name]
            tf.summary.histogram('weights/'+watch_scope, watch_var[0])
Bidirectionnet_GMM_sigmod9000feat.py (project: image-text-matching, author: llltttppp)
def top_K_loss_margin(self,sentence,image,K=50,margin=0.2):
        sim_matrix = tf.matmul(sentence, image, transpose_b=True)
        s_square = tf.reduce_sum(tf.square(sentence), axis=1)
        im_square = tf.reduce_sum(tf.square(image), axis=1)
        d = 1-tf.sigmoid(sim_matrix)
        positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
        length = tf.shape(d)[-1]
        dd = tf.matrix_set_diag(d, 8 * tf.ones([length]))
        flag =8-7*tf.sign(tf.nn.relu(self.sen_margin-self.sen_similarity))
        sen_loss_K ,_ = tf.nn.top_k(-1.0 * dd *flag, K, sorted=False) # note: this is negative value
        im_loss_K,_ = tf.nn.top_k(-tf.transpose(1.0 * dd*flag), K, sorted=False) # note: this is negative value
        sentence_center_loss = -tf.log(1-positive+1e-12)-tf.log(-sen_loss_K+1e-12)
        image_center_loss = -tf.log(1-positive+1e-12)-tf.log(-im_loss_K+1e-12)
        self.d_neg = tf.reduce_mean((sen_loss_K + im_loss_K)/-2.0)
        self.d_pos =tf.reduce_mean(positive)
        self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
        self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
        self.endpoint['debug/d_Matrix'] = d
        self.endpoint['debug/positive'] = positive
        self.endpoint['debug/s_center_loss'] = sentence_center_loss
        self.endpoint['debug/i_center_loss'] = image_center_loss
        self.endpoint['debug/S'] = sim_matrix
        self.endpoint['debug/sentence_square'] = s_square
        self.endpoint['debug/image_square'] = im_square
        return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
attacks.py (project: FeatureSqueezing, author: QData)
def get_gradient_sign_tf(x, predictions):
    """
    TensorFlow implementation of calculating the signed gradient with respect to x.
    :param x: the input placeholder
    :param predictions: the model's output tensor
    :return: a tensor for the adversarial example
    """

    # Compute loss
    y = tf.to_float(tf.equal(predictions, tf.reduce_max(predictions, 1, keep_dims=True)))
    y = y / tf.reduce_sum(y, 1, keep_dims=True)
    loss = utils_tf.tf_model_loss(y, predictions, mean=False)

    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, x)

    # Take sign of gradient
    signed_grad = tf.sign(grad)
    signed_grad = tf.stop_gradient(signed_grad)
    return signed_grad
diet.py (project: tefla, author: openAGI)
def _quantize(x, params, randomize=True):
    """Quantize x according to params, optionally randomizing the rounding."""
    if not params.quantize:
        return x

    if not randomize:
        return tf.bitcast(
            tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

    abs_x = tf.abs(x)
    sign_x = tf.sign(x)
    y = abs_x / params.quantization_scale
    y = tf.floor(y + tf.random_uniform(tf.shape(x)))
    y = tf.minimum(y, tf.int16.max) * sign_x
    q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
    return q
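
Decoding reverses the bitcast and rescales; the randomized floor makes the quantization unbiased but lossy, so a round trip only recovers x approximately. tefla defines its own dequantization routine; the sketch below is just the obvious inverse under the same params object and should be read as an assumption:

import tensorflow as tf

def _dequantize_sketch(q, params):
    # Bitcast the float16 payload back to int16, then rescale.
    if not params.quantize:
        return q
    return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * params.quantization_scale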
util.py (project: cfrnet, author: clinicalml)
def lindisc(X,p,t):
    ''' Linear MMD '''

    it = tf.where(t>0)[:,0]
    ic = tf.where(t<1)[:,0]

    Xc = tf.gather(X,ic)
    Xt = tf.gather(X,it)

    mean_control = tf.reduce_mean(Xc,reduction_indices=0)
    mean_treated = tf.reduce_mean(Xt,reduction_indices=0)

    c = tf.square(2*p-1)*0.25
    f = tf.sign(p-0.5)

    mmd = tf.reduce_sum(tf.square(p*mean_treated - (1-p)*mean_control))
    mmd = f*(p-0.5) + safe_sqrt(c + mmd)

    return mmd
varlen_support.py (project: RNNVis, author: myaooo)
def sequence_length(sequence):
    """
    Get the length tensor of a batched_sequence
        When the input is an embedded sequence (a 3D tensor), the padded positions should be filled with 0s;
        when the input is a sequence of word ids (a 2D tensor), the padded positions should be filled with -1s.
    :param sequence: a Tensor of shape [batch_size, max_length(, embedding_size)]
    :return: a 1D Tensor of shape (batch_size,) representing the length of the sequence
    """
    embedding = len(sequence.get_shape()) == 3
    if embedding:
        # zeros will be 0., others will be 1.
        used = tf.sign(tf.reduce_max(tf.abs(sequence), axis=2))
    else:
        # -1 will be 0, others will be 1.
        used = tf.sign(sequence+1)
    length = tf.reduce_sum(used, axis=1)
    length = tf.cast(length, tf.int32)
    return length
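
A quick usage sketch covering both conventions the docstring describes: -1-padded word ids (2D) and zero-padded embeddings (3D). The inputs are made up for illustration:

import numpy as np
import tensorflow as tf

ids = tf.constant([[5, 2, 9, -1], [7, -1, -1, -1]])   # -1 marks padding -> lengths [3, 1]
emb = tf.constant(np.array([[[0.1, 0.2], [0.3, 0.4], [0.0, 0.0]],
                            [[0.5, 0.1], [0.2, 0.2], [0.9, 0.3]]], dtype=np.float32))
                                                      # all-zero rows mark padding -> lengths [2, 3]

with tf.Session() as sess:
    print(sess.run(sequence_length(ids)))   # [3 1]
    print(sess.run(sequence_length(emb)))   # [2 3]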
tiny_yolo.py (project: YOLOv1_tensorflow, author: Nielsyang)
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res
very_tiny_yolov2.py (project: YOLOv1_tensorflow, author: Nielsyang)
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res
very_tiny_yolov3.py (project: YOLOv1_tensorflow, author: Nielsyang)
def loss_func_yolo(output, label):
  res = 0

  for i in range(BATCH_SIZE):
    for j in range(0, S*S*(B*5+CLASSES), B*5+CLASSES):
      res += COORD_W * tf.sign(label[i][j+2]) * (tf.square(output[i][j] - label[i][j]) + tf.square(output[i][j+1]-label[i][j+1]) + 
                                               tf.square(output[i][j+2]/(label[i][j+2]+1e-7) - 1) + 
                                               tf.square(output[i][j+3]/(label[i][j+3]+1e-7) - 1))

      res += tf.sign(label[i][j+2]) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j])) * (tf.square(output[i][j+4] - label[i][j+4]))

      res += COORD_W * tf.sign(label[i][j+7]) * (tf.square(output[i][j+5] - label[i][j+5]) + tf.square(output[i][j+6]-label[i][j+6]) + 
                                               tf.square(output[i][j+7]/(label[i][j+7]+1e-7) - 1) + 
                                               tf.square(output[i][j+8]/(label[i][j+8]+1e-7) - 1))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += NOOBJ_W * tf.sign(tf.floor(label[i][j+5])) * (tf.square(output[i][j+9] - label[i][j+9]))

      res += tf.sign(label[i][j+7]) * (tf.square(output[i][j+10] - label[i][j+10]) + tf.square(output[i][j+11] - label[i][j+11]))

  return res

