Example source code for Python's transpose()
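Before the project snippets, here is a minimal, self-contained sketch of what tf.transpose(x, perm) does, written against the TF 1.x API used throughout this page; the shapes and values are invented for illustration.

import numpy as np
import tensorflow as tf

# perm[i] names which input axis becomes output axis i.
x = tf.constant(np.arange(24).reshape(2, 3, 4), dtype=tf.float32)  # [batch, time, feature]
y = tf.transpose(x, perm=[1, 0, 2])                                # -> [time, batch, feature]

with tf.Session() as sess:
    print(sess.run(tf.shape(y)))  # [3 2 4]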

p8_TextRNN_model.py (project: text_classification, author: brightmart)
def inference(self):
        """main computation graph here: 1. embeddding layer, 2.Bi-LSTM layer, 3.concat, 4.FC layer 5.softmax """
        #1.get emebedding of words in the sentence
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding,self.input_x) #shape:[None,sentence_length,embed_size]
        #2. Bi-lstm layer
        # define lstm cells: get lstm cell output
        lstm_fw_cell=rnn.BasicLSTMCell(self.hidden_size) #forward direction cell
        lstm_bw_cell=rnn.BasicLSTMCell(self.hidden_size) #backward direction cell
        if self.dropout_keep_prob is not None:
            lstm_fw_cell=rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=self.dropout_keep_prob)
            lstm_bw_cell=rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=self.dropout_keep_prob)
        # bidirectional_dynamic_rnn: input: [batch_size, max_time, input_size]
        #                            output: A tuple (outputs, output_states)
        #                                    where:outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
        outputs,_=tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,lstm_bw_cell,self.embedded_words,dtype=tf.float32) #[batch_size,sequence_length,hidden_size] #creates a dynamic bidirectional recurrent neural network
        print("outputs:===>",outputs) #outputs:(<tf.Tensor 'bidirectional_rnn/fw/fw/transpose:0' shape=(?, 5, 100) dtype=float32>, <tf.Tensor 'ReverseV2:0' shape=(?, 5, 100) dtype=float32>))
        #3. concat output
        output_rnn=tf.concat(outputs,axis=2) #[batch_size,sequence_length,hidden_size*2]
        self.output_rnn_last=tf.reduce_mean(output_rnn,axis=1) #[batch_size,hidden_size*2] #output_rnn_last=output_rnn[:,-1,:] ##[batch_size,hidden_size*2] #TODO
        print("output_rnn_last:", self.output_rnn_last) # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
        #4. logits(use linear layer)
        with tf.name_scope("output"): #inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
            logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection  # [batch_size,num_classes]
        return logits
tflittle_gru.py (project: identifiera-sarkasm, author: risnejunior)
def feed_network(self,data,keep_prob,chunk_size,n_chunks,dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._gru_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data, axis)
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)
        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]
        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
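The block above converts batch-major input to time-major before tf.unstack, which static_rnn requires. A minimal sketch of just that trick, with invented shapes:

import tensorflow as tf

data = tf.zeros([32, 10, 128])                 # [batch, time, features], invented
ndim = len(data.get_shape().as_list())
axis = [1, 0] + list(range(2, ndim))           # [1, 0, 2]: swap batch and time
time_major = tf.transpose(data, axis)          # [time, batch, features]
sequence = tf.unstack(time_major)              # list of 10 [batch, features] tensors
print(len(sequence), sequence[0].get_shape())  # 10 (32, 128)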
tf_qrnn.py (project: tensorflow_qrnn, author: icoxfog417)
def forward(self, x):
        length = lambda mx: int(mx.get_shape()[0])

        with tf.variable_scope("QRNN/Forward"):
            if self.c is None:
                # init context cell
                self.c = tf.zeros([length(x), self.kernel.size], dtype=tf.float32)

            if self.conv_size <= 2:
                # x is batch_size x sentence_length x word_length
                # -> now, transpose it to sentence_length x batch_size x word_length
                _x = tf.transpose(x, [1, 0, 2])

                for i in range(length(_x)):
                    t = _x[i] # t is batch_size x word_length matrix
                    f, z, o = self.kernel.forward(t)
                    self._step(f, z, o)
            else:
                c_f, c_z, c_o = self.kernel.conv(x)
                for i in range(length(c_f)):
                    f, z, o = c_f[i], c_z[i], c_o[i]
                    self._step(f, z, o)

        return self.h
test_tf_qrnn_work.py (project: tensorflow_qrnn, author: icoxfog417)
def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        _X = tf.transpose(X, [1, 0, 2])  # batch_size x sentence_length x word_length -> sentence_length x batch_size x word_length
        _X = tf.reshape(_X, [-1, int(shape[2])])  # (batch_size x sentence_length) x word_length
        seq = tf.split(_X, int(shape[1]), axis=0)  # sentence_length x (batch_size x word_length)

        with tf.name_scope("LSTM"):
            lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
            outputs, states = rnn.static_rnn(lstm_cell, seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output
losses.py (project: seq2seq, author: google)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses passed the sequence length.

  Args:
    logits: Logits of shape `[T, B, vocab_size]`
    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input

  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=targets)

    # Mask out the losses we don't care about
    loss_mask = tf.sequence_mask(
        tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
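A hedged usage sketch of the function above, assuming tf is imported and cross_entropy_sequence_loss is in scope: tf.sequence_mask produces a [B, T] mask while the losses are [T, B], which is why the transpose with perm [1, 0] is needed. The shapes and lengths here are invented:

import tensorflow as tf

T, B, V = 5, 2, 7
logits = tf.random_normal([T, B, V])
targets = tf.zeros([T, B], dtype=tf.int32)
lengths = tf.constant([5, 3])   # second example is padding after step 3

losses = cross_entropy_sequence_loss(logits, targets, lengths)  # [T, B]
# rows 3 and 4 of column 1 are zeroed out by the transposed mask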
seq2seq_model.py (project: seq2seq, author: google)
def compute_loss(self, decoder_output, _features, labels):
    """Computes the loss for this model.

    Returns a tuple `(losses, loss)`, where `losses` are the per-batch
    losses and loss is a single scalar tensor to minimize.
    """
    #pylint: disable=R0201
    # Calculate loss per example-timestep of shape [B, T]
    losses = seq2seq_losses.cross_entropy_sequence_loss(
        logits=decoder_output.logits[:, :, :],
        targets=tf.transpose(labels["target_ids"][:, 1:], [1, 0]),
        sequence_length=labels["target_len"] - 1)

    # Calculate the average log perplexity
    loss = tf.reduce_sum(losses) / tf.to_float(
        tf.reduce_sum(labels["target_len"] - 1))

    return losses, loss
pointnet_cls.py (project: pointnet, author: charlesq34)
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: B*NUM_CLASSES,
        label: B, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff) 
    tf.summary.scalar('mat loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
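The mat_diff term above is the standard orthogonality regularizer: for an orthogonal matrix A, the product of A and its transpose is the identity, so the penalty vanishes exactly when the learned transform is orthogonal. A quick NumPy check of that identity (matrix invented via QR):

import numpy as np

A = np.linalg.qr(np.random.randn(3, 3))[0]  # a random orthogonal matrix
print(np.allclose(A.dot(A.T), np.eye(3)))   # True -> mat_diff would be ~0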
pointnet_seg.py (project: pointnet, author: charlesq34)
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: BxNxC,
        label: BxN, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff) 
    tf.summary.scalar('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
pointnet_part_seg.py (project: pointnet, author: charlesq34)
def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
    per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
    label_loss = tf.reduce_mean(per_instance_label_loss)

    # size of seg_pred is batch_size x point_num x part_cat_num
    # size of seg is batch_size x point_num
    per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
    seg_loss = tf.reduce_mean(per_instance_seg_loss)

    per_instance_seg_pred_res = tf.argmax(seg_pred, 2)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform'] # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1])) - tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff) 


    total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3

    return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
test-embedding.py (project: deep-learning, author: ljanyst)
def __init__(self, embedding):
        self.sess         = tf.Session()
        self.inputs       = tf.placeholder(tf.float32,
                                           [None, embedding.shape[1]],
                                           name='inputs')
        self.test_vec     = tf.placeholder(tf.float32, [1, embedding.shape[1]],
                                           name='test_vec')
        self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

        #-----------------------------------------------------------------------
        # Compute normalized embedding matrix
        #-----------------------------------------------------------------------
        row_sum    = tf.reduce_sum(tf.square(self.inputs), axis=1,
                                   keep_dims=True)
        norm       = tf.sqrt(row_sum)
        self.normalized = self.inputs / norm
        self.embedding = self.sess.run(self.normalized,
                                       feed_dict={self.inputs: embedding})

    #---------------------------------------------------------------------------
image.py (project: vae-npvc, author: JeremyCCHsu)
def make_png_thumbnail(x, n):
    '''
    Input:
        `x`: Tensor, value range=[-1, 1), shape=[n*n, h, w, c]
        `n`: sqrt of the number of images

    Return:
        `tf.string` (bytes) of the PNG.
        (write these bytes directly to a file)
    '''
    with tf.name_scope('MakeThumbnail'):
        _, h, w, c = x.get_shape().as_list()
        x = tf.reshape(x, [n, n, h, w, c])
        x = tf.transpose(x, [0, 2, 1, 3, 4])
        x = tf.reshape(x, [n * h, n * w, c])
        x = x / 2. + .5
        x = tf.image.convert_image_dtype(x, tf.uint8, saturate=True)
        x = tf.image.encode_png(x)
    return x
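The reshape -> transpose -> reshape sequence above is the usual montage trick: transposing axes 1 and 2 interleaves grid rows with image rows, so the final reshape yields one (n*h) x (n*w) image. A NumPy sketch with an invented 2x2 grid of 3x3 single-channel images:

import numpy as np

n, h, w, c = 2, 3, 3, 1
x = np.arange(n * n * h * w * c).reshape(n, n, h, w, c)
x = x.transpose(0, 2, 1, 3, 4)  # interleave grid rows with image rows
x = x.reshape(n * h, n * w, c)  # one big montage
print(x.shape)                  # (6, 6, 1)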
image.py (project: vae-npvc, author: JeremyCCHsu)
def make_png_jet_thumbnail(x, n):
    '''
    Input:
        `x`: Tensor, value range=[-1, 1), shape=[n*n, h, w, c]
        `n`: sqrt of the number of images

    Return:
        `tf.string` (bytes) of the PNG.
        (write these bytes directly to a file)
    '''
    with tf.name_scope('MakeThumbnail'):
        _, h, w, c = x.get_shape().as_list()
        x = tf.reshape(x, [n, n, h, w, c])
        x = tf.transpose(x, [0, 2, 1, 3, 4])
        x = tf.reshape(x, [n * h, n * w, c])
        x = x / 2. + .5
        x = gray2jet(x)
        x = tf.image.convert_image_dtype(x, tf.uint8, saturate=True)
        x = tf.image.encode_png(x)
    return x
repeat.py (project: cxflow-tensorflow, author: Cognexa)
def repeat(tensor: tf.Tensor, repeats: int, axis: int) -> tf.Tensor:
    """
    Repeat elements of the input tensor in the specified axis ``repeats``-times.

    .. note::
        Chaining of this op may produce TF warnings although the performance seems to be unaffected.

    :param tensor: TF tensor to be repeated
    :param repeats: number of repeats
    :param axis: axis to repeat
    :return: tensor with repeated elements
    """
    shape = tensor.get_shape().as_list()

    dims = np.arange(len(tensor.shape))
    prepare_perm = np.hstack(([axis], np.delete(dims, axis)))
    restore_perm = np.hstack((dims[1:axis+1], [0], dims[axis+1:]))

    indices = tf.cast(tf.floor(tf.range(0, shape[axis]*repeats)/tf.constant(repeats)), 'int32')

    shuffled = tf.transpose(tensor, prepare_perm)
    repeated = tf.gather(shuffled, indices)
    return tf.transpose(repeated, restore_perm)
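A hedged usage sketch of repeat(), assuming the function above is in scope: moving the target axis to the front, gathering each slice repeats-times, then restoring the axis order gives numpy.repeat semantics. The input values are invented:

import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
r = repeat(t, repeats=2, axis=1)  # expected: [[1, 1, 2, 2], [3, 3, 4, 4]]

with tf.Session() as sess:
    print(sess.run(r))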
unet.py (project: lung-cancer-detector, author: YichenGong)
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """

    V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    V -= tf.reduce_min(V)
    V /= tf.reduce_max(V)
    V *= 255

    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    V = tf.reshape(V, tf.stack((img_w, img_h, 1)))
    V = tf.transpose(V, (2, 0, 1))
    V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))
    return V
networks.py (project: comprehend, author: Fenugreek)
def recode(self, inputs, skips=None, store=False, **kwargs):
        """
        Wrapper around seq_update() to fit recode() signature of parent class.
        """

        state = self.get_state()
        if 'hidden' in state:
            outputs, states = \
                self.seq_update(inputs, state['hidden'], skips=skips,
                                outputs=[state['output']],
                                states=[], as_list=True, store=store)
        else:            
            outputs, states = \
                self.seq_update(inputs, None, skips=skips,
                                outputs=[inputs[:, :, 0]],
                                states=[], as_list=True, store=store)

        return tf.transpose(tf.stack(outputs[:-1]), perm=[1, 2, 0])
p8_TextRNN_model.py (project: text_classification, author: brightmart)
def loss_nce(self,l2_lambda=0.0001): #0.0001-->0.001
        """calculate loss using (NCE)cross entropy here"""
        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        if self.is_training: #training
            #labels=tf.reshape(self.input_y,[-1])               #[batch_size,1]------>[batch_size,]
            labels=tf.expand_dims(self.input_y,1)                   #[batch_size,]----->[batch_size,1]
            loss = tf.reduce_mean( #inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
                tf.nn.nce_loss(weights=tf.transpose(self.W_projection),#[hidden_size*2, num_classes]--->[num_classes,hidden_size*2]. nce_weights:A `Tensor` of shape `[num_classes, dim].O.K.
                               biases=self.b_projection,                 #[label_size]. nce_biases:A `Tensor` of shape `[num_classes]`.
                               labels=labels,                 #[batch_size,1]. train_labels, # A `Tensor` of type `int64` and shape `[batch_size,num_true]`. The target classes.
                               inputs=self.output_rnn_last,# [batch_size,hidden_size*2] #A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
                               num_sampled=self.num_sampled,  #scalar. 100
                               num_classes=self.num_classes,partition_strategy="div"))  #scalar. 1999
        l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
        loss = loss + l2_losses
        return loss
Random_clip_valid.py (project: C3D-tensorflow, author: hx173149)
def parseNet(self, net, netstruct, istraining = True):
        for key in netstruct:
            if key[0] == "conv":
                net = self.conv3d(net, key[2], key[1],key[3], key[4])
            elif key[0] == "fc":
                net = self.fc(net, key[2], key[1], key[3], key[4],activation = key[-1])
            elif key[0] == "maxpool":
                net = tf.nn.max_pool3d(net, ksize = key[2], strides = key[2], padding = "SAME", name = key[1])
            elif key[0] == "dropout" and istraining:
                net = tf.nn.dropout(net, key[2], name = key[1])
            elif key[0] == "reshape":
                net = tf.reshape(net, key[-1])
            elif key[0] == "softmax":
                net = tf.nn.softmax(net)
            elif key[0] == "transpose":
                net = tf.transpose(net, perm=key[-1])
        return net
querysum_model.py (project: querysum, author: helmertz)
def _rnn_attention_decoder(self, decoder_cell, training_wheels):
        loop_fn = self._custom_rnn_loop_fn(decoder_cell.output_size, training_wheels=training_wheels)
        decoder_outputs, _, (context_vectors_array, attention_logits_array, pointer_probability_array) = \
            tf.nn.raw_rnn(decoder_cell,
                          loop_fn,
                          swap_memory=True)

        decoder_outputs = decoder_outputs.stack()
        decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2])

        attention_logits = attention_logits_array.gather(tf.range(0, attention_logits_array.size() - 1))
        attention_logits = tf.transpose(attention_logits, [1, 0, 2])

        context_vectors = context_vectors_array.gather(tf.range(0, context_vectors_array.size() - 1))
        context_vectors = tf.transpose(context_vectors, [1, 0, 2])

        pointer_probabilities = pointer_probability_array.gather(tf.range(0, pointer_probability_array.size() - 1))
        pointer_probabilities = tf.transpose(pointer_probabilities, [1, 0])

        return decoder_outputs, context_vectors, attention_logits, pointer_probabilities
querysum_model.py (project: querysum, author: helmertz)
def _score(self, prev_decoder_state, prev_embedding):
        # Returns scores in a tensor of shape [batch_size, input_sequence_length]

        if self.mode == 'decode':
            query_part = self.query_attention_partial_score_placeholder
            encoder_part = self.encoder_state_attention_partial_scores_placeholder
        else:
            query_part = self.query_attention_partial_score
            encoder_part = self.encoder_state_attention_partial_scores

        embedding_part = tf.matmul(prev_embedding, self.attention_w_e)

        output = tf.matmul(prev_decoder_state,
                           self.attention_w) + embedding_part + query_part + encoder_part + self.attention_b
        output = tf.tanh(output)
        output = tf.reduce_sum(self.attention_v * output, axis=2)
        output = tf.transpose(output, [1, 0])

        # Handle input document padding by giving a large penalty, eliminating it from the weighted average
        padding_penalty = -1e20 * tf.to_float(1 - tf.sign(self.documents_placeholder))
        masked = output + padding_penalty

        return masked
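The -1e20 penalty above is the usual padding mask for attention: after a softmax, positions carrying the penalty get essentially zero weight. A minimal NumPy illustration with invented scores:

import numpy as np

scores = np.array([2.0, 1.0, 0.5])
mask = np.array([1.0, 1.0, 0.0])          # last position is padding
masked = scores + -1e20 * (1.0 - mask)
weights = np.exp(masked) / np.exp(masked).sum()
print(weights.round(3))                   # [0.731 0.269 0.   ]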
proposal.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def _bbox_transform(self, ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = tf.log(gt_widths / ex_widths)
    targets_dh = tf.log(gt_heights / ex_heights)

    targets = tf.transpose(tf.stack(
      (targets_dx, targets_dy, targets_dw, targets_dh),
      axis=0
    ))
    return targets
prior.py (project: attend_infer_repeat, author: akosiorek)
def _cumprod(tensor, axis=0):
    """A custom version of cumprod to prevent NaN gradients when there are zeros in `tensor`
    as reported here: https://github.com/tensorflow/tensorflow/issues/3862

    :param tensor: tf.Tensor
    :return: tf.Tensor
    """
    transpose_permutation = None
    n_dim = len(tensor.get_shape())
    if n_dim > 1 and axis != 0:

        if axis < 0:
            axis += n_dim

        transpose_permutation = np.arange(n_dim)
        # swap the scan axis to the front; this permutation is its own inverse
        transpose_permutation[axis], transpose_permutation[0] = 0, axis

    if transpose_permutation is not None:
        tensor = tf.transpose(tensor, transpose_permutation)

    def prod(acc, x):
        return acc * x

    prob = tf.scan(prod, tensor)
    if transpose_permutation is not None:
        prob = tf.transpose(prob, transpose_permutation)
    return prob
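A hedged usage sketch, assuming _cumprod is in scope: on a tensor without zeros it should agree with tf.cumprod along the requested axis (values invented); its only advantage is the gradient when zeros are present.

import tensorflow as tf

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
with tf.Session() as sess:
    print(sess.run(_cumprod(x, axis=1)))    # [[1. 2. 6.] [4. 20. 120.]]
    print(sess.run(tf.cumprod(x, axis=1)))  # same values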
Attention.py (project: TFCommon, author: MU94W)
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
        self.attention_units = attention_units
        self.enc_units = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1, 0, 2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]
        self.mode = mode
        self.mask = array_ops.sequence_mask(sequence_length, self.enc_length, tf.float32) if sequence_length is not None else None

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))

        # pre-compute Uahj to minimize the computational cost
        with tf.variable_scope('attention'):
            Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
        self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1, 1, 1, 1], "SAME")
Attention.py (project: TFCommon, author: MU94W)
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
        self.attention_units    = attention_units
        self.enc_units          = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1,0,2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]
        self.mode = mode
        self.mask = array_ops.sequence_mask(sequence_length, self.enc_length) if sequence_length is not None else None
        self.tiny = -math.inf * tf.ones(shape=(self.batch_size, self.enc_length))

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))
        ### pre-compute Uahj to minimize the computational cost
        with tf.variable_scope('attention'):
            Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
        self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1,1,1,1], "SAME")
Attention.py (project: TFCommon, author: MU94W)
def __init__(self, attention_units, memory, time_major=True):
        self.attention_units    = attention_units
        self.enc_units          = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1,0,2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))
        # pre-compute Uahj to minimize the computational cost
        with tf.variable_scope('attention'):
            Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units),
                                 initializer=gaussian_initializer(mean=0.0, std=0.001))
        self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1,1,1,1], "SAME")
CandidateSample.py (project: TFCommon, author: MU94W)
def sampled_softmax_loss(label, logit, projection, num_sampled):
    """
    Args:
        label:
        logit:          unscaled log probabilities
        projection:     (W, b)
        num_sampled:
    """
    local_label = tf.reshape(label, shape=(-1,1))
    local_logit = tf.reshape(logit, shape=(-1, logit.get_shape()[-1].value))
    local_Wt    = tf.transpose(projection[0], perm=(1,0))
    local_b     = projection[1]
    loss_sum    = tf.nn.sampled_softmax_loss(weights=local_Wt, biases=local_b,
                                             labels=local_label,
                                             inputs=local_logit,
                                             num_sampled=num_sampled,
                                             num_classes=local_Wt.get_shape()[0].value)
    loss = tf.divide(tf.reduce_sum(loss_sum), tf.cast(tf.size(local_label), dtype=tf.float32))
    return loss
layers.py (project: multimodal_varinf, author: tmoer)
def forward(self,z):
        if not self.ar:
            mu,log_sigma = self._get_mu_and_sigma(z)
        else:
            # permute z
            z = tf.reshape(z,[-1]+[1]*self.hps.z_size)
            perm = np.random.permutation(self.hps.z_size)+1
            z = tf.transpose(z,np.append([0],perm))
            z = tf.reshape(z,[-1,self.hps.z_size])
            mu,log_sigma = ar_layer(z,self.hps,n_hidden=self.n_hidden)
        log_sigma = tf.clip_by_value(log_sigma,-5,5)
        if not self.hps.ignore_sigma_flow:
            y = z * tf.exp(log_sigma) + mu
            log_det = -1 * log_sigma
        else:
            y = z + mu
            log_det = 0.0
        return y,log_det
pose_model.py (project: Face-Pose-Net, author: fengju514)
def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
      # This should be equivalent to:
      #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
      #                         np.linspace(-1, 1, height))
      #  ones = np.ones(np.prod(x_t.shape))
      #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
      x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
      y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

      x_t_flat = tf.reshape(x_t, (1, -1))
      y_t_flat = tf.reshape(y_t, (1, -1))

      ones = tf.ones_like(x_t_flat)
      grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
      return grid
ops.py (project: Unsupervised-Anomaly-Detection-with-Generative-Adversarial-Networks, author: xtarx)
def minibatch_discrimination(input_layer, num_kernels, dim_per_kernel=5, name='minibatch_discrim'):
    # batch_size = input_layer.shape[0]
    # num_features = input_layer.shape[1]
    batch_size = input_layer.get_shape().as_list()[0]
    num_features = input_layer.get_shape().as_list()[1]
    W = tf.get_variable('W', [num_features, num_kernels * dim_per_kernel],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable('b', [num_kernels], initializer=tf.constant_initializer(0.0))
    activation = tf.matmul(input_layer, W)
    activation = tf.reshape(activation, [batch_size, num_kernels, dim_per_kernel])
    tmp1 = tf.expand_dims(activation, 3)
    tmp2 = tf.transpose(activation, perm=[1, 2, 0])
    tmp2 = tf.expand_dims(tmp2, 0)
    abs_diff = tf.reduce_sum(tf.abs(tmp1 - tmp2), reduction_indices=[2])
    f = tf.reduce_sum(tf.exp(-abs_diff), reduction_indices=[2])
    f = f + b
    return f
tflittle_pony.py (project: identifiera-sarkasm, author: risnejunior)
def feed_network(self,data,keep_prob,chunk_size,n_chunks, dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data, axis)
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)

        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]

        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
ttbb_models.py (project: shalo, author: henryre)
def _embed_sentences(self):
        """Tensorflow implementation of Simple but Tough-to-Beat Baseline"""
        # Get word features
        word_embeddings = self._get_embedding()
        word_feats      = tf.nn.embedding_lookup(word_embeddings, self.input)
        # Get marginal estimates and scaling term
        batch_size = tf.shape(word_feats)[0]
        a = tf.pow(10.0, self._get_a_exp())
        p = tf.constant(self.marginals, dtype=tf.float32, name='marginals')
        q = tf.reshape(
            a / (a + tf.nn.embedding_lookup(p, self.input)),
            (batch_size, self.mx_len, 1)
        )
        # Compute initial sentence embedding
        z = tf.reshape(1.0 / tf.to_float(self.input_lengths), (batch_size, 1))
        S = z * tf.reduce_sum(q * word_feats, axis=1)
        # Compute common component
        S_centered = S - tf.reduce_mean(S, axis=0)
        _, _, V = tf.svd(S_centered, full_matrices=False, compute_uv=True)
        self.tf_ccx = tf.stop_gradient(tf.gather(tf.transpose(V), 0))
        # Common component removal
        ccx = tf.reshape(self._get_common_component(), (1, self.d))
        sv = {'embeddings': word_embeddings, 'a': a, 'p': p, 'ccx': ccx}
        return S - tf.matmul(S, ccx * tf.transpose(ccx)), sv
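In the return line above, ccx is a (1, d) row vector that is unit-norm by construction of the SVD, so ccx * tf.transpose(ccx) broadcasts to the d x d outer product and the subtraction projects the common direction out of every sentence embedding. A NumPy sketch with invented values:

import numpy as np

d = 4
c = np.random.randn(1, d)
c /= np.linalg.norm(c)                  # unit common direction
S = np.random.randn(3, d)
S_proj = S - S.dot(c.T.dot(c))          # remove the common component
print(np.allclose(S_proj.dot(c.T), 0))  # True: nothing left along c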

