Example source code for Python's sequence_mask()

eval_output_embeddings.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def bag_of_tokens(config, labels, label_lengths):
    if config.train_output_embeddings:
        with tf.variable_scope('embed', reuse=True):
            output_embeddings = tf.get_variable('output_embedding')
    else:
        output_embeddings = tf.constant(config.output_embedding_matrix)

    #everything_label_placeholder = tf.placeholder(shape=(None, config.max_length,), dtype=tf.int32)
    #everything_label_length_placeholder = tf.placeholder(shape=(None,), dtype=tf.int32)

    labels = tf.constant(np.array(labels))
    embedded_output = tf.gather(output_embeddings, labels)
    print('embedded_output before', embedded_output)
    #mask = tf.sequence_mask(label_lengths, maxlen=config.max_length, dtype=tf.float32)
    # note: this multiplication will broadcast the mask along all elements of the depth dimension
    # (which is why we run the expand_dims to choose how to broadcast)
    #embedded_output = embedded_output * tf.expand_dims(mask, axis=2)
    #print('embedded_output after', embedded_output)

    return tf.reduce_sum(embedded_output, axis=1)
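
The masking lines above are commented out. The snippet below is a minimal sketch (not from the original repository, with hypothetical shapes and values) of how the commented-out tf.sequence_mask call would zero out the embeddings of padded positions before the bag-of-tokens sum.

import numpy as np
import tensorflow as tf

# Hypothetical example: 2 sequences padded to max_length = 4, embedding depth = 3
labels = tf.constant([[1, 2, 0, 0], [3, 0, 0, 0]])            # 0 is the padding id
label_lengths = tf.constant([2, 1])
output_embeddings = tf.constant(np.random.rand(5, 3), dtype=tf.float32)

embedded_output = tf.gather(output_embeddings, labels)        # [batch, max_length, depth]
mask = tf.sequence_mask(label_lengths, maxlen=4, dtype=tf.float32)
# expand_dims lets the [batch, max_length] mask broadcast over the depth dimension
embedded_output = embedded_output * tf.expand_dims(mask, axis=2)
bag_of_tokens = tf.reduce_sum(embedded_output, axis=1)        # [batch, depth]
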
losses.py (project: seq2seq, author: google)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses past the sequence length.

  Args:
    logits: Logits of shape `[T, B, vocab_size]`
    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input

  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=targets)

    # Mask out the losses we don't care about
    loss_mask = tf.sequence_mask(
        tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
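
As a quick sanity check, here is a self-contained sketch (dummy shapes and values, not from the repository) showing why the transpose is needed: the losses come out time-major as [T, B], while tf.sequence_mask builds a [B, T] mask.

import tensorflow as tf

T, B, V = 5, 2, 7                                 # time steps, batch, vocab (hypothetical)
logits = tf.random_normal([T, B, V])
targets = tf.zeros([T, B], dtype=tf.int32)
sequence_length = tf.constant([5, 3])             # the second example has 2 padded steps

losses = cross_entropy_sequence_loss(logits, targets, sequence_length)  # [T, B]
# Steps past each sequence length contribute exactly zero to the loss:
loss_per_example = tf.reduce_sum(losses, axis=0) / tf.to_float(sequence_length)
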
layers.py (project: multimodal_varinf, author: tmoer)
def ar_layer(z0,hps,n_hidden=10):
    ''' Old IAF (inverse autoregressive flow) layer '''
    # Repeat input
    z_rep = tf.reshape(tf.tile(z0,[1,hps.z_size]),[-1,hps.z_size])

    # make mask    
    mask = tf.sequence_mask(tf.range(hps.z_size),hps.z_size)[None,:,:]
    mask = tf.reshape(tf.tile(mask,[tf.shape(z0)[0],1,1]),[-1,hps.z_size])

    # predict mu and sigma
    z_mask = z_rep * tf.to_float(mask)
    mid = slim.fully_connected(z_mask,n_hidden,activation_fn=tf.nn.relu)
    pars = slim.fully_connected(mid,2,activation_fn=None)
    pars = tf.reshape(pars,[-1,hps.z_size,2])    
    mu, log_sigma = tf.unstack(pars,axis=2)
    return mu, log_sigma
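
The mask built from tf.sequence_mask(tf.range(hps.z_size), hps.z_size) is strictly lower triangular: row i has its first i entries set to True, so unit i can only attend to units 0..i-1. A minimal sketch with a hypothetical size:

import tensorflow as tf

z_size = 4
mask = tf.sequence_mask(tf.range(z_size), z_size)
with tf.Session() as sess:
    print(sess.run(mask))
# [[False False False False]
#  [ True False False False]
#  [ True  True False False]
#  [ True  True  True False]]
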
misc.py (project: jack, author: uclmr)
def mask_for_lengths(lengths, max_length=None, mask_right=True, value=-1000.0):
    """
    Creates a [batch_size x max_length] mask.

    Args:
        lengths: int32 1-dim tensor of batch_size lengths
        max_length: int32 0-dim tensor or python int
        mask_right: if True, everything before "lengths" becomes zero and the
            rest "value", else vice versa
        value: value for the mask

    Returns:
        [batch_size x max_length] mask of zeros and "value"s
    """
    mask = tf.sequence_mask(lengths, max_length, dtype=tf.float32)
    if mask_right:
        mask = 1.0 - mask
    mask *= value
    return mask
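
A short usage sketch (dummy lengths, not from the repository): the returned additive mask can be added to attention scores so padded positions receive a large negative value before a softmax.

import tensorflow as tf

lengths = tf.constant([3, 1])                      # hypothetical batch of 2
scores = tf.zeros([2, 4])                          # [batch_size, max_length]
scores = scores + mask_for_lengths(lengths, max_length=4)
weights = tf.nn.softmax(scores)                    # padded positions get ~0 weight
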
models.py (project: seq2seq, author: eske)
def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
    with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
        if context is not None and encoder.use_context:
            state = tf.concat([state, context], axis=1)

        if encoder.attn_filters:
            e = compute_energy_with_filter(hidden_states, state, attn_size=encoder.attn_size,
                                           attn_filters=encoder.attn_filters,
                                           attn_filter_length=encoder.attn_filter_length, **kwargs)
        else:
            e = compute_energy(hidden_states, state, attn_size=encoder.attn_size,
                               attn_keep_prob=encoder.attn_keep_prob, pervasive_dropout=encoder.pervasive_dropout,
                               layer_norm=encoder.layer_norm, mult_attn=encoder.mult_attn, **kwargs)

        e -= tf.reduce_max(e, axis=1, keep_dims=True)
        mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)

        T = encoder.attn_temperature or 1.0
        exp = tf.exp(e / T) * mask
        weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
        weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)

        return weighted_average, weights
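
The snippet above implements a masked softmax by hand: subtract the per-row max for numerical stability, exponentiate, zero out padded positions with the sequence mask, then renormalise. A stripped-down sketch of just that pattern (hypothetical shapes):

import tensorflow as tf

e = tf.random_normal([2, 6])                                   # [batch, time] attention energies
encoder_input_length = tf.constant([6, 3])
mask = tf.sequence_mask(encoder_input_length, maxlen=6, dtype=tf.float32)

e -= tf.reduce_max(e, axis=1, keep_dims=True)
exp = tf.exp(e) * mask
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)    # each row sums to 1 over valid steps
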
losses.py (project: conv_seq2seq, author: tobyyouup)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses past the sequence length.

  Args:
    logits: Logits of shape `[T, B, vocab_size]`
    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input

  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=targets)

    # Mask out the losses we don't care about
    loss_mask = tf.sequence_mask(
        tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
conv_encoder_fairseq.py (project: conv_seq2seq, author: tobyyouup)
def _create_position_embedding(self, lengths, maxlen):

    # Slice to size of current sequence
    pe_slice = self.pos_embed[2:maxlen+2, :]
    # Replicate encodings for each element in the batch
    batch_size = tf.shape(lengths)[0]
    pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])

    # Mask out positions that are padded
    positions_mask = tf.sequence_mask(
        lengths=lengths, maxlen=maxlen, dtype=tf.float32)
    positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)

    positions_embed = tf.reverse_sequence(positions_embed, lengths, batch_dim=0, seq_dim=1)  # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]]   [4,2]
    positions_embed = tf.reverse(positions_embed,[1])  # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]] --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]

    return positions_embed
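
The two reversal ops convert right-padded sequences into left-padded ones, as the inline comment illustrates. A toy sketch of that trick on plain integer ids (not from the repository):

import tensorflow as tf

x = tf.constant([[1, 2, 3, 4, 0, 0, 0],
                 [2, 3, 0, 0, 0, 0, 0]])                      # 0 = PAD, lengths are [4, 2]
lengths = tf.constant([4, 2])

y = tf.reverse_sequence(x, lengths, batch_dim=0, seq_dim=1)   # [[4,3,2,1,0,0,0], [3,2,0,0,0,0,0]]
y = tf.reverse(y, [1])                                        # [[0,0,0,1,2,3,4], [0,0,0,0,0,2,3]]
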
as_reader_tf.py (project: attention-sum-reader, author: cairoHy)
def preprocess_input_sequences(self, data, shuffle=True):
        """
        ??????
        shuffle
        PAD/TRUNC????????
        y_true????self.A_len????index=0??????one-hot??
        """
        documents, questions, answer, candidates = self.union_shuffle(data) if shuffle else data
        d_lens = [len(i) for i in documents]

        questions_ok = pad_sequences(questions, maxlen=self.q_len, dtype="int32", padding="post", truncating="post")
        documents_ok = pad_sequences(documents, maxlen=self.d_len, dtype="int32", padding="post", truncating="post")
        context_mask = K.eval(tf.sequence_mask(d_lens, self.d_len, dtype=tf.float32))
        candidates_ok = pad_sequences(candidates, maxlen=self.A_len, dtype="int32", padding="post", truncating="post")
        y_true = np.zeros_like(candidates_ok)
        y_true[:, 0] = 1
        return questions_ok, documents_ok, context_mask, candidates_ok, y_true
attention_sum_reader.py (project: attention-sum-reader, author: cairoHy)
def preprocess_input_sequences(self, data, shuffle=True):
        """
        ??????
        shuffle
        PAD/TRUNC????????
        y_true????self.A_len????index=0??????one-hot??
        """
        documents, questions, answer, candidates = self.union_shuffle(data) if shuffle else data
        d_lens = [len(i) for i in documents]

        questions_ok = pad_sequences(questions, maxlen=self.q_len, dtype="int32", padding="post", truncating="post")
        documents_ok = pad_sequences(documents, maxlen=self.d_len, dtype="int32", padding="post", truncating="post")
        context_mask = K.eval(tf.sequence_mask(d_lens, self.d_len, dtype=tf.float32))
        candidates_ok = pad_sequences(candidates, maxlen=self.A_len, dtype="int32", padding="post", truncating="post")
        y_true = np.zeros_like(candidates_ok)
        y_true[:, 0] = 1
        return questions_ok, documents_ok, context_mask, candidates_ok, y_true
model.py (project: nmt, author: tensorflow)
def _compute_loss(self, logits):
    """Compute optimization loss."""
    target_output = self.iterator.target_output
    if self.time_major:
      target_output = tf.transpose(target_output)
    max_time = self.get_max_time(target_output)
    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_output, logits=logits)
    target_weights = tf.sequence_mask(
        self.iterator.target_sequence_length, max_time, dtype=logits.dtype)
    if self.time_major:
      target_weights = tf.transpose(target_weights)

    loss = tf.reduce_sum(
        crossent * target_weights) / tf.to_float(self.batch_size)
    return loss
losses.py (project: tefla, author: openAGI)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Calculates the per-example cross-entropy loss for a sequence of logits and
        masks out all losses past the sequence length.

    Args:
        logits: Logits of shape `[T, B, vocab_size]`
        targets: Target classes of shape `[T, B]`
        sequence_length: An int32 tensor of shape `[B]` corresponding
           to the length of each input

    Returns:
        A tensor of shape [T, B] that contains the loss per example, per time step.
    """
    with tf.name_scope("cross_entropy_sequence_loss"):
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)
        loss_mask = tf.sequence_mask(tf.to_int32(
            sequence_length), tf.to_int32(tf.shape(targets)[0]))
        losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
ner_model.py (project: sequence_tagging, author: guillaumegenthial)
def add_loss_op(self):
        """Defines the loss"""
        if self.config.use_crf:
            log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
                    self.logits, self.labels, self.sequence_lengths)
            self.trans_params = trans_params # need to evaluate it for decoding
            self.loss = tf.reduce_mean(-log_likelihood)
        else:
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.logits, labels=self.labels)
            mask = tf.sequence_mask(self.sequence_lengths)
            losses = tf.boolean_mask(losses, mask)
            self.loss = tf.reduce_mean(losses)

        # for tensorboard
        tf.summary.scalar("loss", self.loss)
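
In the non-CRF branch above, tf.boolean_mask combined with tf.sequence_mask keeps only the losses at valid (non-padded) token positions before averaging, instead of multiplying by a float mask. A minimal sketch with dummy shapes:

import tensorflow as tf

losses = tf.random_uniform([2, 5])                 # [batch, max_sentence_length], hypothetical
sequence_lengths = tf.constant([5, 2])

mask = tf.sequence_mask(sequence_lengths)          # boolean [2, 5]
valid_losses = tf.boolean_mask(losses, mask)       # 1-D tensor with 5 + 2 = 7 elements
loss = tf.reduce_mean(valid_losses)                # padded positions never enter the mean
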
model.py (project: GNMT2, author: Mingyearn)
def _compute_loss(self, logits):
    """Compute optimization loss."""
    target_output = self.iterator.target_output
    if self.time_major:
      target_output = tf.transpose(target_output)
    max_time = self.get_max_time(target_output)
    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_output, logits=logits)
    target_weights = tf.sequence_mask(
        self.iterator.target_sequence_length, max_time, dtype=logits.dtype)
    if self.time_major:
      target_weights = tf.transpose(target_weights)

    loss = tf.reduce_sum(
        crossent * target_weights) / tf.to_float(self.batch_size)
    return loss
model.py (project: tf_tagging, author: Slyne)
def add_loss_op(self):
        """
        Adds loss to self
        """
        if self.config.crf:
            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
                self.logits, self.labels, self.sequence_lengths)
            self.loss = tf.reduce_mean(-log_likelihood)
        else:
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
            mask = tf.sequence_mask(self.sequence_lengths)
            losses = tf.boolean_mask(losses, mask)
            self.loss = tf.reduce_mean(losses)

        # for tensorboard
        tf.summary.scalar("loss", self.loss)
build_model.py (project: sequencing, author: SwordYork)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    with tf.name_scope('cross_entropy_sequence_loss'):
        total_length = tf.to_float(tf.reduce_sum(sequence_length))

        entropy_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)

        # Mask out the losses we don't care about
        loss_mask = tf.sequence_mask(
            tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        loss_mask = tf.transpose(tf.to_float(loss_mask), [1, 0])

        losses = entropy_losses * loss_mask
        # losses.shape: T * B
        # sequence_length: B
        total_loss_avg = tf.reduce_sum(losses) / total_length

        return total_loss_avg
attention.py (project: sequencing, author: SwordYork)
def __init__(self, query_size, keys, values, values_length,
                 name='attention'):
        self.attention_size = keys.get_shape().as_list()[-1]
        self.keys = keys
        self.values = values
        self.values_length = values_length
        self.query_trans = LinearOp(query_size, self.attention_size, name=name)

        with tf.variable_scope(name):
            self.v_att = tf.get_variable('v_att', shape=[self.attention_size],
                                         dtype=DTYPE)

        self.time_axis = 0 if TIME_MAJOR else 1

        # Replace all scores for padded inputs with tf.float32.min
        num_scores = tf.shape(self.keys)[self.time_axis]
        scores_mask = tf.sequence_mask(
            lengths=tf.to_int32(self.values_length),
            maxlen=tf.to_int32(num_scores),
            dtype=DTYPE)

        if TIME_MAJOR:
            scores_mask = tf.transpose(scores_mask)

        self.scores_mask = scores_mask
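
The call site that consumes scores_mask is not shown in this snippet; a hedged sketch of how such a precomputed mask is typically applied is below, replacing scores at padded positions with tf.float32.min so they vanish after the softmax (shapes hypothetical, time-major as in the class above).

import tensorflow as tf

scores = tf.random_normal([7, 2])                         # [time, batch] when TIME_MAJOR
scores_mask = tf.transpose(tf.sequence_mask(
    tf.constant([7, 4]), maxlen=7, dtype=tf.bool))        # [time, batch]

masked_scores = tf.where(scores_mask, scores,
                         tf.fill(tf.shape(scores), tf.float32.min))
weights = tf.nn.softmax(masked_scores, dim=0)             # softmax over the time axis
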
recurrent_layers.py (project: document-qa, author: allenai)
def map(self, is_train, x, mask=None):
        x = tf.transpose(x, [1, 0, 2])

        if self.bidirectional:
            with tf.variable_scope("forward"):
                fw = self._apply_transposed(is_train, x)[0]
            with tf.variable_scope("backward"):
                bw = self._apply_transposed(is_train, tf.reverse_sequence(x, mask, 0, 1))[0]
                bw = tf.reverse_sequence(bw, mask, 0, 1)
            out = tf.concat([fw, bw], axis=2)
        else:
            out = self._apply_transposed(is_train, x)[0]
        out = tf.transpose(out, [1, 0, 2])
        if mask is not None:
            out *= tf.expand_dims(tf.cast(tf.sequence_mask(mask, tf.shape(out)[1]), tf.float32), 2)
        return out
layers.py (project: document-qa, author: allenai)
def apply(self, is_train, x, mask=None):
        if self.map_layer is not None:
            x = self.map_layer.apply(is_train, x, mask)

        rank = len(x.shape) - 2
        if mask is not None:
            shape = tf.shape(x)
            mask = tf.sequence_mask(tf.reshape(mask, (-1,)), shape[-2])
            mask = tf.cast(tf.reshape(mask, (shape[0], shape[1], shape[2], 1)), tf.float32)
            # this min_val thing is kind of a hack, really we should do something like compute the
            # min val over the entire batch, or maybe just pick a very negative values, or maybe
            # do something a bit more finicky with tf.bool_mask
            # In practice it doesn't seem to be problem, and some of the earlier models used these
            # scheme so I have been sticking with it.
            if self.min_val == 0:
                x *= mask
            else:
                x = x * mask + self.min_val * (1 - mask)
            return tf.maximum(tf.reduce_max(x, axis=rank), tf.fill([1] * (len(x.shape)-1),
                                                                   float(self.min_val)))
        else:
            return tf.reduce_max(x, axis=rank)
layers.py (project: document-qa, author: allenai)
def apply(self, is_train, x, mask=None):
        if mask is not None:
            answer_mask = tf.expand_dims(tf.cast(tf.sequence_mask(mask, tf.shape(x)[1]), tf.float32), 2)
            if self.apply_mask:
                x *= answer_mask
        else:
            answer_mask = None

        if self.reduce == "max":
            if mask is not None:
                raise NotImplementedError()
            return tf.reduce_max(x, axis=1)
        elif self.reduce == "mean":
            if mask is not None:
                return tf.reduce_sum(x * answer_mask, axis=1) / tf.cast(tf.expand_dims(mask, 1), tf.float32)
            else:
                return tf.reduce_mean(x, axis=1)
        elif self.reduce == "sum":
            if mask is not None:
                return tf.reduce_sum(x * answer_mask, axis=1)
            else:
                return tf.reduce_sum(x, axis=1)
        else:
            raise ValueError()
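
For the "mean" branch above, dividing by the true lengths rather than the padded time dimension is what makes the pooling length-aware. A small standalone sketch of masked mean pooling along the same lines (hypothetical shapes):

import tensorflow as tf

x = tf.random_normal([2, 4, 3])                                   # [batch, time, features]
lengths = tf.constant([4, 2])

answer_mask = tf.expand_dims(
    tf.cast(tf.sequence_mask(lengths, tf.shape(x)[1]), tf.float32), 2)
mean_pooled = tf.reduce_sum(x * answer_mask, axis=1) / tf.cast(
    tf.expand_dims(lengths, 1), tf.float32)                       # [batch, features]
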
attention.py (project: document-qa, author: allenai)
def apply(self, is_train, x, mask=None):
        if self.key_mapper is not None:
            with tf.variable_scope("map_keys"):
                keys = self.key_mapper.apply(is_train, x, mask)
        else:
            keys = x

        weights = tf.get_variable("weights", (keys.shape.as_list()[-1], self.n_encodings), dtype=tf.float32,
                                  initializer=get_keras_initialization(self.init))
        dist = tf.tensordot(keys, weights, axes=[[2], [0]])  # (batch, x_words, n_encoding)
        if self.bias:
            dist += tf.get_variable("bias", (1, 1, self.n_encodings),
                                    dtype=tf.float32, initializer=tf.zeros_initializer())
        if mask is not None:
            bool_mask = tf.expand_dims(tf.cast(tf.sequence_mask(mask, tf.shape(x)[1]), tf.float32), 2)
            dist = dist * bool_mask + (1 - bool_mask) * VERY_NEGATIVE_NUMBER

        dist = tf.nn.softmax(dist, dim=1)

        out = tf.einsum("ajk,ajn->ank", x, dist)  # (batch, n_encoding, feature)

        if self.post_process is not None:
            with tf.variable_scope("post_process"):
                out = self.post_process.apply(is_train, out)
        return out
model.py (project: SentenceOrdering_PTR, author: JerrikEph)
def add_loss_op(self, logits):
        def seq_loss(logits_tensor, label_tensor, length_tensor):
            """
            Args
                logits_tensor: shape (batch_size*time_steps_de, time_steps_en)
                label_tensor: shape (batch_size, time_steps_de), label id 1D tensor
                length_tensor: shape(batch_size)
            Return
                loss: A scalar tensor, mean error
            """

            labels = tf.reshape(label_tensor, shape=(-1,))
            loss_flat = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_tensor, labels=labels, name='sparse_softmax')
            losses = tf.reshape(loss_flat, shape=tf.shape(label_tensor)) #(batch_size, tstp_de)
            length_mask = tf.sequence_mask(length_tensor, tf.shape(losses)[1], dtype=tf.float32, name='length_mask')
            losses_sum = tf.reduce_sum(losses*length_mask, reduction_indices=[1]) #(batch_size)
            losses_mean = losses_sum / (tf.to_float(length_tensor)+1e-20) #(batch_size)
            loss = tf.reduce_mean(losses_mean) #scalar
            return loss 

        reg_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if v != self.embedding]) * self.config.reg
        valid_loss = seq_loss(logits, self.decoder_label, self.decoder_tstps)
        train_loss = reg_loss + valid_loss
        return train_loss, valid_loss, reg_loss
losses.py (project: automatic-summarization, author: mozilla)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses past the sequence length.

  Args:
    logits: Logits of shape `[T, B, vocab_size]`
    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input

  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=targets)

    # Mask out the losses we don't care about
    loss_mask = tf.sequence_mask(
        tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
recurrentNetwork.py (project: TikZ, author: ellisk42)
def decodesIntoAccuracy(self, labels, perSymbol = True):
        # accuracyMatrix has dimensions None x L
        accuracyMatrix = tf.equal(self.hardOutputs, labels)

        # zero out anything past the labeled length
        accuracyMatrix = tf.logical_and(accuracyMatrix,
                                        tf.sequence_mask(self.lengthPlaceholder, maxlen = self.maximumLength))

        # Sum across all of the time steps to get the total number of correct predictions in each batch entry
        accuracyVector = tf.reduce_sum(tf.cast(accuracyMatrix,tf.int32),axis = 1)
        if perSymbol:
            # Now normalize it by the sequence length and take the average
            accuracyVector = tf.divide(tf.cast(accuracyVector,tf.float32),
                                       tf.cast(self.lengthPlaceholder,tf.float32))
        if not perSymbol:
            # accuracy is measured per sequence
            accuracyVector = tf.cast(tf.equal(accuracyVector,self.lengthPlaceholder),tf.float32)
        return tf.reduce_mean(accuracyVector)
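
Here sequence_mask zeroes out comparisons beyond each labelled length, so padded time steps never count as correct predictions. A toy numeric sketch of that step (values invented):

import tensorflow as tf

hard_outputs = tf.constant([[1, 2, 3, 9], [4, 9, 9, 9]])
labels       = tf.constant([[1, 2, 0, 0], [4, 0, 0, 0]])
lengths      = tf.constant([3, 1])

accuracy_matrix = tf.logical_and(tf.equal(hard_outputs, labels),
                                 tf.sequence_mask(lengths, maxlen=4))
per_symbol = tf.divide(
    tf.cast(tf.reduce_sum(tf.cast(accuracy_matrix, tf.int32), axis=1), tf.float32),
    tf.cast(lengths, tf.float32))                   # [2/3, 1.0]
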
neuralnet_node_bilstmcrf.py (project: skp_edu_docker, author: TensorMSA)
def add_loss_op(self):
        """
        Adds loss to self
        """
        if self.crf:
            log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
            self.logits, self.labels, self.sequence_lengths)
            self.loss = tf.reduce_mean(-log_likelihood)
        else:
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
            mask = tf.sequence_mask(self.sequence_lengths)
            losses = tf.boolean_mask(losses, mask)
            self.loss = tf.reduce_mean(losses)

        # for tensorboard
        tf.summary.scalar("loss", self.loss)
merge.py (project: TensorGraph, author: hycis)
def _train_fprop(self, state_list):
        '''The softmax is applied only to the units that are not masked out,
           as specified by seqlen.
           state_list : [state_below, seqlen]
                state_below (2d tf tensor): shape = [batchsize, layer_dim]
                seqlen (1d tf tensor): shape = [batchsize]
                example:
                    state_below = 3 x 5 matrix
                    seqlen = [2, 1, 4]
        '''
        assert len(state_list) == 2
        state_below, seqlen = state_list
        assert len(seqlen.get_shape()) == 1
        shape = state_below.get_shape()
        assert len(shape) == 2, 'state below dimension {} != 2'.format(len(shape))
        mask = tf.to_float(tf.sequence_mask(seqlen, shape[-1]))
        exp = tf.exp(state_below) * mask
        exp_sum = tf.reduce_sum(exp, axis=1)
        zeros = tf.to_float(tf.equal(exp_sum, 0))
        softmax = tf.div(exp, tf.expand_dims(exp_sum + zeros, -1))
        nonzeros = tf.to_float(tf.not_equal(exp_sum, 0))
        softmax = softmax * tf.expand_dims(nonzeros, -1)
        return softmax
decomposable_attention_ops.py (project: decomposable_attention, author: shuuki4)
def _masked_softmax(logits, lengths):
    """
    Softmax on last axis with proper mask
    """
    sequence_mask = tf.expand_dims(
        tf.sequence_mask(
            lengths, maxlen=tf.shape(logits)[-1], dtype=tf.float32),
        dim=1
    )

    max_logits = tf.reduce_max(logits, axis=-1, keep_dims=True)
    masked_logit_exp = tf.exp(logits - max_logits) * sequence_mask
    logit_sum = tf.reduce_sum(masked_logit_exp, axis=-1, keep_dims=True)

    probs = masked_logit_exp / logit_sum
    return probs
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _make_beam_mask(self, num_available_beams):
        mask = tf.sequence_mask(num_available_beams, self._beam_width)
        return tf.tile(tf.expand_dims(mask, axis=2), multiples=[1, 1, self._output_size])
seq2seq_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def add_loss_op(self, result):
        logits = result.rnn_output
        with tf.control_dependencies([tf.assert_positive(tf.shape(logits)[1], data=[tf.shape(logits)])]):
            length_diff = tf.reshape(self.config.max_length - tf.shape(logits)[1], shape=(1,))
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0]], axis=0), shape=(3, 2))
        preds = tf.pad(logits, padding, mode='constant')

        # add epsilon to avoid division by 0
        preds = preds + 1e-5

        mask = tf.sequence_mask(self.output_length_placeholder, self.config.max_length, dtype=tf.float32)
        loss = tf.contrib.seq2seq.sequence_loss(preds, self.output_placeholder, mask)

        with tf.control_dependencies([tf.assert_non_negative(loss, data=[preds, mask], summarize=256*60*300)]):
            return tf.identity(loss)
positional_cnn_deep_combine_chain_model.py (project: youtube-8m, author: wangheda)
def add_positional_embedding(self, model_input, num_frames, l2_penalty=1e-8):
    batch_size, max_frames, num_features = model_input.get_shape().as_list()
    positional_embedding = tf.get_variable("positional_embedding", dtype=tf.float32,
                                shape=[1, max_frames, num_features], 
                                initializer=tf.zeros_initializer(),
                                regularizer=tf.contrib.layers.l2_regularizer(l2_penalty))
    mask = tf.sequence_mask(lengths=num_frames, maxlen=max_frames, dtype=tf.float32)
    model_input_with_positional_embedding = tf.einsum("ijk,ij->ijk", model_input + positional_embedding, mask)
    return model_input_with_positional_embedding
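
The einsum "ijk,ij->ijk" multiplies each frame's feature vector by its mask value, which is equivalent to broadcasting the mask with expand_dims. A hedged sketch showing the equivalence with dummy shapes:

import tensorflow as tf

model_input = tf.random_normal([2, 6, 8])               # [batch, max_frames, num_features]
num_frames = tf.constant([6, 3])

mask = tf.sequence_mask(lengths=num_frames, maxlen=6, dtype=tf.float32)
masked_a = tf.einsum("ijk,ij->ijk", model_input, mask)
masked_b = model_input * tf.expand_dims(mask, axis=2)   # same result, padded frames zeroed
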
lstm_positional_attention_max_pooling_model.py (project: youtube-8m, author: wangheda)
def get_mean_input(self, model_input, num_frames):
    batch_size, max_frames, num_features = model_input.get_shape().as_list()
    mask = tf.sequence_mask(lengths=num_frames, maxlen=max_frames, dtype=tf.float32)
    mean_input = tf.einsum("ijk,ij->ik", model_input, mask) / tf.expand_dims(tf.cast(num_frames, dtype=tf.float32), dim=1)
    tiled_mean_input = tf.tile(tf.expand_dims(mean_input, dim=1), multiples=[1,max_frames,1])
    return tiled_mean_input

