def _create_position_embedding(self, lengths, maxlen):
    # Slice the embedding table to the size of the current sequence
    # (position indices start at 2, so the first two rows are skipped).
    pe_slice = self.pos_embed[2:maxlen + 2, :]
    # Replicate the encodings for each element in the batch.
    batch_size = tf.shape(lengths)[0]
    pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])
    # Mask out positions that are padded.
    positions_mask = tf.sequence_mask(
        lengths=lengths, maxlen=maxlen, dtype=tf.float32)
    positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)
    # Right-align the embeddings so the padding sits at the front of each
    # sequence: first reverse only the valid prefix of each row, then reverse
    # the whole time axis. E.g. with lengths [4, 2]:
    #   [[1,2,3,4,PAD,PAD,PAD], [2,3,PAD,PAD,PAD,PAD,PAD]]
    #   -> [[4,3,2,1,PAD,PAD,PAD], [3,2,PAD,PAD,PAD,PAD,PAD]]
    #   -> [[PAD,PAD,PAD,1,2,3,4], [PAD,PAD,PAD,PAD,PAD,2,3]]
    positions_embed = tf.reverse_sequence(
        positions_embed, lengths, batch_dim=0, seq_dim=1)
    positions_embed = tf.reverse(positions_embed, [1])
    return positions_embed
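The two reverse ops implement a right-alignment trick: reversing only the valid prefix of each sequence and then the whole time axis moves the padding to the front while keeping the valid entries in their original order. A minimal standalone sketch of just that trick (TF 2.x eager style and scalar position ids in place of embedding vectors are assumptions made here for brevity):

import tensorflow as tf

# Toy input: position ids stand in for embedding vectors, 0 marks padding.
# Sequence lengths are [4, 2], maxlen is 7.
positions = tf.constant([[1., 2., 3., 4., 0., 0., 0.],
                         [2., 3., 0., 0., 0., 0., 0.]])
lengths = tf.constant([4, 2])

# Reverse only the valid prefix of each row:
# [[4,3,2,1,0,0,0], [3,2,0,0,0,0,0]]
flipped = tf.reverse_sequence(positions, lengths, seq_axis=1, batch_axis=0)

# Reverse the whole time axis, pushing the padding to the front:
# [[0,0,0,1,2,3,4], [0,0,0,0,0,2,3]]
right_aligned = tf.reverse(flipped, [1])
print(right_aligned.numpy())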