# beam_aligner.py
# Module-level imports required by this method (nest is tensorflow.python.util.nest).
import tensorflow as tf
from tensorflow.python.util import nest


def __init__(self, training, cell, embedding, start_tokens, end_token, initial_state,
             beam_width, output_layer=None, gold_sequence=None, gold_sequence_length=None):
    self._training = training
    self._cell = cell
    self._output_layer = output_layer
    self._embedding_fn = lambda ids: tf.nn.embedding_lookup(embedding, ids)
    # Output size: the projection layer width if one is given, otherwise the raw cell output size.
    self._output_size = output_layer.units if output_layer is not None else self._cell.output_size
    self._batch_size = tf.size(start_tokens)
    self._beam_width = beam_width
    # Tile the initial cell state across the beam dimension.
    self._tiled_initial_cell_state = nest.map_structure(
        self._maybe_split_batch_beams, initial_state, self._cell.state_size)
    self._start_tokens = start_tokens
    self._tiled_start_tokens = self._maybe_tile_batch(start_tokens)
    self._end_token = end_token
    self._original_gold_sequence = gold_sequence
    self._gold_sequence = gold_sequence
    self._gold_sequence_length = gold_sequence_length
    if training:
        # Teacher forcing requires the gold sequence and its lengths.
        assert self._gold_sequence is not None
        assert self._gold_sequence_length is not None
        self._max_time = int(self._gold_sequence.shape[1])
        # Transpose the gold sequence to be time-major and load it into a TensorArray.
        self._gold_sequence = tf.TensorArray(dtype=tf.int32, size=self._max_time)
        self._gold_sequence = self._gold_sequence.unstack(tf.transpose(gold_sequence, [1, 0]))
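For orientation, here is a minimal sketch of how this constructor might be called in TF 1.x graph mode at inference time. The class name BeamAligner, the token ids, and all sizes below are illustrative assumptions, not values taken from beam_aligner.py.

# Minimal usage sketch (assumptions: class name BeamAligner, TF 1.x graph mode,
# illustrative vocabulary/embedding/beam sizes and token ids).
import tensorflow as tf

vocab_size, embed_size, hidden_size = 1000, 128, 256
batch_size, beam_width = 32, 8

embedding = tf.get_variable('embedding', [vocab_size, embed_size])
cell = tf.nn.rnn_cell.LSTMCell(hidden_size)
output_layer = tf.layers.Dense(vocab_size, use_bias=False)

start_tokens = tf.fill([batch_size], 1)   # assumed GO token id
end_token = 2                             # assumed EOS token id
initial_state = cell.zero_state(batch_size, tf.float32)

# gold_sequence / gold_sequence_length are only needed when training=True.
decoder = BeamAligner(training=False, cell=cell, embedding=embedding,
                      start_tokens=start_tokens, end_token=end_token,
                      initial_state=initial_state, beam_width=beam_width,
                      output_layer=output_layer)

In training mode the constructor additionally expects gold_sequence of shape [batch, max_time] and gold_sequence_length, which it transposes to time-major form and unstacks into a TensorArray for step-by-step lookup during decoding.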