def latent_to_decoder(latent_vector,
                      embedding_size,
                      latent_dim,
                      num_layers,
                      activation=tf.nn.relu,
                      use_lstm=False,
                      dtype=None):
    """Project a latent vector into the decoder's initial RNN state.

    A single affine layer maps the latent vector to a flat tensor of size
    ``num_layers * embedding_size`` (doubled when ``use_lstm`` is set, to
    cover both the cell state and the hidden state), which is then split
    into the per-layer (and, for LSTM, per-component) tuples the decoder
    expects as its initial state.

    Args:
        latent_vector: 2-D tensor, presumably shaped [batch, latent_dim]
            (implied by the matmul against ``w``) — confirm at call site.
        embedding_size: width of each decoder layer's state.
        latent_dim: dimensionality of the latent vector.
        num_layers: number of decoder RNN layers.
        activation: nonlinearity applied to the affine projection.
            BUG FIX: the original body ignored this parameter and called a
            hard-coded ``prelu(...)``; the parameter is now honored, so the
            default behavior is ``tf.nn.relu``.
        use_lstm: if True, each layer's state is a ``(c, h)`` pair.
        dtype: dtype for the projection variables ``w`` and ``b``.

    Returns:
        For ``num_layers == 1``: the projected state tensor, or a 2-tuple
        of tensors when ``use_lstm``. For ``num_layers > 1``: a tuple with
        one entry per layer; each entry is itself a 2-tuple when
        ``use_lstm``.
    """
    concat_state_size = num_layers * embedding_size
    if use_lstm:
        # LSTM cells carry both a cell state and a hidden state per layer,
        # so the flat projection must be twice as wide.
        concat_state_size *= 2
    with tf.variable_scope('latent_to_decoder'):
        w = tf.get_variable("w", [latent_dim, concat_state_size],
                            dtype=dtype)
        b = tf.get_variable("b", [concat_state_size], dtype=dtype)
        # Honor the `activation` parameter (previously a hard-coded prelu
        # call that silently shadowed it).
        decoder_initial_state = activation(tf.matmul(latent_vector, w) + b)
    # NOTE(review): the splits below use the legacy TensorFlow signature
    # tf.split(axis, num_splits, value); modern TF expects
    # tf.split(value, num_splits, axis). Kept as-is to match the TF
    # version this file targets — confirm before upgrading TF.
    if num_layers > 1:
        decoder_initial_state = tuple(
            tf.split(1, num_layers, decoder_initial_state))
        if use_lstm:
            decoder_initial_state = [
                tuple(tf.split(1, 2, single_layer_state))
                for single_layer_state in decoder_initial_state]
    elif use_lstm:
        decoder_initial_state = tuple(tf.split(1, 2, decoder_initial_state))
    return decoder_initial_state
seq2seq.py 文件源码
python
阅读 27
收藏 0
点赞 0
评论 0
评论列表
文章目录