def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
             max_length=50, dropout=0.2, width=3):
    # Token/position embeddings are initialized from N(0, 0.1); the output
    # projection uses VarInNormal, a custom initializer defined elsewhere
    # in this example.
    init_emb = chainer.initializers.Normal(0.1)
    init_out = VarInNormal(1.)
    super(Seq2seq, self).__init__(
        # Word embeddings for source and target tokens (label -1 is padding).
        embed_x=L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        embed_y=L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        # Learned position embeddings, one vector per position up to max_length.
        embed_position_x=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        embed_position_y=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        # Convolutional encoder/decoder stacks with gated linear units (GLU).
        encoder=ConvGLUEncoder(n_layers, n_units, width, dropout),
        decoder=ConvGLUDecoder(n_layers, n_units, width, dropout),
        # Output projection from decoder states to target-vocabulary logits.
        W=L.Linear(n_units, n_target_vocab, initialW=init_out),
    )
    self.n_layers = n_layers
    self.n_units = n_units
    self.n_target_vocab = n_target_vocab
    self.max_length = max_length
    self.width = width
    self.dropout = dropout
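# A minimal instantiation sketch, assuming chainer, chainer.links as L,
# VarInNormal, ConvGLUEncoder, and ConvGLUDecoder are importable from the
# surrounding example code. The concrete sizes below are hypothetical,
# not values taken from the original post.
model = Seq2seq(n_layers=6, n_source_vocab=40000, n_target_vocab=40000,
                n_units=512, max_length=50, dropout=0.2, width=3)
# A training step would then pass padded source/target ID batches through the
# model, e.g. loss = model(xs, ys), assuming the model's __call__ returns the
# softmax cross-entropy loss over the target vocabulary.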