def __init__(self, n_vocab_char, n_units, n_units_char, index2charIds, dropout=.2):
    """Build a character-level RNN: char embedding -> LSTM -> linear output.

    Args:
        n_vocab_char: size of the character vocabulary.
        n_units: output dimensionality of the final linear layer
            (presumably the word-embedding size — confirm against caller).
        n_units_char: dimensionality of the character embeddings and of
            the LSTM hidden state.
        index2charIds: mapping from an index to its sequence of character
            ids (semantics assumed from the name — TODO confirm).
        dropout: dropout ratio; 0 disables dropout.
    """
    super(RNN, self).__init__()
    with self.init_scope():
        # Character embedding table (original comment said "word embedding",
        # but both the vocab and unit sizes are character-level).
        self.embed = L.EmbedID(
            n_vocab_char, n_units_char, initialW=I.Uniform(1. / n_units_char))
        self.mid = L.LSTM(n_units_char, n_units_char)  # recurrent (LSTM) layer
        self.out = L.Linear(n_units_char, n_units)     # feed-forward output layer
    self.dropout = dropout
    self.index2charIds = index2charIds
# NOTE(review): removed scraped page-navigation residue ("评论列表" / "文章目录",
# i.e. "comment list" / "article table of contents") — not part of the code.