def __init__(self, deep, gpu, word2index, in_units, hidden_units, out_units, loss_func, train, drop_ratio=0.0):
    """Build the bidirectional-LSTM context network.

    Registers the embeddings, the left-to-right / right-to-left LSTMs and the
    output projection layers on the enclosing Chain, optionally moves the
    model to GPU, and re-initializes the embedding weights.

    Args:
        deep: if truthy, use a 2-layer MLP head (``l3``/``l4``) over the
            concatenated LSTM states; otherwise use per-direction linear
            projections (``lp_l2r``/``lp_r2l``).
        gpu: GPU device id; ``>= 0`` moves the model to GPU.
        word2index: vocabulary mapping; only its length (vocab size) is used.
        in_units: embedding dimensionality / LSTM input size.
        hidden_units: LSTM hidden-state size.
        out_units: output dimensionality of the context representation.
        loss_func: loss link registered as a child (e.g. negative sampling).
        train: training-mode flag, stored on the instance.
        drop_ratio: dropout ratio, stored on the instance (default 0.0).
    """
    n_vocab = len(word2index)
    l2r_embedding = F.EmbedID(n_vocab, in_units)
    r2l_embedding = F.EmbedID(n_vocab, in_units)

    if deep:
        super(BiLstmContext, self).__init__(
            l2r_embed=l2r_embedding,
            r2l_embed=r2l_embedding,
            loss_func=loss_func,
            l2r_1=L.LSTM(in_units, hidden_units),
            r2l_1=L.LSTM(in_units, hidden_units),
            l3=L.Linear(2 * hidden_units, 2 * hidden_units),
            l4=L.Linear(2 * hidden_units, out_units),
        )
    else:
        # BUGFIX: floor division. `out_units / 2` is a float under Python 3,
        # and L.Linear requires an integer output size (TypeError otherwise).
        # Identical to the original on Python 2 for even out_units.
        half_out = out_units // 2
        super(BiLstmContext, self).__init__(
            l2r_embed=l2r_embedding,
            r2l_embed=r2l_embedding,
            loss_func=loss_func,
            l2r_1=L.LSTM(in_units, hidden_units),
            r2l_1=L.LSTM(in_units, hidden_units),
            lp_l2r=L.Linear(hidden_units, half_out),
            lp_r2l=L.Linear(hidden_units, half_out),
        )

    if gpu >= 0:
        self.to_gpu()

    # Re-initialize both embedding matrices with N(0, 1/sqrt(n_vocab)),
    # after the (possible) GPU transfer so the arrays are created via
    # self.xp on the active device (presumably numpy on CPU, cupy on
    # GPU — chainer convention; confirm against the framework in use).
    for embedding in (l2r_embedding, r2l_embedding):
        shape = embedding.W.data.shape
        embedding.W.data = self.xp.random.normal(
            0, math.sqrt(1. / shape[0]), shape
        ).astype(np.float32)

    self.word2index = word2index
    self.train = train
    self.deep = deep
    self.drop_ratio = drop_ratio
# NOTE(review): removed stray web-page artifacts ("评论列表" = comment list,
# "文章目录" = table of contents) that were scraped in with the code and are
# not valid Python.