# assumes "import chainer" and "import chainer.functions as F" at module level
def forward(self, ws, ss, ps):
    batchsize, length = ws.shape
    xp = chainer.cuda.get_array_module(ws[0])  # numpy or cupy, depending on the device
    # embed words, suffixes and prefixes, then concatenate along the feature axis
    ws = self.emb_word(ws)  # (batch, length, word_dim)
    ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
    ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
    hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))  # (length, batch, dim)
    hs = F.dropout(hs, self.dropout_ratio, train=self.train)
    hs = F.split_axis(hs, length, 0)  # one (1, batch, dim) slice per time step

    hs_f = []
    hs_b = []
    self._init_state()
    # run the forward LSTM stack over the sentence and the backward stack over its reverse
    for h_in_f, h_in_b in zip(hs, reversed(hs)):
        h_f = self.lstm_f2(self.lstm_f1(F.squeeze(h_in_f, 0)))
        hs_f.append(h_f)
        h_b = self.lstm_b2(self.lstm_b1(F.squeeze(h_in_b, 0)))
        hs_b.append(h_b)

    # hs_b was filled in reversed order, so re-reverse it to align the
    # forward and backward states of each token before the output MLP
    ys = [self.linear2(F.relu(self.linear1(F.concat([h_f, h_b]))))
          for h_f, h_b in zip(hs_f, reversed(hs_b))]
    return ys
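
The links referenced above (emb_word, emb_suf, emb_prf, lstm_f1/lstm_f2, lstm_b1/lstm_b2, linear1, linear2) and the _init_state() helper are not shown in this excerpt. Below is a minimal sketch of how they might be declared with the Chainer v1-style Chain constructor; the class name and all layer sizes (word_dim, afix_dim, hidden_dim, n_out) are illustrative assumptions, not the model's actual configuration.

import chainer
import chainer.links as L


class BiLSTMTagger(chainer.Chain):
    """Hypothetical container for the links used by forward() above."""

    def __init__(self, word_vocab, suf_vocab, prf_vocab,
                 word_dim=100, afix_dim=30, hidden_dim=128, n_out=50,
                 dropout_ratio=0.5):
        in_dim = word_dim + 2 * afix_dim  # word + suffix + prefix embeddings
        super(BiLSTMTagger, self).__init__(
            emb_word=L.EmbedID(word_vocab, word_dim),
            emb_suf=L.EmbedID(suf_vocab, afix_dim),
            emb_prf=L.EmbedID(prf_vocab, afix_dim),
            lstm_f1=L.LSTM(in_dim, hidden_dim),
            lstm_f2=L.LSTM(hidden_dim, hidden_dim),
            lstm_b1=L.LSTM(in_dim, hidden_dim),
            lstm_b2=L.LSTM(hidden_dim, hidden_dim),
            linear1=L.Linear(2 * hidden_dim, hidden_dim),
            linear2=L.Linear(hidden_dim, n_out),
        )
        self.dropout_ratio = dropout_ratio
        self.train = True

    def _init_state(self):
        # clear the LSTM memory cells before processing a new batch
        for lstm in (self.lstm_f1, self.lstm_f2, self.lstm_b1, self.lstm_b2):
            lstm.reset_state()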