def forward_batch(self, x1, x2):
    # x1, x2: (batch, slen, hidden) sequence representations;
    # assumes `from chainer import cuda, functions as F` at module level.
    xp = cuda.get_array_module(x1.data)  # numpy or cupy, depending on where x1 lives
    batch, slen, hidden = x2.shape
    # Append a bias column of ones to x1, project x2 through self.W, then take a
    # batched matmul (second operand transposed) to get pairwise scores of shape (batch, slen, slen).
    return F.batch_matmul(
        F.concat([x1, xp.ones((batch, slen, 1), 'f')], 2),  # (batch, slen, hidden+1)
        F.reshape(F.linear(F.reshape(x2, (batch * slen, -1)), self.W),
                  (batch, slen, -1)),
        transb=True)
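
For context, here is a minimal NumPy sketch of the same computation, assuming self.W has shape (hidden + 1, hidden): the method appends a bias of 1 to each x1 vector, projects each x2 vector with W, and returns the biaffine score [x1_i; 1]^T W x2_j for every pair of positions. All variable names and sizes below are illustrative, not taken from the original code.

import numpy as np

batch, slen, hidden = 2, 5, 8
x1 = np.random.randn(batch, slen, hidden).astype('f')
x2 = np.random.randn(batch, slen, hidden).astype('f')
W = np.random.randn(hidden + 1, hidden).astype('f')   # assumed shape of self.W

# Same steps as forward_batch: bias column, linear projection, batched matmul with transb=True.
x1b = np.concatenate([x1, np.ones((batch, slen, 1), 'f')], axis=2)      # (batch, slen, hidden+1)
proj = x2.reshape(batch * slen, -1).dot(W.T).reshape(batch, slen, -1)   # (batch, slen, hidden+1)
scores = np.einsum('bik,bjk->bij', x1b, proj)                           # (batch, slen, slen)

# Each entry is the biaffine form [x1_i; 1]^T W x2_j.
b, i, j = 0, 1, 3
assert np.allclose(scores[b, i, j], x1b[b, i].dot(W.dot(x2[b, j])), atol=1e-4)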