def forward_lm(self, inpt, lang_h, ctx_h):
    """Run forward pass for language modeling."""
    # embed words
    inpt_emb = self.word_encoder(inpt)

    # append the context embedding to every input word embedding
    ctx_h_rep = ctx_h.narrow(0, ctx_h.size(0) - 1, 1).expand(
        inpt.size(0), ctx_h.size(1), ctx_h.size(2))
    inpt_emb = torch.cat([inpt_emb, ctx_h_rep], 2)

    # apply dropout to the combined word/context embeddings
    inpt_emb = self.dropout(inpt_emb)

    # run the reader RNN over the augmented embeddings, starting from lang_h
    out, _ = self.reader(inpt_emb, lang_h)
    # project each hidden state back into the word-embedding space
    decoded = self.decoder(out.view(-1, out.size(2)))

    # tie weights between word embedding/decoding
    decoded = F.linear(decoded, self.word_encoder.weight)

    # return vocabulary logits of shape (seq_len, batch, vocab_size) and the reader outputs
    return decoded.view(out.size(0), out.size(1), decoded.size(1)), out
Source file: dialog_model.py (Python)
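The sketch below walks the same computation with stand-in submodules and dummy tensors, annotating the shape at each step. All sizes (vocab_size, nembed, nhid_lang, nhid_ctx, seq_len, batch, ctx_len), the choice of nn.GRU as the reader, and the variable names are illustrative assumptions, not values taken from the original repository.

# Shape walk-through of the routine above; sizes and module choices are assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

vocab_size, nembed, nhid_lang, nhid_ctx = 32, 16, 16, 8
seq_len, batch, ctx_len = 5, 2, 3

# stand-ins for the model's submodules
word_encoder = nn.Embedding(vocab_size, nembed)
dropout = nn.Dropout(0.1)
reader = nn.GRU(nembed + nhid_ctx, nhid_lang)
decoder = nn.Linear(nhid_lang, nembed)

# stand-ins for the method's arguments
inpt = torch.randint(0, vocab_size, (seq_len, batch))  # word indices
lang_h = torch.zeros(1, batch, nhid_lang)              # initial reader state
ctx_h = torch.zeros(ctx_len, batch, nhid_ctx)          # context encoding

inpt_emb = word_encoder(inpt)                          # (seq_len, batch, nembed)
ctx_h_rep = ctx_h.narrow(0, ctx_h.size(0) - 1, 1).expand(
    seq_len, batch, nhid_ctx)                          # last context step, repeated over time
inpt_emb = torch.cat([inpt_emb, ctx_h_rep], 2)         # (seq_len, batch, nembed + nhid_ctx)
inpt_emb = dropout(inpt_emb)

out, _ = reader(inpt_emb, lang_h)                      # (seq_len, batch, nhid_lang)
decoded = decoder(out.view(-1, out.size(2)))           # (seq_len * batch, nembed)
decoded = F.linear(decoded, word_encoder.weight)       # (seq_len * batch, vocab_size), tied weights
logits = decoded.view(seq_len, batch, vocab_size)

print(logits.shape)  # torch.Size([5, 2, 32])
print(out.shape)     # torch.Size([5, 2, 16])

Note that only the last time step of ctx_h is used: it is broadcast to every input position, so each word embedding is conditioned on the final context encoding before being fed to the reader.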