def forward(self, x, lengths):
    """Encode x with a step-wise RNN and classify the final hidden state.

    Args:
        x: LongTensor of token ids, shape (batch, seq_len) — assumed from
            ``x.size()[:2]`` and its use as indices into the embedding table.
        lengths: unused; accepted only for interface compatibility with
            callers that pass per-example sequence lengths.

    Returns:
        Log-probabilities over classes, shape (batch, num_classes), i.e.
        ``F.log_softmax`` over dim 1 of the final linear layer's output.
    """
    batch_size, seq_length = x.size()[:2]
    # Embedding lookup is done in numpy (self.initial_embeddings is a numpy
    # table), so `emb` is detached from autograd by construction.
    # NOTE(review): `volatile` was removed in PyTorch >= 0.4 (it warns and has
    # no effect there); kept for parity with this file's legacy-torch style.
    emb = Variable(torch.from_numpy(
        self.initial_embeddings.take(x.numpy(), 0)),
        volatile=not self.training)
    h = Variable(torch.zeros(batch_size, self.model_dim), volatile=not self.training)
    for t in range(seq_length):
        # One RNN-cell step over the t-th token embedding; h stays
        # (batch, model_dim) throughout.
        h = self.rnn(emb[:, t, :], h)
    # BUGFIX: the original called h.squeeze() here, which collapses the batch
    # dimension whenever batch_size == 1 (h is already 2-D, so the squeeze was
    # a no-op otherwise). Dropping it keeps the batch axis intact.
    h = F.relu(self.l0(F.dropout(h, 0.5, self.training)))
    h = F.relu(self.l1(F.dropout(h, 0.5, self.training)))
    # dim=1 made explicit: normalize over the class axis (an unspecified dim
    # is deprecated and ambiguous in newer PyTorch).
    y = F.log_softmax(h, dim=1)
    return y
static2.py 文件源码
python
阅读 21
收藏 0
点赞 0
评论 0
评论列表
文章目录