def forward(self, x, lengths):
    """Run the RNN classifier over a batch of token-id sequences.

    Args:
        x: LongTensor of token ids, shape (batch, seq_len); rows index
            into ``self.initial_embeddings`` (a numpy array of vectors).
        lengths: per-example sequence lengths. Currently unused — the
            RNN consumes the full padded batch. Kept for interface
            compatibility with callers.

    Returns:
        Tensor of log-probabilities over classes, shape (batch, n_classes).
    """
    batch_size = x.size(0)
    # Embedding lookup happens in numpy (the table is a numpy array),
    # then the result is handed to torch as a tensor.
    emb = torch.from_numpy(self.initial_embeddings.take(x.numpy(), 0))
    # BUG FIX: the original created h0 via torch.FloatTensor(...), which
    # returns UNINITIALIZED memory — the RNN started from garbage state.
    # The initial hidden state must be zeros. (The original also built an
    # unused, equally-uninitialized `inp` tensor and an unused `max_len`;
    # both removed.)
    h0 = torch.zeros(1, batch_size, self.model_dim)
    _, hn = self.rnn(emb, h0)
    # hn: (num_layers * num_directions = 1, batch, model_dim).
    # BUG FIX: squeeze(0), not squeeze() — a bare squeeze() would also
    # drop the batch dimension when batch_size == 1.
    h = F.relu(self.l0(F.dropout(hn.squeeze(0), 0.5, self.training)))
    h = F.relu(self.l1(F.dropout(h, 0.5, self.training)))
    # Explicit dim: implicit-dim log_softmax is deprecated/removed.
    y = F.log_softmax(h, dim=-1)
    return y
Source code from file: fakestatic.py
Language: python
Views: 35
Favorites: 0
Likes: 0
Comments: 0
Comment list
Table of contents