def forward(self, batch):
    # batch.question has shape (sequence length, batch size)
    inputs = self.embed(batch.question)  # (sequence length, batch size, embedding dimension)
    batch_size = inputs.size(1)
    state_shape = self.config.n_cells, batch_size, self.config.d_hidden
    if self.config.rnn_type.lower() == 'gru':
        # A GRU takes a single initial hidden state tensor
        h0 = autograd.Variable(inputs.data.new(*state_shape).zero_())
        outputs, ht = self.rnn(inputs, h0)
    else:
        # An LSTM takes a (hidden state, cell state) tuple
        h0 = c0 = autograd.Variable(inputs.data.new(*state_shape).zero_())
        outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
    # `outputs` has shape (sequence length, batch size, hidden size * num directions)
    # Project every time step onto the tag space, then normalize with log-softmax
    tags = self.hidden2tag(outputs.view(-1, outputs.size(2)))
    scores = F.log_softmax(tags, dim=1)
    return scores
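
For context, this forward pass assumes the module was built with an embedding layer (self.embed), a GRU or LSTM (self.rnn), and a linear projection onto the tag space (self.hidden2tag). The sketch below shows one plausible constructor under those assumptions; the class name and the config fields n_embed, d_embed, n_layers, birnn, and n_tags are hypothetical illustrations, while n_cells, d_hidden, and rnn_type come from the forward method above. Note that n_cells must equal num_layers * num_directions so the zero initial state has the right leading dimension.

import torch.nn as nn
import torch.nn.functional as F
from torch import autograd

class RNNTagger(nn.Module):  # hypothetical class name
    def __init__(self, config):
        super().__init__()
        self.config = config
        # Token embedding table; n_embed and d_embed are assumed config fields.
        self.embed = nn.Embedding(config.n_embed, config.d_embed)
        # Choose the recurrent layer according to config.rnn_type.
        rnn_cls = nn.GRU if config.rnn_type.lower() == 'gru' else nn.LSTM
        self.rnn = rnn_cls(config.d_embed, config.d_hidden,
                           num_layers=config.n_layers,
                           bidirectional=config.birnn)
        # Project (hidden size * num directions) onto the tag vocabulary,
        # matching outputs.size(2) in forward().
        num_directions = 2 if config.birnn else 1
        self.hidden2tag = nn.Linear(config.d_hidden * num_directions,
                                    config.n_tags)

Since the scores are returned flattened to (sequence length * batch size, n_tags), they can be reshaped back with scores.view(seq_len, batch_size, -1) if a per-token loss over the original layout is needed.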