import torch
import torch.nn.functional as F


def eval(data_iter, model, args, scheduler):
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        feature, target = batch.text, batch.label
        # Labels in the dataset are 1-based; shift them to 0-based class indices.
        target.data.sub_(1)
        if args.cuda:
            feature, target = feature.cuda(), target.cuda()
        # Re-initialise the LSTM hidden state for every batch; the last batch may be
        # smaller than args.batch_size, so size the state from the actual batch.
        model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
        if feature.size(1) != args.batch_size:
            model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
        logit = model(feature)
        # Sum (rather than average) the loss so it can be normalised over the
        # whole dataset after the loop.
        loss = F.cross_entropy(logit, target, size_average=False)
        # scheduler.step(loss.data[0])  # optionally step the LR scheduler on validation loss
        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    avg_loss /= size
    accuracy = float(corrects) / size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                      accuracy,
                                                                      corrects,
                                                                      size))
Source file: train_ALL_LSTM.py
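For reference, below is a small, self-contained sketch of the accuracy bookkeeping used inside the loop above. The logits and labels are made-up values (not taken from the actual model or dataset); the logic mirrors the corrects/accuracy computation in eval.

import torch

# Hypothetical logits for a batch of 4 examples over 3 classes, plus gold labels.
logit = torch.Tensor([[2.0, 0.1, 0.3],
                      [0.2, 1.5, 0.1],
                      [0.3, 0.2, 0.9],
                      [1.1, 0.4, 0.2]])
target = torch.LongTensor([0, 1, 2, 1])

# torch.max(logit, 1)[1] returns the column index of the largest logit in each
# row, i.e. the predicted class; comparing against the gold labels and summing
# counts the correct predictions in the batch.
corrects = (torch.max(logit, 1)[1] == target).sum()
accuracy = float(corrects) / target.size(0) * 100.0
print('acc: {:.4f}%({}/{})'.format(accuracy, int(corrects), target.size(0)))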