def eval(data_iter, model, args, scheduler):
model.eval()
corrects, avg_loss = 0, 0
for batch in data_iter:
feature, target = batch.text, batch.label
feature.data.t_(), target.data.sub_(1) # batch first, index align
if args.cuda:
feature, target = feature.cuda(), feature.cuda()
logit = model(feature)
loss = F.cross_entropy(logit, target, size_average=False)
# scheduler.step(loss.data[0])
# if args.init_clip_max_norm is not None:
# # print("aaaa {} ".format(args.init_clip_max_norm))
# utils.clip_grad_norm(model.parameters(), max_norm=args.init_clip_max_norm)
avg_loss += loss.data[0]
corrects += (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
size = len(data_iter.dataset)
avg_loss = loss.data[0]/size
# accuracy = float(corrects)/size * 100.0
accuracy = 100.0 * corrects/size
model.train()
print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
accuracy,
corrects,
size))
# Source file: train_ALL_CNN_1.py
# (blog-scrape metadata, translated: python · 22 reads · 0 favorites ·
#  0 likes · 0 comments · comment list · table of contents)