def test(model, generator, batch_num, epoch):
    model.eval()
    test_loss = 0
    correct = 0
    for batch_idx in range(batch_num):
        data, target = next(generator)
        data, target = torch.from_numpy(data), torch.from_numpy(target)
        # convert BHWC to BCHW
        data = data.permute(0, 3, 1, 2)
        data, target = data.float().cuda(), target.long().cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        # accumulate the (batch-averaged) cross-entropy loss
        test_loss += F.cross_entropy(output, target).data[0]
        pred = output.data.max(1)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()
    test_loss /= batch_num  # loss function already averages over batch size
    # n_test is the total number of test samples, defined outside this function
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, n_test, 100. * correct / n_test))
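
# A minimal, self-contained sketch of how test() might be driven, assuming the
# Variable-era PyTorch API used above, an available CUDA device, and batches
# yielded as NumPy arrays in BHWC layout. TinyCNN, make_batches, x_test, y_test
# and the batch size are illustrative placeholders, not part of the original post.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # torch, F and Variable are also what test() relies on

class TinyCNN(nn.Module):
    def __init__(self, num_classes=10):
        super(TinyCNN, self).__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.fc = nn.Linear(8 * 32 * 32, num_classes)

    def forward(self, x):
        x = F.relu(self.conv(x))
        return self.fc(x.view(x.size(0), -1))

def make_batches(x, y, batch_size):
    # endless generator, so test() can call next() batch_num times per epoch
    while True:
        for i in range(0, len(x), batch_size):
            yield x[i:i + batch_size], y[i:i + batch_size]

x_test = np.random.rand(256, 32, 32, 3).astype(np.float32)   # dummy BHWC images
y_test = np.random.randint(0, 10, size=256)                   # dummy class labels
n_test = len(x_test)             # test() reads n_test as a module-level global
gen = make_batches(x_test, y_test, batch_size=64)
test(TinyCNN().cuda(), gen, batch_num=n_test // 64, epoch=0)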
# ---
# Normal CNN