import torch.nn.functional as F
from torch.autograd import Variable

import bioloss  # module providing dice_error (from the same project)


def test_nll(args, epoch, model, testLoader, optimizer, testF, weights):
    model.eval()
    test_loss = 0
    dice_loss = 0
    incorrect = 0
    numel = 0
    for data, target in testLoader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # volatile=True disables gradient tracking at test time (pre-0.4 PyTorch API)
        data, target = Variable(data, volatile=True), Variable(target)
        target = target.view(target.numel())
        numel += target.numel()
        output = model(data)
        test_loss += F.nll_loss(output, target, weight=weights).data[0]
        dice_loss += bioloss.dice_error(output, target)
        pred = output.data.max(1)[1]  # get the index of the max log-probability
        incorrect += pred.ne(target.data).cpu().sum()
    test_loss /= len(testLoader)  # loss function already averages over batch size
    dice_loss /= len(testLoader)
    err = 100. * incorrect / numel
    print('\nTest set: Average loss: {:.4f}, Error: {}/{} ({:.3f}%) Dice: {:.6f}\n'.format(
        test_loss, incorrect, numel, err, dice_loss))
    testF.write('{},{},{}\n'.format(epoch, test_loss, err))
    testF.flush()
    return err
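
On PyTorch 0.4 and later, the same evaluation pattern is usually written with torch.no_grad() and .item() instead of volatile Variables and .data[0]. The sketch below is an illustration of that style, not part of the original script: DummyNet, evaluate, and the random data are placeholders, and the bioloss dice term is omitted so the snippet stays self-contained and runnable.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset


class DummyNet(nn.Module):
    """Toy classifier standing in for the segmentation model."""
    def __init__(self, num_classes=2):
        super().__init__()
        self.fc = nn.Linear(16, num_classes)

    def forward(self, x):
        return F.log_softmax(self.fc(x), dim=1)


def evaluate(model, loader, weights=None):
    model.eval()
    test_loss, incorrect, numel = 0.0, 0, 0
    with torch.no_grad():  # no autograd bookkeeping at test time
        for data, target in loader:
            target = target.view(target.numel())
            output = model(data)
            test_loss += F.nll_loss(output, target, weight=weights).item()
            pred = output.max(1)[1]  # index of the max log-probability
            incorrect += pred.ne(target).sum().item()
            numel += target.numel()
    test_loss /= len(loader)  # nll_loss already averages over the batch
    err = 100. * incorrect / numel
    print('Average loss: {:.4f}, Error: {}/{} ({:.3f}%)'.format(
        test_loss, incorrect, numel, err))
    return err


if __name__ == '__main__':
    data = torch.randn(64, 16)
    target = torch.randint(0, 2, (64,))
    loader = DataLoader(TensorDataset(data, target), batch_size=16)
    evaluate(DummyNet(), loader)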