def evaluate_performance(ladder, valid_loader, e, agg_cost_scaled, agg_supervised_cost_scaled,
                         agg_unsupervised_cost_scaled, args):
    """Evaluate the ladder network's clean encoder on the validation set.

    Runs ``ladder.forward_encoders_clean`` over every batch, takes the argmax
    over axis 1 of the output as the predicted class, and prints the epoch's
    aggregate costs together with the validation accuracy.

    Args:
        ladder: model exposing ``forward_encoders_clean(data)``, whose output's
            ``.data`` converts to a 2-D array of per-class scores.
        valid_loader: iterable yielding ``(data, target)`` batch pairs.
        e: zero-based epoch index (printed one-based as ``e + 1``).
        agg_cost_scaled: pre-computed total cost, printed as-is.
        agg_supervised_cost_scaled: pre-computed supervised cost, printed as-is.
        agg_unsupervised_cost_scaled: pre-computed unsupervised cost, printed as-is.
        args: namespace with a boolean ``cuda`` attribute.

    Returns:
        Validation accuracy in ``[0, 1]`` (0.0 if the loader yields no batches).
    """
    correct = 0.
    total = 0.
    for batch_idx, (data, target) in enumerate(valid_loader):
        if args.cuda:
            data = data.cuda()
        data, target = Variable(data), Variable(target)
        output = ladder.forward_encoders_clean(data)
        # TODO: Do away with the below hack for GPU tensors.
        if args.cuda:
            output = output.cpu()
            target = target.cpu()
        output = output.data.numpy()
        preds = np.argmax(output, axis=1)
        target = target.data.numpy()
        correct += np.sum(target == preds)
        total += target.shape[0]
    # Guard the division: an empty validation loader previously raised
    # ZeroDivisionError here. Report 0.0 accuracy in that case.
    accuracy = correct / total if total else 0.
    print("Epoch:", e + 1, "\t",
          "Total Cost:", "{:.4f}".format(agg_cost_scaled), "\t",
          "Supervised Cost:", "{:.4f}".format(agg_supervised_cost_scaled), "\t",
          "Unsupervised Cost:", "{:.4f}".format(agg_unsupervised_cost_scaled), "\t",
          "Validation Accuracy:", accuracy)
    return accuracy
# NOTE(review): removed trailing page-scrape residue that was not code and broke
# parsing: "评论列表" ("comment list") and "文章目录" ("article table of contents")
# — blog navigation text accidentally copied in with the source.