import time

import torch


def run_epoch(loader, model, criterion, optimizer, epoch=0, n_epochs=0, train=True):
    """Run one training or evaluation epoch; return (time, loss, error) meter values."""
    time_meter = Meter(name='Time', cum=True)
    loss_meter = Meter(name='Loss', cum=False)
    error_meter = Meter(name='Error', cum=False)

    if train:
        model.train()
        print('Training')
    else:
        model.eval()
        print('Evaluating')

    end = time.time()
    for i, (inputs, targets) in enumerate(loader):
        if train:
            optimizer.zero_grad()

        # Forward pass (gradients are only tracked in training mode)
        inputs = inputs.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        with torch.set_grad_enabled(train):
            output = model(inputs)
            loss = criterion(output, targets)

        # Backward pass
        if train:
            loss.backward()
            optimizer.step()
            optimizer.n_iters = optimizer.n_iters + 1 if hasattr(optimizer, 'n_iters') else 1

        # Accounting: top-1 error rate for this batch
        _, predictions = torch.topk(output, 1)
        error = 1 - torch.eq(predictions.squeeze(1), targets).float().mean().item()
        batch_time = time.time() - end
        end = time.time()

        # Log timing, loss, and error
        time_meter.update(batch_time)
        loss_meter.update(loss.item())
        error_meter.update(error)
        print(' '.join([
            '%s: (Epoch %d of %d) [%04d/%04d]' % ('Train' if train else 'Eval',
                                                  epoch, n_epochs, i + 1, len(loader)),
            str(time_meter),
            str(loss_meter),
            str(error_meter),
        ]))

    return time_meter.value(), loss_meter.value(), error_meter.value()
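
For reference, here is a minimal sketch of how run_epoch might be driven from an outer training loop. The dataset (CIFAR-10 via torchvision), the ResNet-18 model, and the hyperparameters below are assumptions for illustration only, not values from the original post; substitute the actual model and data pipeline used there.

import torch
import torchvision
import torchvision.transforms as transforms

# Hypothetical data/model setup for demonstration purposes only.
train_set = torchvision.datasets.CIFAR10('./data', train=True, download=True,
                                         transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, num_workers=2)

model = torchvision.models.resnet18(num_classes=10).cuda()
criterion = torch.nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

n_epochs = 10
for epoch in range(1, n_epochs + 1):
    # Train for one epoch and report the meter values returned by run_epoch.
    train_time, train_loss, train_error = run_epoch(loader, model, criterion, optimizer,
                                                    epoch=epoch, n_epochs=n_epochs, train=True)
    print('Epoch %d: loss %.4f, error %.4f' % (epoch, train_loss, train_error))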