def train_(self, epoch):
    """Run one training epoch over the module-level `train_loader`.

    For each batch: forward pass, NLL loss, backward pass, optimizer step,
    then per-batch accuracy. Every `args.log_interval` batches the loss and
    batch accuracy are printed and the accuracy is appended to
    `self.train_acc`.

    Args:
        epoch: 1-based epoch index, used only for the progress printout.
    """
    self.train()  # switch to training mode (enables dropout/batch-norm updates)
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # Variable is a no-op passthrough on PyTorch >= 0.4; kept for
        # compatibility with the rest of this (legacy-style) file.
        data, target = Variable(data), Variable(target)
        self.optimizer.zero_grad()
        output = self(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        self.optimizer.step()

        # Per-batch accuracy: `correct` is recomputed each batch, so plain
        # assignment (the original `correct = 0; correct += ...` was misleading).
        pred = output.data.max(1)[1]  # index of the max log-probability
        # .item() converts the 0-dim sum tensor to a Python int so that
        # `accuracy` (and the values stored in self.train_acc) are plain floats.
        correct = pred.eq(target.data).cpu().sum().item()
        accuracy = 100. * correct / len(data)

        if batch_idx % args.log_interval == 0:
            # loss.item() replaces loss.data[0], which raises IndexError on
            # PyTorch >= 0.5 (loss is a 0-dimensional tensor).
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {}/{} ({:.4f}%)'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(),
                correct, len(data),
                accuracy))
            # NOTE(review): original indentation was lost in extraction; the
            # append is assumed to belong to the logging branch — confirm.
            self.train_acc.append(accuracy)
backpropagation.py — source file
python
Reads: 28
Favorites: 0
Likes: 0
Comments: 0
Comment list
Table of contents