# Imports used below; `model`, `optimizer`, `train_loader` and `args` are assumed
# to be defined earlier in the script (the loop follows the standard PyTorch MNIST example).
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt


def train(epoch):
    if epoch > 2:
        import pdb; pdb.set_trace()
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # 1. Wrap the input with requires_grad=True so Torch computes and keeps
        #    the gradient with respect to the input instead of discarding it.
        data, target = Variable(data, requires_grad=True), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))
        # 2. Read the `.grad` attribute of the input variable.
        #    It is a Torch tensor, so we convert it to numpy with `.grad.data.numpy()`.
        adversarial_example = data.grad.data.numpy()
        print(adversarial_example.max())
        if epoch > 2:
            # 3. Let's plot it, because we can!
            plt.clf()
            plt.subplot(121); plt.imshow(data.data.numpy()[0, 0, ...], cmap='gray_r')
            plt.subplot(122); plt.imshow(adversarial_example[0, 0, ...]); plt.colorbar()
            plt.show(block=False)
            plt.pause(0.01)
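If you want to go one step further and actually perturb the input with this gradient, a common recipe is the fast gradient sign method (Goodfellow et al.): step a small distance `epsilon` in the direction of the sign of the input gradient and clamp the result back into the valid pixel range. The snippet below is only a minimal sketch of that idea, not part of the loop above; `epsilon` and the clamping range of [0, 1] are hypothetical choices and assume unnormalized inputs.

import torch

# Hypothetical step size; the original snippet does not fix a value.
epsilon = 0.1

# Fast-gradient-sign perturbation of the current batch, using the same
# old-style Variable API as the training loop above.
perturbed = data.data + epsilon * torch.sign(data.grad.data)
perturbed = torch.clamp(perturbed, 0, 1)          # keep pixels in the assumed [0, 1] range
adv_output = model(Variable(perturbed))
adv_pred = adv_output.data.max(1)[1]              # predicted class on the perturbed batch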