def backward(self, outputs, targets, weights, normalizer, criterion, regression=False):
    """Memory-efficient backward pass computed chunk by chunk.

    Splits ``outputs``, ``targets`` and ``weights`` into pieces of size
    ``self.batch_size`` along dimension ``self.dim``, delegates each piece
    to the parent class' ``backward``, then re-assembles the full gradient
    with ``torch.cat`` and sums the per-chunk losses.

    Args:
        outputs: model outputs tensor, split along ``self.dim``.
        targets: target tensor, split in lockstep with ``outputs``.
        weights: per-element weights, split in lockstep with ``outputs``.
        normalizer: loss normalizer forwarded to the parent ``backward``.
        criterion: loss criterion forwarded to the parent ``backward``.
        regression: flag forwarded unchanged to the parent ``backward``.

    Returns:
        Tuple ``(grad_output, loss)`` — the concatenated gradient w.r.t.
        ``outputs`` and the accumulated scalar loss over all chunks.
    """
    # Hoist the bound super-proxy once; each chunk uses the same parent impl.
    parent_backward = super(MemEfficientGenerator, self).backward
    chunk_triples = zip(
        torch.split(outputs, self.batch_size, self.dim),
        torch.split(targets, self.batch_size, self.dim),
        torch.split(weights, self.batch_size, self.dim),
    )
    grad_chunks = []
    total_loss = 0
    for out_chunk, tgt_chunk, wgt_chunk in chunk_triples:
        grad_chunk, chunk_loss = parent_backward(
            out_chunk, tgt_chunk, wgt_chunk, normalizer, criterion, regression)
        grad_chunks.append(grad_chunk)
        total_loss += chunk_loss
    # Re-join along the same split dimension so the gradient matches `outputs`.
    return torch.cat(grad_chunks, self.dim), total_loss
# NOTE(review): the two lines below were non-code blog-page scrape artifacts
# ("评论列表" = comment list, "文章目录" = article table of contents); kept
# here as a comment so the file remains valid Python.