def fake_blackbox_optimizer(self):
    # Run the graph once to obtain the true gradients together with the
    # current losses, L2 distances, model scores and adversarial images.
    # Using self.grad_op (true gradients) is what makes this the "fake"
    # black-box optimizer: no gradient estimation is involved.
    true_grads, losses, l2s, loss1, loss2, scores, nimgs = self.sess.run(
        [self.grad_op, self.loss, self.l2dist, self.loss1, self.loss2,
         self.output, self.newimg],
        feed_dict={self.modifier: self.real_modifier})
    # ADAM update
    grad = true_grads[0].reshape(-1)
    epoch = self.adam_epoch[0]
    # biased first and second moment estimates
    mt = self.beta1 * self.mt + (1 - self.beta1) * grad
    vt = self.beta2 * self.vt + (1 - self.beta2) * np.square(grad)
    # both bias-correction factors folded into one scalar
    corr = (math.sqrt(1 - self.beta2 ** epoch)) / (1 - self.beta1 ** epoch)
    # m is a *view* of self.real_modifier, so this update is in-place
    m = self.real_modifier.reshape(-1)
    m -= self.LEARNING_RATE * corr * (mt / (np.sqrt(vt) + 1e-8))
    self.mt = mt
    self.vt = vt
    # without the tanh change of variables, project the modifier back
    # into its box constraints
    if not self.use_tanh:
        m_proj = np.maximum(np.minimum(m, self.modifier_up), self.modifier_down)
        np.copyto(m, m_proj)
    self.adam_epoch[0] = epoch + 1
    return losses[0], l2s[0], loss1[0], loss2[0], scores[0], nimgs[0]
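
For reference, the update above is the standard bias-corrected Adam step with both correction factors folded into the single scalar corr, i.e. theta <- theta - lr * sqrt(1 - beta2^t) / (1 - beta1^t) * mt / (sqrt(vt) + eps). Below is a minimal standalone sketch of the same arithmetic on a toy quadratic, assuming nothing but NumPy; adam_step, lr and the toy loss are illustrative names for this sketch, not part of the attack code.

import math

import numpy as np

def adam_step(theta, grad, mt, vt, t, lr=0.1, beta1=0.9, beta2=0.999, eps=1e-8):
    # biased moment estimates, as in the optimizer above
    mt = beta1 * mt + (1 - beta1) * grad
    vt = beta2 * vt + (1 - beta2) * np.square(grad)
    # bias correction folded into one scalar, matching `corr` above
    corr = math.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    theta = theta - lr * corr * mt / (np.sqrt(vt) + eps)
    return theta, mt, vt

# toy problem: minimize ||theta - 3||^2, whose gradient is 2 * (theta - 3)
theta = np.zeros(4)
mt, vt = np.zeros_like(theta), np.zeros_like(theta)
for t in range(1, 2001):
    grad = 2 * (theta - 3.0)
    theta, mt, vt = adam_step(theta, grad, mt, vt, t)
print(theta)  # approaches [3. 3. 3. 3.]

One small side note on the projection step: np.maximum(np.minimum(m, up), down) is equivalent to np.clip(m, down, up); the explicit form simply mirrors the original implementation.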