import numpy as np
import theano
import theano.tensor as T


def adadelta(loss, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
    # flat_unflat_grads (defined elsewhere in this post) returns a shared
    # variable holding the flattened gradient, the symbolic flat gradient,
    # and per-parameter views into the stored flat gradient.
    grad_shared_flat, flat_grad, unflat_grads = flat_unflat_grads(loss, params)
    # First step: write the current gradient into the shared variable.
    grad_updates = [(grad_shared_flat, flat_grad)]
    one = T.constant(1)
    param_updates = list()
    for p, g in zip(params, unflat_grads):
        value = p.get_value(borrow=True)
        # Running average of squared gradients, E[g^2].
        accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                             broadcastable=p.broadcastable)
        # Running average of squared parameter updates, E[dx^2].
        delta_accu = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                   broadcastable=p.broadcastable)
        # E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
        accu_new = rho * accu + (one - rho) * g ** 2
        # Scale-free step: dx_t = g_t * RMS(dx)_{t-1} / RMS(g)_t,
        # which is why learning_rate defaults to 1.0 for Adadelta.
        update = g * T.sqrt(delta_accu + epsilon) / T.sqrt(accu_new + epsilon)
        # E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
        delta_accu_new = rho * delta_accu + (one - rho) * update ** 2
        param_updates += [(accu, accu_new)]
        param_updates += [(p, p - learning_rate * update)]
        param_updates += [(delta_accu, delta_accu_new)]
    return grad_updates, param_updates, grad_shared_flat
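
adadelta returns two separate update lists so that training can be split into two compiled functions: one that evaluates the loss and stores the flattened gradient, and one that applies the parameter updates from that stored gradient. Below is a minimal usage sketch; the toy least-squares model, the variable names (W, f_grad, f_update), and the random batch data are assumptions for illustration only, and flat_unflat_grads is the helper used above, assumed defined elsewhere in this post.

# Hypothetical toy model for illustration only.
x = T.matrix('x')
y = T.vector('y')
W = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='W')
loss = T.mean((T.dot(x, W) - y) ** 2)

grad_updates, param_updates, grad_shared_flat = adadelta(loss, [W])

# f_grad evaluates the loss and writes the gradient into grad_shared_flat;
# f_update then applies the Adadelta step using that stored gradient.
f_grad = theano.function([x, y], loss, updates=grad_updates)
f_update = theano.function([], [], updates=param_updates)

# Assumed random training batch, purely for demonstration.
rng = np.random.RandomState(0)
X_batch = rng.randn(8, 3).astype(theano.config.floatX)
y_batch = rng.randn(8).astype(theano.config.floatX)
for _ in range(100):
    f_grad(X_batch, y_batch)   # step 1: compute and store the gradient
    f_update()                 # step 2: update W, accu, and delta_accu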