def fit(self, weights, o_error, tpo):
    """Build RMSprop update pairs for the given weights.

    Computes symbolic gradients of the output error w.r.t. each weight,
    then applies the RMSprop rule: an exponentially decayed cache of
    squared gradients normalizes the learning-rate-scaled step.

    Args:
        weights: list of shared weight variables to update.
        o_error: symbolic scalar error to differentiate.
        tpo: training-parameter dict; reads "decay_rate" and "learn_rate".

    Returns:
        List of (shared_variable, new_expression) update tuples, suitable
        for passing to a Theano function's ``updates`` argument. Each
        weight contributes two entries: the weight itself and its
        gradient cache (``self.t_cache``).
    """
    grads = theano.grad(o_error, weights)
    decay = tpo["decay_rate"]
    lr = tpo["learn_rate"]
    pairs = []
    for cache, weight, grad in zip(self.t_cache, weights, grads):
        # Exponential moving average of squared gradients.
        cache_next = decay * cache + (1 - decay) * T.sqr(grad)
        # 0.1**8 kept verbatim (it is not bit-identical to 1e-8).
        step = (grad * lr) / T.sqrt(cache_next + 0.1**8)
        pairs.append((weight, weight - step))
        pairs.append((cache, cache_next))
    return pairs
###### ADADELTA
########################################
# 评论列表 (scraped-page artifact: "comment list")
# 文章目录 (scraped-page artifact: "article table of contents")