def get_value(self, *args, **kwargs):
    """Compute the adaptive step length from a (natural) gradient pair.

    Implements the step size of a trust-region-style update: given the
    gradient g and natural gradient g_n, the step length is
    1 / (2 * sqrt(g . g_n / (4 * eps))), which bounds the expected change
    by ``self._eps``.

    Args:
        *args: either ``(gradient, nat_gradient)`` or ``(gradient,)``.
            With a single argument, the gradient is used as its own
            natural gradient (plain gradient ascent/descent case).

    Returns:
        float: the adaptive step length.

    Raises:
        ValueError: if no gradient is provided.
    """
    if len(args) == 2:
        gradient = args[0]
        nat_gradient = args[1]
        # np.asscalar was removed in NumPy 1.23; float(...) extracts the
        # scalar from the 0-d / scalar dot-product result portably.
        tmp = float(gradient.dot(nat_gradient))
        lambda_v = np.sqrt(tmp / (4. * self._eps))
        # For numerical stability: avoid division by (near-)zero below.
        lambda_v = max(lambda_v, 1e-8)
        step_length = 1. / (2. * lambda_v)

        return step_length
    elif len(args) == 1:
        # Single-gradient case: treat the gradient as its own natural
        # gradient (identity metric).
        return self.get_value(args[0], args[0], **kwargs)
    else:
        raise ValueError('Adaptive parameters needs gradient or gradient '
                         'and natural gradient')