def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
    """Set up the penalized optimization problem and lazily compile its functions.

    Builds a penalty-method objective ``loss + penalty * constraint`` and stores
    lazily-compiled Theano functions for the loss, the constraint, the penalized
    loss, and the (value, flat-gradient) pair used by the L-BFGS routine.

    :param loss: Symbolic expression for the loss function.
    :param target: A parameterized object to optimize over. It should implement
        methods of the :class:`rllab.core.paramerized.Parameterized` class.
    :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the
        form f(*inputs) <= epsilon.
    :param inputs: A list of symbolic variables as inputs.
    :param constraint_name: Label under which the constraint is tracked.
    :return: No return value.
    """
    constraint_expr, constraint_bound = leq_constraint

    # Scalar penalty coefficient, fed in at call time alongside the regular inputs.
    penalty = TT.scalar("penalty")
    objective = loss + penalty * constraint_expr

    self._target = target
    self._max_constraint_val = constraint_bound
    self._constraint_name = constraint_name

    def opt_outputs():
        # Flat gradient of the penalized objective w.r.t. the trainable params;
        # both outputs are cast to float64 (presumably what the downstream
        # L-BFGS implementation expects — confirm against the optimize() caller).
        grads = theano.grad(
            objective, target.get_params(trainable=True), disconnected_inputs='ignore'
        )
        return [objective.astype('float64'),
                flatten_tensor_variables(grads).astype('float64')]

    # Each function is compiled on first access only (lazydict defers the lambdas).
    self._opt_fun = lazydict(
        f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
        f_constraint=lambda: compile_function(inputs, constraint_expr, log_name="f_constraint"),
        f_penalized_loss=lambda: compile_function(
            inputs=inputs + [penalty],
            outputs=[objective, loss, constraint_expr],
            log_name="f_penalized_loss",
        ),
        f_opt=lambda: compile_function(
            inputs=inputs + [penalty],
            outputs=opt_outputs(),
            log_name="f_opt",
        ),
    )
# --- Trailer scraped from the hosting web page (not code; commented out so the
# --- module remains valid Python). Translations of the original labels:
# penalty_lbfgs_optimizer.py 文件源码  (source of penalty_lbfgs_optimizer.py)
# python
# 阅读 77  (reads: 77)
# 收藏 0  (bookmarks: 0)
# 点赞 0  (likes: 0)
# 评论 0  (comments: 0)
# 评论列表  (comment list)
# 文章目录  (article table of contents)