Python optimize() example source code

optimizer.py (project: KDDCUP2016, author: hugochan)
def init(self, init_points, return_log):
        '''A function to perform all initialization and clear the optimize methods - To be constructed'''

        if self.randomstate is not None:
            numpy.random.seed(self.randomstate)

        print('Optimization procedure is initializing at %i random points.' % init_points)

        # Sample some points at random to define xtrain.
        xtrain = numpy.asarray([numpy.random.uniform(x[0], x[1], size=init_points) for x in self.log_bounds]).T
        ytrain = []
        for x in xtrain:
            ytrain.append(self.f(dict(zip(self.keys, return_log(x)))))
            print('%d points initialized.' % len(ytrain))

        ytrain = numpy.asarray(ytrain)

        print('Optimization procedure is done initializing.')

        return xtrain, ytrain

        # ----------------------- // ----------------------- # ----------------------- // ----------------------- #
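The initialization above samples init_points uniformly within per-dimension bounds and evaluates the objective at each sample. A minimal, self-contained sketch of the same idea (the bounds and objective below are hypothetical, not part of the project):

import numpy

def random_init(f, bounds, init_points, seed=None):
    # bounds: one (low, high) pair per dimension; f: objective taking a 1-D array
    rng = numpy.random.RandomState(seed)
    xtrain = numpy.asarray([rng.uniform(low, high, size=init_points) for low, high in bounds]).T
    ytrain = numpy.asarray([f(x) for x in xtrain])
    return xtrain, ytrain

# Hypothetical 2-D objective evaluated at 5 random points
xs, ys = random_init(lambda x: -numpy.sum((x - 0.5) ** 2), [(0.0, 1.0), (0.0, 1.0)], 5, seed=0)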
lbfgs_optimizer.py (project: third_person_im, author: bstadie)
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
lbfgs_optimizer.py (project: third_person_im, author: bstadie)
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
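The wrapper passed to scipy.optimize.fmin_l_bfgs_b above returns both the loss and the flattened gradient, so no approx_grad is needed. A minimal standalone call with the same (value, gradient) convention, on a hypothetical quadratic objective:

import numpy as np
import scipy.optimize

def loss_and_grad(x):
    # hypothetical objective: returns (loss, gradient), mirroring f_opt above
    loss = np.sum((x - 1.0) ** 2)
    grad = 2.0 * (x - 1.0)
    return loss, grad

x_opt, loss_opt, info = scipy.optimize.fmin_l_bfgs_b(
    func=loss_and_grad, x0=np.zeros(3), maxiter=10)
# info['warnflag'] == 0 means the optimizer converged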
penalty_lbfgs_optimizer.py (project: third_person_im, author: bstadie)
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
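The penalized loss compiled above implements a penalty method for the constraint f(*inputs) <= epsilon: the constraint term is added to the loss with a penalty coefficient, and the surrounding optimizer (not shown in this excerpt) typically adjusts that coefficient between L-BFGS runs. A minimal numeric sketch of the idea, with a hypothetical loss and constraint:

import numpy as np
import scipy.optimize

def loss(x):
    return np.sum((x - 2.0) ** 2)        # hypothetical loss

def constraint(x):
    return np.sum(x ** 2)                # require constraint(x) <= epsilon

epsilon, penalty = 1.0, 1.0
for _ in range(10):
    penalized = lambda x, p=penalty: loss(x) + p * constraint(x)
    x_opt, _, _ = scipy.optimize.fmin_l_bfgs_b(penalized, x0=np.zeros(2), approx_grad=True)
    if constraint(x_opt) <= epsilon:
        break
    penalty *= 2.0                       # tighten the penalty until the constraint holds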
reps.py (project: third_person_im, author: bstadie)
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
lbfgs_optimizer.py (project: third_person_im, author: bstadie)
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
lbfgs_optimizer.py (project: rllabplusplus, author: shaneshixiang)
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
lbfgs_optimizer.py (project: rllabplusplus, author: shaneshixiang)
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
penalty_lbfgs_optimizer.py (project: rllabplusplus, author: shaneshixiang)
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
reps.py (project: rllabplusplus, author: shaneshixiang)
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
lbfgs_optimizer.py (project: rllabplusplus, author: shaneshixiang)
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
igsfa_node.py (project: cuicuilco, author: AlbertoEsc)
def non_linear_inverse(self, y, verbose=False):
        """Non-linear inverse approximation method. """
        x_lin = self.linear_inverse(y)
        rmse_lin = ((y - self.execute(x_lin)) ** 2).sum(axis=1).mean() ** 0.5
        # scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08,
        # xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None)
        x_nl = numpy.zeros_like(x_lin)
        y_dim = y.shape[1]
        x_dim = x_lin.shape[1]
        if y_dim < x_dim:
            num_zeros_filling = x_dim - y_dim
        else:
            num_zeros_filling = 0
        if verbose:
            print("x_dim=", x_dim, "y_dim=", y_dim, "num_zeros_filling=", num_zeros_filling)
        y_long = numpy.zeros(y_dim + num_zeros_filling)

        for i, y_i in enumerate(y):
            y_long[0:y_dim] = y_i
            if verbose:
                print("x_0=", x_lin[i])
                print("y_long=", y_long)
            plsq = scipy.optimize.leastsq(func=f_residual, x0=x_lin[i], args=(self, y_long), full_output=False)
            x_nl_i = plsq[0]
            if verbose:
                print("x_nl_i=", x_nl_i, "plsq[1]=", plsq[1])
            if plsq[1] != 2:
                print("Quitting: plsq[1]=", plsq[1])
                # quit()
            x_nl[i] = x_nl_i
            print("|E_lin(%d)|=" % i, ((y_i - self.execute(x_lin[i].reshape((1, -1)))) ** 2).sum() ** 0.5)
            print("|E_nl(%d)|=" % i, ((y_i - self.execute(x_nl_i.reshape((1, -1)))) ** 2).sum() ** 0.5)
        rmse_nl = ((y - self.execute(x_nl)) ** 2).sum(axis=1).mean() ** 0.5
        print("rmse_lin(all samples)=", rmse_lin, "rmse_nl(all samples)=", rmse_nl)
        return x_nl
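scipy.optimize.leastsq takes a residual function (returning one residual per observation), an initial guess, and extra args, and returns the solution plus an integer status flag. A minimal standalone fit of a hypothetical straight line, following the same call pattern as f_residual above:

import numpy as np
import scipy.optimize

def residuals(params, x, y):
    a, b = params
    return y - (a * x + b)               # one residual per sample

x = np.linspace(0.0, 1.0, 20)
y = 3.0 * x + 0.5                        # hypothetical noiseless data
params_opt, ier = scipy.optimize.leastsq(residuals, x0=[1.0, 0.0], args=(x, y))
# ier in (1, 2, 3, 4) indicates that a solution was found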
inversion.py (project: cuicuilco, author: AlbertoEsc)
def invert_exp_funcs2(exp_x_noisy, dim_x, exp_funcs, distance=sfa_libs.distance_best_squared_Euclidean,
                      use_hint=False, max_steady_factor=5, delta_factor=0.7, min_delta=0.0001, k=0.5, verbose=False):
    """ Function that approximates a preimage of exp_x_noisy notice 
    that distance, max_steady_factor, delta, min_delta are deprecated and useless
    """
    num_samples = exp_x_noisy.shape[0]

    if isinstance(use_hint, numpy.ndarray):
        if verbose:
            print("Using suggested approximation!")
        app_x = use_hint.copy()
    elif use_hint:
        if verbose:
            print("Using lowest dim_x=%d elements of input for first approximation!" % (dim_x))
        app_x = exp_x_noisy[:, 0:dim_x].copy()
    else:
        app_x = numpy.random.normal(size=(num_samples, dim_x))

    for row in range(num_samples):
        # app_x_row = app_x[row].reshape(1, dim_x)
        # exp_x_noisy_row = exp_x_noisy[row].reshape(1, dim_exp_x)
        # app_exp_x_row = app_exp_x[row].reshape(1, dim_exp_x)
        # Definition:       scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0,
        #                                         ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0,
        #                                         factor=100, diag=None, warning=True)
        plsq = scipy.optimize.leastsq(residuals, app_x[row], args=(exp_x_noisy[row], exp_funcs, app_x[row], k),
                                      ftol=1.49012e-06, xtol=1.49012e-06, gtol=0.0, maxfev=50*dim_x, epsfcn=0.0,
                                      factor=1.0)
        app_x[row] = plsq[0]

    app_exp_x = sfa_libs.apply_funcs_to_signal(exp_funcs, app_x)
    return app_x, app_exp_x
more_nodes.py (project: cuicuilco, author: AlbertoEsc)
def non_linear_inverse(self, y, verbose=False):
        x_lin = self.linear_inverse(y)
        rmse_lin = ((y - self.execute(x_lin)) ** 2).sum(axis=1).mean() ** 0.5
        #        scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08,
        # xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None)
        x_nl = numpy.zeros_like(x_lin)
        y_dim = y.shape[1]
        x_dim = x_lin.shape[1]
        if y_dim < x_dim:
            num_zeros_filling = x_dim - y_dim
        else:
            num_zeros_filling = 0
        if verbose:
            print("x_dim=", x_dim, "y_dim=", y_dim, "num_zeros_filling=", num_zeros_filling)
        y_long = numpy.zeros(y_dim + num_zeros_filling)

        for i, y_i in enumerate(y):
            y_long[0:y_dim] = y_i
            if verbose:
                print("x_0=", x_lin[i])
                print("y_long=", y_long)
            plsq = scipy.optimize.leastsq(func=f_residual, x0=x_lin[i], args=(self, y_long), full_output=False)
            x_nl_i = plsq[0]
            if verbose:
                print("x_nl_i=", x_nl_i, "plsq[1]=", plsq[1])
            if plsq[1] != 2:
                print("Quitting: plsq[1]=", plsq[1])
                # quit()
            x_nl[i] = x_nl_i
            print("|E_lin(%d)|=" % i, ((y_i - self.execute(x_lin[i].reshape((1, -1)))) ** 2).sum() ** 0.5)
            print("|E_nl(%d)|=" % i, ((y_i - self.execute(x_nl_i.reshape((1, -1)))) ** 2).sum() ** 0.5)
        rmse_nl = ((y - self.execute(x_nl)) ** 2).sum(axis=1).mean() ** 0.5
        print("rmse_lin(all samples)=", rmse_lin, "rmse_nl(all samples)=", rmse_nl)
        return x_nl
Selection.py (project: fitdadi, author: LohmuellerLab)
def optimize_log(p0, data, model_func, sel_dist, theta, lower_bound=None, 
                 upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-3, 
                 gtol=1e-5, multinom=False, maxiter=None, full_output=False,
                 func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,
                 output_file=None):

    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound,
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = Inference._project_params_down(p0, fixed_params)
    outputs = scipy.optimize.fmin_bfgs(_object_func_log, 
                                       numpy.log(p0), epsilon=epsilon,
                                       args = args, gtol=gtol, 
                                       full_output=True,
                                       disp=False,
                                       maxiter=maxiter)
    xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(numpy.exp(xopt), fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return [-fopt, xopt]
    else:
        return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag
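The call above optimizes in log-parameter space with scipy.optimize.fmin_bfgs and unpacks its full_output tuple. A minimal standalone call with the same unpacking, on a hypothetical quadratic objective:

import numpy
import scipy.optimize

def objective(p, target):
    return numpy.sum((p - target) ** 2)   # hypothetical objective

outputs = scipy.optimize.fmin_bfgs(objective, numpy.zeros(3),
                                   args=(numpy.ones(3),), gtol=1e-5,
                                   full_output=True, disp=False)
xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs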
Selection.py (project: fitdadi, author: LohmuellerLab)
def optimize(p0, data, model_func, sel_dist, theta, lower_bound=None,
             upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-3, 
             gtol=1e-5, multinom=False, maxiter=None, full_output=False,
             func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,
             output_file=None):
    """
    Optimizer for use with distributions where log transformations do not work,
    e.g. when gamma can be both positive and negative.
    """
    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound, 
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = _project_params_down(p0, fixed_params)
    outputs = scipy.optimize.fmin_bfgs(_object_func, p0, 
                                       epsilon=epsilon,
                                       args = args, gtol=gtol, 
                                       full_output=True,
                                       disp=False,
                                       maxiter=maxiter)
    xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(xopt, fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return xopt
    else:
        return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag

##end of dadi.Inference code
on_demand_imports.py (project: yt, author: yt-project)
def optimize(self):
        if self._optimize is None:
            try:
                import scipy.optimize as optimize
            except ImportError:
                optimize = NotAModule(self._name)
            self._optimize = optimize
        return self._optimize
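This yt property defers importing scipy.optimize until it is first accessed (falling back to NotAModule when SciPy is missing). A minimal sketch of the same lazy-import pattern outside yt, without the fallback class:

class LazySciPy(object):
    _optimize = None

    @property
    def optimize(self):
        # Import scipy.optimize on first access and cache the module
        if self._optimize is None:
            import scipy.optimize as optimize
            self._optimize = optimize
        return self._optimize

lazy = LazySciPy()
root = lazy.optimize.brentq(lambda x: x ** 2 - 2.0, 0.0, 2.0)   # ~1.4142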
click_model.py (project: cas-eval, author: varepsilon)
def train(self, data):
        reg_weight = self.regularization_weight()

        def f(theta):
            ll = 0
            for d in data:
                session = d['session']
                if DEBUG:
                    assert len(session) > 5
                    assert len(session) < 15
                ll += self.log_likelihood(theta, session, d['serp'], d['sat'], f_only=True).full
            N = len(data)
            reg_term = 0.5 * self.reg_coeff / N * np.multiply(reg_weight, theta).dot(theta)
            if DEBUG:
                self.debug_theta(theta)
                print('mean LL = %f, reg_term = %f, N = %d' % (ll/N, reg_term, N))
            return -ll / N + reg_term

        def fprime(theta):
            ll_prime = np.zeros(self.num_features)
            for d in data:
                ll_prime += self.log_likelihood(theta, d['session'], d['serp'], d['sat']).gaussian
            N = len(data)
            return -ll_prime / N + self.reg_coeff / N * np.multiply(reg_weight, theta)

        theta0 = self.initial_guess()
        opt_res = scipy.optimize.minimize(f, theta0, method='L-BFGS-B', jac=fprime, options=dict(maxiter=100))
        return opt_res.x
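scipy.optimize.minimize with method='L-BFGS-B' and an analytic jac is the same pattern used in train() above. A minimal standalone call on a hypothetical regularized quadratic (not the click model's likelihood):

import numpy as np
import scipy.optimize

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, -1.0])

def f(theta):
    return 0.5 * theta.dot(A).dot(theta) - b.dot(theta)

def fprime(theta):
    return A.dot(theta) - b

res = scipy.optimize.minimize(f, np.zeros(2), method='L-BFGS-B', jac=fprime,
                              options=dict(maxiter=100))
# res.x is the minimizer; res.success and res.message describe termination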
lbfgs_optimizer.py (project: gail-driver, author: sisl)
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(
                tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(
                inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
lbfgs_optimizer.py (project: gail-driver, author: sisl)
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            ret = f_opt(*inputs)
            return ret

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(
                trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
reps.py (project: gail-driver, author: sisl)
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
opt.py (project: pyaeroopt, author: mjzahr)
def optimize(self, solver, sens='finite-diff', options=None, callback=None):

        if 'pyopt' in solver:
            xStar, fStar = self.optimizePyopt(solver, sens, options)
        elif 'scipy' in solver:
            xStar, fStar = self.optimizeScipy(solver, sens, options, callback)
        elif 'nlopt' in solver:
            xStar, fStar = self.optimizeNlopt(solver, sens, options)
        elif 'openopt' in solver:
            xStar, fStar = self.optimizeOpenopt(solver, sens, options)

        return (xStar, fStar)

    #TODO: test
transform.py (project: WaveletQuotes, author: JobyKK)
def find_s0(self):
        """Find the smallest resolvable scale by finding where the
        equivalent fourier period is equal to 2 * dt. For a Morlet
        wavelet, this is roughly 1.
        """
        dt = self.dt

        def f(s):
            return self.fourier_period(s) - 2 * dt
        return scipy.optimize.fsolve(f, 1)[0]
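scipy.optimize.fsolve returns an array of roots of f starting from the initial guess, so indexing [0] extracts the scalar. A minimal standalone example with a hypothetical function (the wavelet's fourier_period is not reproduced here):

import scipy.optimize

def f(s):
    return s ** 2 - 2.0                  # hypothetical equation; root near 1.4142

s0 = scipy.optimize.fsolve(f, 1.0)[0]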
RegressionTree.py (project: LambdaMart, author: lezzago)
def find_splits_parallel(args):
    var_space, label, col = args
    # var_space = data.iloc[:,col].tolist()
    return scipy.optimize.fminbound(error_function, min(var_space), max(var_space), args=(col, var_space, label), full_output=1)
    # return, 
    # if not min_error or error < min_error:
    #   min_error = error
    #   split_var = col
    #   min_split = split
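scipy.optimize.fminbound minimizes a scalar function over a bounded interval; with full_output=1 it returns (xopt, fval, ierr, numfunc). A minimal standalone call with a hypothetical error function in place of the tree's split criterion:

import scipy.optimize

def error_function(split, col, var_space, label):
    # hypothetical 1-D split error with a minimum at 0.3
    return (split - 0.3) ** 2

xopt, fval, ierr, numfunc = scipy.optimize.fminbound(
    error_function, 0.0, 1.0, args=(0, [0.0, 1.0], None), full_output=1)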
lbfgs_optimizer.py (project: rllab, author: rll)
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
lbfgs_optimizer.py (project: rllab, author: rll)
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
penalty_lbfgs_optimizer.py (project: rllab, author: rll)
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
reps.py (project: rllab, author: rll)
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
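The optimizer argument defaults to scipy.optimize.fmin_l_bfgs_b and can be any callable with the same interface. A minimal sketch of how such a pluggable optimizer would be invoked on a hypothetical one-dimensional dual objective, using numeric gradients:

import numpy as np
import scipy.optimize

def dual(eta):
    eta = eta[0]                          # fmin_l_bfgs_b passes a 1-D array
    return np.exp(eta) + 0.5 * eta ** 2   # hypothetical dual objective

optimizer = scipy.optimize.fmin_l_bfgs_b
eta_opt, dual_val, info = optimizer(func=dual, x0=np.array([0.0]),
                                    bounds=[(-10.0, 10.0)], approx_grad=True,
                                    maxiter=50)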

