Python infty() usage examples
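
A quick note before the examples: np.infty is simply an alias for np.inf, IEEE 754
positive infinity (NumPy 2.0 removes the alias, so new code should prefer np.inf).
Its two most common jobs, both visible in the snippets below, are acting as the
identity element when accumulating a maximum and as log(0) when accumulating a
log-sum-exp:

import numpy as np

assert np.infty == np.inf               # same value, two names
print(np.isfinite(np.infty))            # False
print(max(-np.infty, -1e300))           # -1e+300: -inf is the identity for max
print(np.logaddexp(-np.infty, 3.0))     # 3.0: -inf acts as log(0)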

ad_helpme.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec
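
This lnprobfn reads model, obs, sps, and the noise objects from the enclosing
scope. Below is a minimal sketch of how such a function is typically handed to
emcee; the globals are assumed to be instantiated as in the snippet, and
model.theta is assumed to hold the initial parameter vector:

import numpy as np
import emcee

# model, obs, sps are assumed to exist as globals, as in the snippet above
ndim = len(model.theta)
nwalkers = 2 * ndim + 2          # emcee needs an even number > 2*ndim
p0 = model.theta + 1e-4 * np.random.randn(nwalkers, ndim)  # ball around start
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn)
sampler.run_mcmc(p0, 1000)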

ad_helpme_params.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        print('oh shit prior')
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

prospect_by_galaxy_uvis.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    #print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        #print('oh shit prior')
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

helpme.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec
prospect_by_galaxy.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    #print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        #print('oh shit prior')
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

test_tractable.py (project: fang, author: rgrosse)
def check_partition_function():
    with misc.gnumpy_conversion_check('allow'):
        rbm = random_rbm()
        total = -np.infty

        for vis_ in itertools.product(*[[0, 1]] * NVIS):
            vis = gnp.garray(vis_)
            for hid_ in itertools.product(*[[0, 1]] * NHID):
                hid = gnp.garray(hid_)
                total = np.logaddexp(total, rbm.energy(vis[nax, :], hid[nax, :])[0])

        assert np.allclose(tractable.exact_partition_function(rbm, batch_units=BATCH_UNITS), total)
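
The accumulation above starts at total = -np.infty because logaddexp(-inf, x)
equals x, so -inf plays the role that 0 plays in an ordinary sum. A tiny
standalone illustration:

import numpy as np

log_probs = np.log([0.1, 0.2, 0.7])
total = -np.infty                       # log(0): the log-sum-exp identity
for lp in log_probs:
    total = np.logaddexp(total, lp)
print(np.exp(total))                    # ~1.0, the probabilities sum to one
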
test_utils.py (project: empymod, author: empymod)
def test_get_layer_nr():
    bip = np.array([0, 0, 300])
    lbip, zbip = utils.get_layer_nr(bip, np.array([-np.infty, 500]))
    assert lbip == 0
    assert zbip == 300
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 300, 500]))
    assert lbip == 1
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert lbip == 2
    bip = np.array([np.zeros(4), np.zeros(4), np.arange(4)*100])
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert_allclose(lbip, [0, 1, 1, 2])
chap3_code_24.py (project: chapter_codes, author: advancedpythonprogramming)
def maximum(values):
    temp_max = -np.infty
    for v in values:
        if v > temp_max:
            temp_max = v
        yield temp_max
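
Seeding temp_max with -np.infty guarantees the first value always replaces it,
so the generator yields the running maximum without any special-casing:

>>> list(maximum([3, 1, 4, 1, 5]))
[3, 3, 4, 4, 5]
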
stepper_control.py (project: Robo-Plot, author: JackBuck)
def __init__(self, y_axis: Axis, x_axis: Axis):
        self.x_axis = x_axis
        self.y_axis = y_axis

        self.x_soft_lower_limit = -np.infty
        self.x_soft_upper_limit = np.infty
        self.y_soft_lower_limit = -np.infty
        self.y_soft_upper_limit = np.infty
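
Defaulting the soft limits to +/- np.infty means an axis with no limit set never
clips a move. A minimal sketch of how such limits are typically applied;
clamp_target is a hypothetical helper, not part of Robo-Plot:

import numpy as np

def clamp_target(x, lower=-np.infty, upper=np.infty):
    """Clip a requested coordinate into the soft-limit range.

    With the default infinite limits this is a no-op.
    """
    return float(np.clip(x, lower, upper))

print(clamp_target(250.0))               # 250.0, no limit set
print(clamp_target(250.0, upper=200.0))  # 200.0
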
match_two_strips.py (project: CV-lecture-quizzes-python, author: pdvelez)
def find_best_match(patch, strip):

    # We will use SSD to find out the best match
    best_id = 0
    min_diff = np.infty

    for i in range(int(strip.shape[1] - patch.shape[1])):
        temp = strip[:, i: i + patch.shape[1]]
        ssd = np.sum((temp - patch) ** 2)
        if ssd < min_diff:
            min_diff = ssd
            best_id = i

    return best_id
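
Initializing min_diff to np.infty lets the first window win unconditionally.
Toy usage: cutting the patch out of the strip at column 40 makes the SSD
exactly zero there, so the function should return 40:

import numpy as np

strip = np.random.rand(20, 100)
patch = strip[:, 40:50]
print(find_best_match(patch, strip))  # 40
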
test_k_means.py (project: Parallel-SGD, author: angadgill)
def test_labels_assignment_and_inertia():
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    labels_gold = -np.ones(n_samples, dtype=int)
    mindist = np.empty(n_samples)
    mindist.fill(np.infty)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())

    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)

    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
algorithms.py (project: scikit-cmeans, author: bm424)
def fit(self, x):
        """Optimizes cluster centers by restarting convergence several times.

        Parameters
        ----------
        x : :obj:`np.ndarray`
            (n_samples, n_features)
            The original data.

        """
        objective_best = np.infty
        memberships_best = None
        centers_best = None
        j_list = []
        for i in range(self.n_init):
            self.centers = None
            self.memberships = None
            self.converge(x)
            objective = self.objective(x)
            j_list.append(objective)
            if objective < objective_best:
                memberships_best = self.memberships.copy()
                centers_best = self.centers.copy()
                objective_best = objective
        self.memberships = memberships_best
        self.centers = centers_best
        return j_list
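
A minimal sketch of driving this restarted fit; the Probabilistic class name
and its constructor arguments are assumptions about the scikit-cmeans API
rather than confirmed details:

import numpy as np
from skcmeans.algorithms import Probabilistic  # assumed import path

x = np.random.rand(200, 2)
clusterer = Probabilistic(n_clusters=3, n_init=10)  # assumed signature
objectives = clusterer.fit(x)       # the per-restart objective values
print(min(objectives))              # objective of the kept (best) restart
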
algorithms.py (project: scikit-cmeans, author: bm424)
def objective(self, x):
        if self.memberships is None or self.centers is None:
            return np.infty
        distances = self.distances(x)
        return np.sum(self.memberships * distances)
algorithms.py (project: scikit-cmeans, author: bm424)
def objective(self, x):
        if self.memberships is None or self.centers is None:
            return np.infty
        distances = self.distances(x)
        return np.sum(self.fuzzifier(self.memberships) * distances)
da.py (project: POT, author: rflamary)
def fit(self, Xs=None, ys=None, Xt=None, yt=None):
        """Build a coupling matrix from source and target sets of samples
        (Xs, ys) and (Xt, yt)

        Parameters
        ----------
        Xs : array-like, shape (n_source_samples, n_features)
            The training input samples.
        ys : array-like, shape (n_source_samples,)
            The class labels
        Xt : array-like, shape (n_target_samples, n_features)
            The training input samples.
        yt : array-like, shape (n_target_samples,)
            The class labels. If some target samples are unlabeled, fill the
            yt's elements with -1.

            Warning: due to this convention, -1 cannot be used as a
            class label.

        Returns
        -------
        self : object
            Returns self.
        """

        # check that the necessary input parameters are here
        if check_params(Xs=Xs, Xt=Xt):

            # pairwise distance
            self.cost_ = dist(Xs, Xt, metric=self.metric)
            self.cost_ = cost_normalization(self.cost_, self.norm)

            if (ys is not None) and (yt is not None):

                if self.limit_max != np.infty:
                    self.limit_max = self.limit_max * np.max(self.cost_)

                # assumes labeled source samples occupy the first rows
                # and labeled target samples occupy the first columns
                classes = [c for c in np.unique(ys) if c != -1]
                for c in classes:
                    idx_s = np.where((ys != c) & (ys != -1))
                    idx_t = np.where(yt == c)

                    # all coefficients pairing a source sample and a target
                    # sample with different labels get an infinite cost
                    for j in idx_t[0]:
                        self.cost_[idx_s[0], j] = self.limit_max

            # distribution estimation
            self.mu_s = self.distribution_estimation(Xs)
            self.mu_t = self.distribution_estimation(Xt)

            # store arrays of samples
            self.xs_ = Xs
            self.xt_ = Xt

        return self
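
A minimal sketch of the intended call pattern, using ot.da.SinkhornTransport
as one concrete transport class built on this fit method; the toy data are
illustrative only:

import numpy as np
import ot

Xs = np.random.rand(50, 2)           # source samples
ys = np.random.randint(0, 2, 50)     # source labels
Xt = np.random.rand(60, 2) + 1.0     # shifted target samples

transport = ot.da.SinkhornTransport(reg_e=1.0)
transport.fit(Xs=Xs, ys=ys, Xt=Xt)   # builds cost_, mu_s, mu_t, coupling
Xs_mapped = transport.transform(Xs=Xs)
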
applications.peptide.binding.py (project: microbiome-summer-school-2017, author: aldro61)
def cross_validation_spectrum_kernel(seq, is_train, folds):
    # Cross-validation
    # Repeat for each combination of candidate hyperparameter values
    substring_length = 3
    C_values = [0.01, 0.1, 1.0, 10., 100.]

    best_result = {"score": -np.infty}

    seq_vec = sequences_to_vec(seq, substring_length)
    K = np.dot(seq_vec, seq_vec.T)
    K_train = K[is_train][:, is_train]
    K_test = K[~is_train][:, is_train]
    y_train = labels[is_train]   # 'labels' is assumed to come from the enclosing scope
    y_test = labels[~is_train]

    for C in C_values:

        print "Parameters: C: {0:.4f}".format(C)

        # Cross-validation
        fold_scores = []
        for fold in np.unique(folds):
            print "...Fold {0:d}".format(fold + 1)
            fold_K_train = K_train[folds != fold][:, folds != fold]
            fold_K_test = K_train[folds == fold][:, folds != fold]
            fold_y_train = y_train[folds != fold]
            fold_y_test = y_train[folds == fold]
            fold_estimator = SupportVectorRegression(kernel="precomputed", C=C)
            fold_estimator.fit(fold_K_train.copy(), fold_y_train)
            fold_scores.append(fold_estimator.score(fold_K_test, fold_y_test))
        cv_score = np.mean(fold_scores)
        print "...... cv score:", cv_score

        if cv_score > best_result["score"]:
            best_result["score"] = cv_score
            best_result["K"] = dict(train=K_train, test=K_test, full=K)
            best_result["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
            best_result["hps"] = dict(C=C)

        print()
        print()
    return best_result
external_optimizer.py (project: GPflow, author: GPflow)
def __init__(self,
               loss,
               var_list=None,
               equalities=None,
               inequalities=None,
               var_to_bounds=None,
               **optimizer_kwargs):
    """Initialize a new interface instance.

    Args:
      loss: A scalar `Tensor` to be minimized.
      var_list: Optional `list` of `Variable` objects to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      equalities: Optional `list` of equality constraint scalar `Tensor`s to be
        held equal to zero.
      inequalities: Optional `list` of inequality constraint scalar `Tensor`s
        to be held nonnegative.
      var_to_bounds: Optional `dict` where each key is an optimization
        `Variable` and each corresponding value is a length-2 tuple of
        `(low, high)` bounds. Although enforcing this kind of simple constraint
        could be accomplished with the `inequalities` arg, not all optimization
        algorithms support general inequality constraints, e.g. L-BFGS-B. Both
        `low` and `high` can either be numbers or anything convertible to a
        NumPy array that can be broadcast to the shape of `var` (using
        `np.broadcast_to`). To indicate that there is no bound, use `None` (or
        `+/- np.infty`). For example, if `var` is a 2x3 matrix, then any of
        the following corresponding `bounds` could be supplied:
        * `(0, np.infty)`: Each element of `var` held positive.
        * `(-np.infty, [1, 2])`: First column less than 1, second column less
          than 2.
        * `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less
          than 2, etc.
        * `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,
          `var[0, 1]` less than 2, etc.
      **optimizer_kwargs: Other subclass-specific keyword arguments.
    """
    self.optimizer_kwargs = optimizer_kwargs

    self._loss = loss
    self._equalities = equalities or []
    self._inequalities = inequalities or []
    self._var_to_bounds = var_to_bounds

    if var_list is None:
      self._vars = variables.trainable_variables()
    elif var_list == []:
      raise ValueError("No variables to optimize.")
    else:
      self._vars = list(var_list)

    self._packed_bounds = []
    self._update_placeholders = []
    self._var_updates = []
    self._packed_var = None
    self._packed_loss_grad = None
    self._packed_equality_grads = []
    self._packed_inequality_grads = []
    self._var_shapes = None
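
A minimal sketch of the var_to_bounds usage described in the docstring. This
class appears to be vendored from TF1's tf.contrib.opt.ScipyOptimizerInterface,
so the example below uses that TF1-era API under the same assumptions:

import numpy as np
import tensorflow as tf  # TF1-era API

var = tf.Variable(tf.ones([2, 3]))
loss = tf.reduce_sum(tf.square(var - 2.0))
optimizer = tf.contrib.opt.ScipyOptimizerInterface(
    loss,
    var_to_bounds={var: (0, np.infty)},  # every element held nonnegative
    method='L-BFGS-B')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    optimizer.minimize(sess)
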
external_optimizer.py (project: GPflow, author: GPflow)
def _initialize_updated_shapes(self, session):
    shapes = array_ops.shape_n(self._vars)
    var_shapes = list(map(tuple, session.run(shapes)))

    if self._var_shapes is not None:
      new_old_shapes = zip(self._var_shapes, var_shapes)
      if all([old == new for old, new in new_old_shapes]):
        return

    self._var_shapes = var_shapes
    vars_and_shapes = zip(self._vars, self._var_shapes)
    vars_and_shapes_dict = dict(vars_and_shapes)

    packed_bounds = None
    if self._var_to_bounds is not None:
      left_packed_bounds = []
      right_packed_bounds = []
      for var, var_shape in vars_and_shapes:
        shape = list(var_shape)
        bounds = (-np.infty, np.infty)
        if var in self._var_to_bounds:
          bounds = self._var_to_bounds[var]
        left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
        right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
      packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
    self._packed_bounds = packed_bounds

    self._update_placeholders = [
        array_ops.placeholder(var.dtype) for var in self._vars
    ]
    self._var_updates = [
        var.assign(array_ops.reshape(placeholder, vars_and_shapes_dict[var]))
        for var, placeholder in zip(self._vars, self._update_placeholders)
    ]

    loss_grads = _compute_gradients(self._loss, self._vars)
    equalities_grads = [
        _compute_gradients(equality, self._vars)
        for equality in self._equalities
    ]
    inequalities_grads = [
        _compute_gradients(inequality, self._vars)
        for inequality in self._inequalities
    ]

    self._packed_var = self._pack(self._vars)
    self._packed_loss_grad = self._pack(loss_grads)
    self._packed_equality_grads = [
        self._pack(equality_grads) for equality_grads in equalities_grads
    ]
    self._packed_inequality_grads = [
        self._pack(inequality_grads) for inequality_grads in inequalities_grads
    ]

    dims = [_prod(vars_and_shapes_dict[var]) for var in self._vars]
    accumulated_dims = list(_accumulate(dims))
    self._packing_slices = [
        slice(start, end)
        for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
    ]
snlp.py (project: NLP.py, author: PythonOptimizers)
def __init__(self, model, **kwargs):
        """Initialize a slack form of an :class:`NLPModel`.

        :parameters:
            :model:  Original model to be transformed into a slack form.
        """
        self.model = model

        # Save number of variables and constraints prior to transformation
        self.original_n = model.n
        self.original_m = model.m

        # Number of slacks for the constraints
        n_slacks = model.nlowerC + model.nupperC + model.nrangeC
        self.n_slacks = n_slacks

        # Update effective number of variables and constraints
        n = self.original_n + n_slacks
        m = self.original_m

        Lvar = -np.infty * np.ones(n)
        Uvar = +np.infty * np.ones(n)

        # Copy original bounds
        Lvar[:self.original_n] = model.Lvar
        Uvar[:self.original_n] = model.Uvar

        # Add bounds corresponding to lower constraints
        bot = self.original_n
        self.sL = range(bot, bot + model.nlowerC)
        Lvar[bot:bot + model.nlowerC] = model.Lcon[model.lowerC]

        # Add bounds corresponding to upper constraints
        bot += model.nlowerC
        self.sU = range(bot, bot + model.nupperC)
        Uvar[bot:bot + model.nupperC] = model.Ucon[model.upperC]

        # Add bounds corresponding to range constraints
        bot += model.nupperC
        self.sR = range(bot, bot + model.nrangeC)
        Lvar[bot:bot + model.nrangeC] = model.Lcon[model.rangeC]
        Uvar[bot:bot + model.nrangeC] = model.Ucon[model.rangeC]

        # No more inequalities. All constraints are now equal to 0
        Lcon = Ucon = np.zeros(m)

        super(SlackModel, self).__init__(n=n, m=m, name='Slack-' + model.name,
                                         Lvar=Lvar, Uvar=Uvar,
                                         Lcon=Lcon, Ucon=Ucon)

        # Redefine primal and dual initial guesses
        self.original_x0 = model.x0[:]
        self.x0 = np.zeros(self.n)
        self.x0[:self.original_n] = self.original_x0[:]

        self.original_pi0 = model.pi0[:]
        self.pi0 = np.zeros(self.m)
        self.pi0[:self.original_m] = self.original_pi0[:]
        return
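
In words: each lower, upper, or range constraint gains a slack variable s, the
constraint becomes c(x) - s = 0, and the original constraint bounds migrate
onto s, which is why Lcon = Ucon = 0 above. A tiny standalone illustration of
the bound bookkeeping for two original variables plus one slack for a range
constraint 1 <= c(x) <= 4:

import numpy as np

n_orig, n_slacks = 2, 1
Lvar = -np.infty * np.ones(n_orig + n_slacks)
Uvar = +np.infty * np.ones(n_orig + n_slacks)
Lvar[n_orig:] = 1.0   # slack inherits the constraint's lower bound
Uvar[n_orig:] = 4.0   # ...and its upper bound
print(Lvar)           # [-inf -inf   1.]
print(Uvar)           # [ inf  inf   4.]
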
sg_proscode.py (project: bates_galaxies_lab, author: aleksds)
def lnprobfn(theta, model, obs, sps, verbose=False, spec_noise=None,
             phot_noise=None, residuals=False):
    """Define the likelihood function.
    Given a parameter vector and a dictionary of observational data and a model
    object, return the ln of the posterior. This requires that an sps object
    (and if using spectra and gaussian processes, a GP object) be instantiated.
    """
    from prospect.likelihood import lnlike_spec, lnlike_phot, chi_spec, chi_phot

    # Calculate the prior probability and exit if not within the prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate the mean model--
    t1 = time()
    try:
        model_spec, model_phot, model_extras = model.mean_model(theta, obs, sps=sps)
    except ValueError:
        return -np.infty
    d1 = time() - t1

    # Return chi vectors for least-squares optimization--
    if residuals:
        chispec = chi_spec(model_spec, obs)
        chiphot = chi_phot(model_phot, obs)        
        return np.concatenate([chispec, chiphot])

    # Noise modeling--
    if spec_noise:
        spec_noise.update(**model.params)
    if phot_noise:
        phot_noise.update(**model.params)

    vectors = {
        'spec': model_spec,    # model spectrum
        'phot': model_phot,    # model photometry
        'sed': model._spec,    # object spectrum
        'cal': model._speccal, # object calibration spectrum
        }

    # Calculate likelihoods--
    t2 = time()
    lnp_spec = lnlike_spec(model_spec, obs=obs, spec_noise=spec_noise, **vectors)
    lnp_phot = lnlike_phot(model_phot, obs=obs, phot_noise=phot_noise, **vectors)
    d2 = time() - t2
    if False:
        from prospect.likelihood import write_log
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    #if (lnp_prior + lnp_phot + lnp_spec) > 0:
    #    print('Total probability is positive!!', lnp_prior, lnp_phot)
    #    pdb.set_trace()

    return lnp_prior + lnp_phot + lnp_spec
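
Unlike the earlier copies, this version takes model, obs, and sps as arguments
and adds a residuals mode that returns a chi vector instead of a scalar. A
minimal sketch of exploiting that for least-squares optimization; model, obs,
and sps are assumed to be instantiated, and model.theta is assumed to hold the
initial parameter vector:

from functools import partial
from scipy.optimize import least_squares

# freeze everything but theta; residuals=True returns the chi vector
chivecfn = partial(lnprobfn, model=model, obs=obs, sps=sps, residuals=True)
result = least_squares(chivecfn, model.theta.copy(), method='lm')
print(result.x)  # best-fit (maximum-likelihood) parameter vector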

