Example source code for Python's logsumexp()

SklearnClassifier.py (project: pyISC, author: STREAM3)
def predict_log_proba(self,X):
        assert self.class_column > -1

        X1 = None
        if isinstance(X, pyisc.DataObject):
            assert X.class_column == self.class_column
            X1 = X.as_2d_array()
        elif isinstance(X, ndarray):
            X1 = X.copy()


        if X1 is not None:

            logps = self.compute_logp(X1)

            LogPs = [x-logsumexp(x) for x in array(logps).T] #normalized

            return array(LogPs)
        else:
            raise ValueError("Unknown type of data to score:", type(X))
rules.py (project: mitre, author: gerberlab)
def flat_prior(self):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        In this base class, we implement a version where the 
        distribution over primitives is static; subclasses will 
        reevaluate this at each call based on the values of variables and 
        parameters.

        """

        raw_weights = np.zeros(len(self.rule_population))
        normalization = logsumexp(raw_weights)
        return raw_weights - normalization
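
Since raw_weights is all zeros, logsumexp returns log(N) for a population of size N, so the method yields the uniform log-distribution -log(N) over primitives. A minimal check of that arithmetic (N is an arbitrary illustration, and SciPy's logsumexp is assumed):

import numpy as np
from scipy.special import logsumexp

N = 4                                          # illustrative population size
log_prior = np.zeros(N) - logsumexp(np.zeros(N))
assert np.allclose(log_prior, -np.log(N))      # uniform over primitives
assert np.isclose(logsumexp(log_prior), 0.0)   # properly normalized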
test_multivariate.py (project: zhusuan, author: thu-ml)
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, n_experiments, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given)
                dist = Multinomial(logits, n_experiments)
                log_p = dist.log_prob(given)
                target_log_p = np.log(misc.factorial(n_experiments)) - \
                    np.sum(np.log(misc.factorial(given)), -1) + \
                    np.sum(given * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = dist.prob(given)
                target_p = np.exp(target_log_p)
                self.assertAllClose(p.eval(), target_p)

            _test_value([-50., -20., 0.], 4, [1, 0, 3])
            _test_value([1., 10., 1000.], 1, [1, 0, 0])
            _test_value([[2., 3., 1.], [5., 7., 4.]], 3,
                        np.ones([3, 1, 3], dtype=np.int32))
            _test_value([-10., 10., 20., 50.], 100, [[0, 1, 99, 100],
                                                     [100, 99, 1, 0]])
mathext.py (project: bolero, author: rock-learning)
def logsumexp(a, axis=None, b=None):
        a = np.asarray(a)
        if axis is None:
            a = a.ravel()
        else:
            a = np.rollaxis(a, axis)
        a_max = a.max(axis=0)
        if b is not None:
            b = np.asarray(b)
            if axis is None:
                b = b.ravel()
            else:
                b = np.rollaxis(b, axis)
            out = np.log(np.sum(b * np.exp(a - a_max), axis=0))
        else:
            out = np.log(np.sum(np.exp(a - a_max), axis=0))
        out += a_max
        return out
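
This is the classic max-shift implementation: factoring out a_max keeps the exponentials finite where a naive log(sum(exp(a))) would overflow. A quick illustration, with values chosen to trigger overflow in the naive form:

import numpy as np

a = np.array([1000.0, 1000.0])
# naive np.log(np.sum(np.exp(a))) overflows to inf
print(logsumexp(a))                           # 1000.6931... = 1000 + log(2)
print(logsumexp(a, b=np.array([0.5, 0.5])))   # 1000.0 (weighted version)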
model.py (project: hydrus, author: mark-r-g)
def ests_ll_quad(self, params):
        """
        Calculate the loglikelihood given model parameters `params`.

        This method uses Gaussian quadrature, and thus returns an *approximate*
        integral.
        """
        mu0, gamma0, err0 = np.split(params, 3)
        x = np.tile(self.z, (self.cfg.QCOUNT, 1, 1))  # (QCOUNT x nhosp x nmeas)
        loc = mu0 + np.outer(QC1, gamma0)
        loc = np.tile(loc, (self.n, 1, 1))
        loc = np.transpose(loc, (1, 0, 2))
        scale = np.tile(err0, (self.cfg.QCOUNT, self.n, 1))
        zs = lpdf_3d(x=x, loc=loc, scale=scale)

        w2 = np.tile(self.w, (self.cfg.QCOUNT, 1, 1))
        wted = np.nansum(w2 * zs, axis=2).T  # (nhosp X QCOUNT)
        qh = np.tile(QC1, (self.n, 1))  # (nhosp X QCOUNT)
        combined = wted + norm.logpdf(qh)  # (nhosp X QCOUNT)

        return logsumexp(np.nan_to_num(combined), b=QC2, axis=1)  # (nhosp)
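
The b= keyword applies the quadrature weights (QC2) inside the logsumexp, so the weighted sum over quadrature nodes happens entirely in log space. Below is a self-contained 1-D analogue of that pattern, assuming SciPy's logsumexp and NumPy's probabilists' Gauss-Hermite rule; the nodes and weights here are stand-ins of my own, not the module's QC1/QC2:

import numpy as np
from scipy.special import logsumexp
from numpy.polynomial.hermite_e import hermegauss

nodes, weights = hermegauss(21)    # rule for weight function exp(-q^2/2)
log_f = -0.5 * nodes ** 2          # toy log-integrand log f(q)
# integral of f(q) * N(q; 0, 1) dq, evaluated in log space
log_integral = logsumexp(log_f, b=weights / np.sqrt(2 * np.pi))
assert np.isclose(log_integral, -0.5 * np.log(2), atol=1e-6)  # exact: log(1/sqrt(2))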
math.py (project: factorix, author: gbouchar)
def log_softmax(vec):
    """Robust version of the log of softmax values

    Args:
        vec: vector of log-odds

    Returns:
        A vector of log-softmax values, log(exp(x_k) / sum_i exp(x_i)), whose exponential sums to one

    Examples:
        >>> print(log_softmax(np.array([1.0, 1.0, 1.0, 1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([-1.0, -1.0, -1.0, -1.0])))
        [-1.38629436 -1.38629436 -1.38629436 -1.38629436]
        >>> print(log_softmax(np.array([1.0, 0.0, -1.0, 1.1])))
        [-0.9587315 -1.9587315 -2.9587315 -0.8587315]

    """
    return vec - logsumexp(vec)
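
A quick sanity check (illustrative inputs): exponentiating the log-softmax output recovers probabilities that sum to one, and shifting the input by a constant leaves the result unchanged, which is the point of the robust formulation:

import numpy as np

p = np.exp(log_softmax(np.array([1.0, 0.0, -1.0, 1.1])))
assert np.isclose(p.sum(), 1.0)
assert np.allclose(log_softmax(np.array([1.0, 2.0])),
                   log_softmax(np.array([101.0, 102.0])))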
utils.py (project: NetPower_TestBed, author: Vignesh2208)
def log_normalize(a, axis=None):
    """Normalizes the input array so that the exponent of the sum is 1.

    Parameters
    ----------
    a : array
        Non-normalized input data.

    axis : int
        Dimension along which normalization is performed.

    Notes
    -----
    Modifies the input **inplace**.
    """
    a_lse = logsumexp(a, axis)
    a -= a_lse[:, np.newaxis]
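
A usage sketch for the 2-D, axis=1 case that the ``a_lse[:, np.newaxis]`` broadcast supports (the function above expects a logsumexp in scope, as in the snippet); note the array is modified in place:

import numpy as np
from scipy.special import logsumexp

a = np.log(np.array([[1.0, 3.0], [2.0, 2.0]]))
log_normalize(a, axis=1)                        # in-place update
assert np.allclose(np.exp(a).sum(axis=1), 1.0)  # rows now sum to 1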
msmle.py (project: pynamd, author: radakb)
def _uwham_obj_grad(self, f_i):
        """Return the log-likelihood (scalar objective function) and its
        gradient (wrt the free energies) for a given value of the free
        energies.  Note that this expects one less free energy than there are
        states, since we always solve under the constraint that f_1 = 0.
        """
        _f_i = hstack((zeros(1), asarray(f_i)))
        # For numerical stability, use log quantities.
        logQ_nj = _f_i - self._u_nj_m
        logNorm_n = logsumexp(logQ_nj, 1, self.PIsdiag[newaxis, :])
        W_nj = exp(logQ_nj - logNorm_n[:, newaxis])
        # Compute matrix products and sums (equivalent to multiplying by
        # appropriate vector of ones). Note that using dot() with ndarrays is
        # _much_ faster than multiplying matrix objects.
        PIW = self.PIs.dot(W_nj.T)
        WPI = W_nj.dot(self.PIs)
        g = PIW.mean(axis=1) # used in gradient and Hessian computation
        kappa = logNorm_n.mean() - (self.PIsdiag.dot(_f_i)).sum()
        grad = (g - self.PIsdiag)[1:]
        self._hess = (diagflat(g) - PIW.dot(WPI) / self.total_samples)[1:, 1:]
        return kappa, grad
compute_stirling.py (project: pymake, author: dtrckd)
def recursive_line(self, new_line=5246):
        stir = self.load()
        J = stir.shape[0]
        K = stir.shape[1]
        for x in range(new_line):
            n = J + x
            new_l =  np.ones((1, K)) * np.inf
            print(n)
            for m in range(1,K):
                if m > n:
                    continue
                elif m == n:
                    new_l[0, m] = 0
                elif m == 1:
                    new_l[0, 1] = logsumexp( [  np.log(n-1) + stir[n-1, m] ] )
                else:
                    new_l[0, m] = logsumexp( [ stir[n-1, m-1] , np.log(n-1) + stir[n-1, m] ] )
            stir = np.vstack((stir, new_l))

        #np.save('stirling.npy', stir)
        #np.load('stirling.npy')
        return stir
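
The update implements the recurrence for unsigned Stirling numbers of the first kind, |s(n,m)| = |s(n-1,m-1)| + (n-1)|s(n-1,m)|, in log space via logsumexp. A minimal check against known values (SciPy's logsumexp assumed):

import numpy as np
from scipy.special import logsumexp

# log |s(3,1)| and log |s(3,2)|; known values: 2 and 3, and |s(4,2)| = 11.
log_prev = np.log(np.array([2.0, 3.0]))
log_s42 = logsumexp([log_prev[0], np.log(4 - 1) + log_prev[1]])
assert np.isclose(np.exp(log_s42), 11.0)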
compute_stirling.py (project: pymake, author: dtrckd)
def recursive_row(self, new_row=''):
        stir = np.load('stirling.npy')
        J = stir.shape[0]
        K = stir.shape[1]
        x = 0
        while stir.shape[0] != stir.shape[1]:
            m = K + x
            new_c =  np.ones((J, 1)) * np.inf
            stir = np.hstack((stir, new_c))
            print(m)
            for n in range(K,J):
                if m > n:
                    continue
                elif m == n:
                    stir[n, m] = 0

                else:
                    stir[n,m] = logsumexp( [ stir[n-1, m-1] , np.log(n-1) + stir[n-1, m] ] )
            x += 1

        #np.save('stirling.npy', stir)
        #np.load('stirling.npy',)
        return stir
estimator.py (project: CNValloc, author: m1m0r1)
def _calc_gamma_psi(log_d, alpha, log_beta, gamma, log_psi0):
    log_psi = log_psi0
    count = 0
    #print ((np.exp(log_psi1 - log_psi) ** 2).sum())

    while count == 0 \
        or ((np.exp(log_psi1) - np.exp(log_psi)) ** 2).sum() > 0.001 \
        or ((gamma1 - gamma) ** 2).sum() > 0.001:
        #print ('gamma psi:', count, ((np.exp(log_psi1) - np.exp(log_psi)) ** 2).sum())
        log_psi1 = log_psi
        gamma1 = gamma

        psi_offset = (digamma(gamma))[:, np.newaxis, np.newaxis, :]

        log_psi = log_beta[np.newaxis, :, :, :] + psi_offset
        log_psi = log_normalize(log_psi, axis=3)
        gamma = np.exp(logsumexp(logsumexp(log_d[:, :, :, np.newaxis] + log_psi, axis=1), axis=1)) + alpha[np.newaxis, :]
        count += 1

    #log_psi = np.average([log_psi0, log_psi], axis=0, weights=[0.9, 0.1])   # weak learning
    return (gamma, log_psi)
estimator.py (project: CNValloc, author: m1m0r1)
def _calc_alpha_beta(log_d, alpha0, log_beta0, gamma, log_psi):
    log_beta = logsumexp(log_psi + log_d[:, :, :, np.newaxis], axis=0)
    log_beta = log_normalize(log_beta, axis=1)

    log_smooth = np.log(10)
    alpha = alpha0
    N = gamma.shape[0]
    zero = 1e-30

    gamma_digamma_sum = digamma(gamma.sum(axis=1))[:, np.newaxis]
    g_offset = (digamma(gamma) - gamma_digamma_sum).sum(axis=0) / N
    # using log
    def next_alpha(alpha):
        das = digamma(alpha.sum())
        g = alpha * N * (das - digamma(alpha) + g_offset)
        h = alpha * N * (das + g_offset)
        z = N * das
        x = (alpha * g / h).sum()
        w = (alpha ** 2 / h).sum()
        return np.exp(np.log(alpha) - (g - x * alpha / (1/z + w)) / h)

    return (alpha, log_beta)
estimator.py (project: CNValloc, author: m1m0r1)
def as_dict(self):
        """ Returns json encodable dictionary
        """
        haps = [[self._data.alphabets[np.argmax(site_beta)] for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta]
        hap_probs = [[np.exp(np.max(site_beta) - logsumexp(site_beta)) for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta]

        return {
            'nsites': self._data.nsites,
            'sample_ids': self._data.sample_ids,
            'nhaps': self.nhaps,
            'alphabets': list(self._data.alphabets),
            'step': self.step,
            'log_likelihood': self.ll,
            'alpha': list(map(float, self.alpha)),
            'log_beta': [[list(map(float, site_beta)) for site_beta in beta_sites.transpose()] for beta_sites in self.log_beta],
            'haps': haps,
            'hap_probs': hap_probs,
            'gamma': [list(map(float, vec)) for vec in self.gamma],
        }
von_mises_fisher_mixture.py (project: spherecluster, author: clara-labs)
def _log_likelihood(X, centers, weights, concentrations):
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 dimensions before becoming numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(
                posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
GMM.py (project: megamix, author: 14thibea)
def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each point in each cluster

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        """
        log_normal_matrix = _log_normal_matrix(points,self.means,self.cov_chol,
                                               self.covariance_type,self.n_jobs)
        log_product = log_normal_matrix + self.log_weights
        log_prob_norm = logsumexp(log_product,axis=1)

        log_resp = log_product - log_prob_norm[:,np.newaxis]

        return log_prob_norm,log_resp
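
Stripped of the class context, the E-step normalization is the standard "subtract the per-row logsumexp" pattern, after which each point's exponentiated responsibilities sum to one. A minimal sketch with random stand-in values:

import numpy as np
from scipy.special import logsumexp

log_product = np.random.randn(5, 3)              # stand-in weighted log densities
log_prob_norm = logsumexp(log_product, axis=1)   # per-point log normalizer
log_resp = log_product - log_prob_norm[:, np.newaxis]
assert np.allclose(np.exp(log_resp).sum(axis=1), 1.0)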
GMM.py (project: megamix, author: 14thibea)
def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each point in each cluster

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        """
        log_normal_matrix = _log_normal_matrix(points,self.means,self.cov,self.covariance_type,self.n_jobs)
        log_product = log_normal_matrix + self.log_weights[:,np.newaxis].T
        log_prob_norm = logsumexp(log_product,axis=1)

        log_resp = log_product - log_prob_norm[:,np.newaxis]

        return log_prob_norm,log_resp
rebar.py (project: relax, author: duvenaud)
def partial_eval(self, X, n_samples=5):
    if n_samples < 1000:
      res, iwae = self.sess.run(
          (self.lHat, self.iwae),
          feed_dict={self.x: X, self.n_samples: n_samples})
      res = [iwae] + res
    else:  # special case to handle OOM
      assert n_samples % 100 == 0, "When using a large # of samples, it must be divisible by 100"
      res = []
      for i in xrange(int(n_samples/100)):
        logF, = self.sess.run(
            (self.logF,),
            feed_dict={self.x: X, self.n_samples: 100})
        res.append(logsumexp(logF, axis=1))
      res = [np.mean(logsumexp(res, axis=0) - np.log(n_samples))]
    return res
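
The OOM branch works because logsumexp composes across chunks: the logsumexp of per-chunk logsumexp values equals a single logsumexp over the concatenated data. A quick standalone check of that identity (shapes are illustrative; SciPy's logsumexp assumed):

import numpy as np
from scipy.special import logsumexp

x = np.random.randn(10, 1000)
chunks = [logsumexp(x[:, i:i + 100], axis=1) for i in range(0, 1000, 100)]
combined = logsumexp(np.stack(chunks), axis=0)
assert np.allclose(combined, logsumexp(x, axis=1))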


  # Random samplers
yacht_regression.py (project: SEVDL_MGP, author: AMLab-Amsterdam)
def loglike(nn, sample_preds, y):
    """Return the Avg. Test Log-Likelihood
    """
    if y.shape[1] == 1:
        y = y.ravel()
    sample_ll = np.zeros((sample_preds.shape[1], sample_preds.shape[0]))
    a, b = np.exp(nn.extra_inf['a1'].get_value()), np.exp(nn.extra_inf['b1'].get_value())
    etau, elogtau = (a / b).astype(np.float32), (psi(a) - np.log(b)).astype(np.float32)
    for sample in xrange(sample_preds.shape[0]):
        ypred = sample_preds[sample].astype(np.float32)
        if len(y.shape) > 1:
            sll = -.5 * np.sum(etau * (y - ypred)**2, axis=1)
        else:
            sll = -.5 * etau * (y - ypred)**2
        sample_ll[:, sample] = sll

    return np.mean(logsumexp(sample_ll, axis=1) - np.log(sample_preds.shape[0]) - .5 * np.log(2*np.pi) + .5 * elogtau.sum())
serving.py (project: treecat, author: posterior)
def logprob(self, data):
        logprobs = np.stack(
            [server.logprob(data) for server in self._ensemble])
        logprobs = logsumexp(logprobs, axis=0)
        logprobs -= np.log(len(self._ensemble))
        assert logprobs.shape == (data.shape[0], )
        return logprobs
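
Subtracting log(len(self._ensemble)) turns the logsumexp into a log-mean-exp: the ensemble members' probabilities are averaged in ordinary space while the computation stays in log space. A small standalone illustration with made-up numbers:

import numpy as np
from scipy.special import logsumexp

lps = np.log(np.array([[0.2, 0.5], [0.4, 0.1]]))  # two ensemble members
avg = logsumexp(lps, axis=0) - np.log(len(lps))   # log of the mean probability
assert np.allclose(np.exp(avg), [0.3, 0.3])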
diffexp.py (project: cellranger, author: 10XGenomics)
def nb_exact_test(x_a, x_b, size_factor_a, size_factor_b, mu, phi):
    """ Compute p-value for a pairwise exact test using the negative binomial.
    Args:
      x_a (int) - Total count for a single gene in group A
      x_b (int) - Total count for a single gene in group B
      size_factor_a (float) - Sum of size factors for group A
      size_factor_b (float) - Sum of size factors for group B
      mu (float) - Common mean count for this gene
      phi (float) - Common dispersion for this gene
    Returns:
      p-value (float); the probability that a random pair of counts under the null hypothesis is more extreme
      than the observed pair of counts. """
    size_factor_a = float(size_factor_a)
    size_factor_b = float(size_factor_b)
    mu = float(mu)
    phi = float(phi)

    if (x_a + x_b) == 0:
        return 1.0
    if phi == 0:
        return 1.0
    if size_factor_a == 0 or size_factor_b == 0:
        return 1.0

    all_x_a = np.arange(0, 1+x_a+x_b, 1)
    all_x_b = np.arange(x_a+x_b, -1, -1)

    def log_prob(x, size_factor):
        return neg_bin_log_pmf(x, size_factor*mu, phi/size_factor)

    log_p_obs = log_prob(x_a, size_factor_a) + log_prob(x_b, size_factor_b)
    log_p_all = log_prob(all_x_a, size_factor_a) + log_prob(all_x_b, size_factor_b)

    more_extreme = log_p_all <= log_p_obs
    if np.sum(more_extreme) == 0:
        return 0.0

    return np.exp(logsumexp(log_p_all[more_extreme]) - logsumexp(log_p_all))
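
Log space matters here because individual pmf terms can underflow to zero; the p-value is a ratio of probability sums evaluated entirely with logsumexp. A toy illustration with made-up log-pmf values:

import numpy as np
from scipy.special import logsumexp

log_p_all = np.array([-800.0, -802.0, -750.0])  # made-up log-pmf values
log_p_obs = -800.0
more_extreme = log_p_all <= log_p_obs
p_value = np.exp(logsumexp(log_p_all[more_extreme]) - logsumexp(log_p_all))
# finite and well-defined even though np.exp(-800.0) underflows to 0.0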
logit_rules.py (project: mitre, author: gerberlab)
def flat_prior(self, state):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        """
        raw_phylogeny_weights = self.rule_population.flat_variable_weights 
        phylogeny_weights = scipy.stats.norm.logpdf(
            np.log(raw_phylogeny_weights),
            loc = state.phylogeny_mean,
            scale = state.phylogeny_std
        )

        total_duration = float(self.data.experiment_end - self.data.experiment_start)
        durations = (self.rule_population.flat_durations /
                     ((1.0+self.window_duration_epsilon)*total_duration)
                    )
        window_a = (
            state.window_concentration *
            state.window_typical_fraction
        )
        window_b = (
            state.window_concentration *
            (1.0-state.window_typical_fraction)
        )
        window_weights = scipy.stats.beta.logpdf(
            durations, 
            window_a,
            window_b
        )

        weights = phylogeny_weights + window_weights
        normalization = logsumexp(weights)
        return weights - normalization
logit_rules.py (project: mitre, author: gerberlab)
def flat_prior(self, state):
        """ Evaluate log-probability of each primitive in the population.

        Return value is properly normalized.

        This subclass ignores phylogenetic weights.

        """
        total_duration = float(self.data.experiment_end - self.data.experiment_start)
        durations = (self.rule_population.flat_durations /
                     ((1.0+self.window_duration_epsilon)*total_duration)
                    )
        window_a = (
            state.window_concentration *
            state.window_typical_fraction
        )
        window_b = (
            state.window_concentration *
            (1.0-state.window_typical_fraction)
        )
        window_weights = scipy.stats.beta.logpdf(
            durations, 
            window_a,
            window_b
        )

        weights = window_weights
        normalization = logsumexp(weights)
        return weights - normalization
user.py (project: musm-adt17, author: stefanoteso)
def _utils_to_pvals(self, utils):
        return np.exp(utils - logsumexp(self.noise * utils))
rl_tuner.py (project: magenta, author: tensorflow)
def reward_from_reward_rnn_scores(self, action, reward_scores):
    """Rewards based on probabilities learned from data by trained RNN.

    Computes the reward_network's learned softmax probabilities. When used as
    rewards, allows the model to maintain information it learned from data.

    Args:
      action: A one-hot encoding of the chosen action.
      reward_scores: The value for each note output by the reward_rnn.
    Returns:
      Float reward value.
    """
    action_note = np.argmax(action)
    normalization_constant = logsumexp(reward_scores)
    return reward_scores[action_note] - normalization_constant
ldamodel.py (project: paragraph2vec, author: thunlp)
def bound(self, corpus, gamma=None, subsample_ratio=1.0):
        """
        Estimate the variational bound of documents from `corpus`:
        E_q[log p(corpus)] - E_q[log q(corpus)]

        `gamma` are the variational parameters on topic weights for each `corpus`
        document (=2d matrix=what comes out of `inference()`).
        If not supplied, will be inferred from the model.

        """
        score = 0.0
        _lambda = self.state.get_lambda()
        Elogbeta = dirichlet_expectation(_lambda)

        for d, doc in enumerate(corpus):  # stream the input doc-by-doc, in case it's too large to fit in RAM
            if d % self.chunksize == 0:
                logger.debug("bound: at document #%i" % d)
            if gamma is None:
                gammad, _ = self.inference([doc])
            else:
                gammad = gamma[d]
            Elogthetad = dirichlet_expectation(gammad)

            # E[log p(doc | theta, beta)]
            score += numpy.sum(cnt * logsumexp(Elogthetad + Elogbeta[:, id]) for id, cnt in doc)

            # E[log p(theta | alpha) - log q(theta | gamma)]; assumes alpha is a vector
            score += numpy.sum((self.alpha - gammad) * Elogthetad)
            score += numpy.sum(gammaln(gammad) - gammaln(self.alpha))
            score += gammaln(numpy.sum(self.alpha)) - gammaln(numpy.sum(gammad))

        # compensate likelihood for when `corpus` above is only a sample of the whole corpus
        score *= subsample_ratio

        # E[log p(beta | eta) - log q (beta | lambda)]; assumes eta is a scalar
        score += numpy.sum((self.eta - _lambda) * Elogbeta)
        score += numpy.sum(gammaln(_lambda) - gammaln(self.eta))
        score += numpy.sum(gammaln(self.eta * self.num_terms) - gammaln(numpy.sum(_lambda, 1)))
        return score
test_utils.py (project: zhusuan, author: thu-ml)
def test_log_sum_exp(self):
        with self.test_session(use_gpu=True) as sess:
            a = np.array([[[1., 3., 0.2], [0.7, 2., 1e-6]],
                          [[0., 1e6, 1.], [1., 1., 1.]]])
            for keepdims in [True, False]:
                true_values = misc.logsumexp(a, (0, 2), keepdims=keepdims)
                test_values = sess.run(log_sum_exp(
                    tf.constant(a), (0, 2), keepdims))
                self.assertAllClose(test_values, true_values)
test_utils.py (project: zhusuan, author: thu-ml)
def test_log_mean_exp(self):
        with self.test_session(use_gpu=True) as sess:
            a = np.array([[[1., 3., 0.2], [0.7, 2., 1e-6]],
                          [[0., 1e6, 1.], [1., 1., 1.]]])
            for keepdims in [True, False]:
                true_values = misc.logsumexp(a, (0, 2), keepdims=keepdims) - \
                              np.log(a.shape[0] * a.shape[2])
                test_values = sess.run(log_mean_exp(
                    tf.constant(a), (0, 2), keepdims))
                self.assertAllClose(test_values, true_values)

            b = np.array([[0., 1e-6, 10.1]])
            test_values = sess.run(log_mean_exp(b, 0, keep_dims=False))
            self.assertTrue(np.abs(test_values - b).max() < 1e-6)
test_univariate.py (project: zhusuan, author: thu-ml)
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = Categorical(logits)
                log_p = cat.log_prob(given)

                def _one_hot(x, depth):
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(given)
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))
test_multivariate.py (project: zhusuan, author: thu-ml)
def test_value(self):
        with self.test_session(use_gpu=True):
            def _test_value(logits, given):
                logits = np.array(logits, np.float32)
                normalized_logits = logits - misc.logsumexp(
                    logits, axis=-1, keepdims=True)
                given = np.array(given, np.int32)
                cat = OnehotCategorical(logits)
                log_p = cat.log_prob(tf.one_hot(given, logits.shape[-1],
                                                dtype=tf.int32))

                def _one_hot(x, depth):
                    n_elements = x.size
                    ret = np.zeros((n_elements, depth))
                    ret[np.arange(n_elements), x.flat] = 1
                    return ret.reshape(list(x.shape) + [depth])

                target_log_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * normalized_logits, -1)
                self.assertAllClose(log_p.eval(), target_log_p)
                p = cat.prob(tf.one_hot(given, logits.shape[-1],
                                        dtype=tf.int32))
                target_p = np.sum(_one_hot(
                    given, logits.shape[-1]) * np.exp(normalized_logits), -1)
                self.assertAllClose(p.eval(), target_p)

            _test_value([0.], [0, 0, 0])
            _test_value([-50., -10., -50.], [0, 1, 2, 1])
            _test_value([0., 4.], [[0, 1], [0, 1]])
            _test_value([[2., 3., 1.], [5., 7., 4.]],
                        np.ones([3, 1, 1], dtype=np.int32))

