Python digamma() usage examples (source code)

tricks.py (project: gumbel-relatives, author: matejbalog)
def lnZ_Exponential_MSE(M):
    return (np.log(M) - digamma(M)) ** 2 + lnZ_Exponential_var(M)


# Estimating estimator MSEs by sampling
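
The squared-bias term np.log(M) - digamma(M) in lnZ_Exponential_MSE can be inspected in isolation. Below is a minimal, self-contained sketch; the variance term lnZ_Exponential_var is not shown in this snippet, so the sketch assumes (hypothetically) that it is the trigamma function polygamma(1, M), i.e. the variance of log X for X ~ Gamma(M, 1). The real project may define it differently.

import numpy as np
from scipy.special import digamma, polygamma

def lnZ_Exponential_var(M):
    # Assumed stand-in for the project's variance term: trigamma(M),
    # the variance of log X when X ~ Gamma(M, 1).
    return polygamma(1, M)

def lnZ_Exponential_MSE(M):
    bias = np.log(M) - digamma(M)   # bias of the Exponential-trick estimator
    return bias ** 2 + lnZ_Exponential_var(M)

print(lnZ_Exponential_MSE(np.array([1.0, 10.0, 100.0])))
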
entropy_estimators.py (project: cgpm, author: probcomp)
def entropy(x, k=3, base=2):
  """The classic K-L k-nearest neighbor continuous entropy estimator.
  x should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
  for four samples of a one-dimensional variable.
  """
  assert k <= len(x)-1, 'Set k smaller than num. samples - 1.'
  d = len(x[0])
  N = len(x)
  intens = 1e-10 # Small noise to break degeneracy, see doc.
  x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
  tree = ss.cKDTree(x)
  nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in x]
  const = digamma(N) - digamma(k) + d*log(2)
  return (const + d * np.mean(np.log(nn))) / log(base)
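
A minimal usage sketch of the estimator above, assuming the entropy function is in scope together with the imports the snippet relies on (np for numpy, nr for numpy.random, ss for scipy.spatial, log from math, digamma from scipy.special). The sample data below is hypothetical.

import numpy as np
import numpy.random as nr
import scipy.spatial as ss
from scipy.special import digamma
from math import log

# 2000 one-dimensional standard-normal samples; the true differential
# entropy of N(0, 1) is 0.5 * log2(2 * pi * e), roughly 2.05 bits.
nr.seed(0)
x = [[v] for v in nr.randn(2000)]
print(entropy(x, k=3, base=2))
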
entropy_estimators.py (project: cgpm, author: probcomp)
def avgdigamma(points, dvec):
  # This part finds number of neighbors in some radius in the marginal space
  # returns expectation value of <psi(nx)>.
  N = len(points)
  tree = ss.cKDTree(points)
  avg = 0.
  for i in range(N):
    dist = dvec[i]
    # Subtlety: we don't include the boundary point itself, but we implicitly
    # add 1 to the Kraskov definition because the center point is included.
    num_points = len(tree.query_ball_point(points[i], dist-1e-15,
      p=float('inf')))
    avg += digamma(num_points) / N
  return avg
doFeats_1.py (project: Tencent_Social_Ads, author: freelzy)
def __fixed_point_iteration(self, tries, success, alpha, beta):
        '''One fixed-point iteration update for (alpha, beta).'''
        sumfenzialpha = 0.0  # numerator ("fenzi") sum for alpha
        sumfenzibeta = 0.0   # numerator sum for beta
        sumfenmu = 0.0       # denominator ("fenmu") sum, shared by both updates
        for i in range(len(tries)):
            sumfenzialpha += (special.digamma(success[i]+alpha) - special.digamma(alpha))
            sumfenzibeta += (special.digamma(tries[i]-success[i]+beta) - special.digamma(beta))
            sumfenmu += (special.digamma(tries[i]+alpha+beta) - special.digamma(alpha+beta))

        return alpha*(sumfenzialpha/sumfenmu), beta*(sumfenzibeta/sumfenmu)
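
The method above performs one fixed-point step; the surrounding class (not shown here) presumably repeats it until alpha and beta stop changing. A standalone sketch of that outer loop, with hypothetical function names and data, and a vectorized version of the same step:

import numpy as np
from scipy import special

def fixed_point_step(tries, success, alpha, beta):
    # Vectorized form of the per-sample sums in __fixed_point_iteration.
    num_alpha = np.sum(special.digamma(success + alpha) - special.digamma(alpha))
    num_beta = np.sum(special.digamma(tries - success + beta) - special.digamma(beta))
    denom = np.sum(special.digamma(tries + alpha + beta) - special.digamma(alpha + beta))
    return alpha * num_alpha / denom, beta * num_beta / denom

def fit_beta_prior(tries, success, alpha=1.0, beta=1.0, tol=1e-9, max_iter=1000):
    for _ in range(max_iter):
        new_alpha, new_beta = fixed_point_step(tries, success, alpha, beta)
        if abs(new_alpha - alpha) < tol and abs(new_beta - beta) < tol:
            return new_alpha, new_beta
        alpha, beta = new_alpha, new_beta
    return alpha, beta

tries = np.array([100.0, 200.0, 50.0, 400.0])   # e.g. impressions per ad
success = np.array([4.0, 10.0, 1.0, 25.0])      # e.g. clicks per ad
print(fit_beta_prior(tries, success))
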
doFeats_1.py (project: Tencent_Social_Ads, author: freelzy)
def __fixed_point_iteration(self, imps, clks, alpha, beta):
        numerator_alpha = 0.0
        numerator_beta = 0.0
        denominator = 0.0

        for i in range(len(imps)):
            numerator_alpha += (special.digamma(clks[i] + alpha) - special.digamma(alpha))
            numerator_beta += (special.digamma(imps[i] - clks[i] + beta) - special.digamma(beta))
            denominator += (special.digamma(imps[i] + alpha + beta) - special.digamma(alpha + beta))

        return alpha * (numerator_alpha / denominator), beta * (numerator_beta / denominator)
mdmr.py (project: dmr, author: mpkato)
def _dll(self, x):
        alpha = self.get_alpha(x)
        return -(np.sum(self._dll_common(x)\
            * (special.digamma(np.sum(alpha, axis=1))[:,np.newaxis,np.newaxis]\
            - special.digamma(np.sum(self.n_m_z+alpha, axis=1))[:,np.newaxis,np.newaxis]\
            + special.digamma(self.n_m_z+alpha)[:,:,np.newaxis]\
            - special.digamma(alpha)[:,:,np.newaxis]), axis=0)\
            - x / (self.sigma ** 2))
dmr.py (project: dmr, author: mpkato)
def _dll(self, x):
        alpha = self.get_alpha(x)
        result = np.sum(self.vecs[:,np.newaxis,:] * alpha[:,:,np.newaxis]\
            * (special.digamma(np.sum(alpha, axis=1))[:,np.newaxis,np.newaxis]\
            - special.digamma(np.sum(self.n_m_z+alpha, axis=1))[:,np.newaxis,np.newaxis]\
            + special.digamma(self.n_m_z+alpha)[:,:,np.newaxis]\
            - special.digamma(alpha)[:,:,np.newaxis]), axis=0)\
            - x / (self.sigma ** 2)
        result = -result
        return result
ldaseqmodel.py (project: nonce2vec, author: minimalparts)
def update_phi(self, doc_number, time):
        """
        Update variational multinomial parameters, based on a document and a time-slice.
        This is done based on the original Blei-LDA paper, where:
        log_phi := beta * exp(Ψ(gamma)), over every topic for every word.

        TODO: incorporate the Lee-Seung trick used in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
        """
        num_topics = self.lda.num_topics
        # digamma values
        dig = np.zeros(num_topics)

        for k in range(0, num_topics):
            dig[k] = digamma(self.gamma[k])

        n = 0   # keep track of iterations for phi, log_phi
        for word_id, count in self.doc:
            for k in range(0, num_topics):
                self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]

            log_phi_row = self.log_phi[n]
            phi_row = self.phi[n]

            # log normalize
            v = log_phi_row[0]
            for i in range(1, len(log_phi_row)):
                v = np.logaddexp(v, log_phi_row[i])

            # subtract every element by v
            log_phi_row = log_phi_row - v 
            phi_row = np.exp(log_phi_row)
            self.log_phi[n] = log_phi_row
            self.phi[n] = phi_row
            n += 1  # move to the next word's row in phi / log_phi

        return self.phi, self.log_phi
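
The row normalization above (an np.logaddexp loop followed by subtracting v) is a log-sum-exp. For reference, the same row normalization can be written in one call with scipy.special.logsumexp; the values below are hypothetical.

import numpy as np
from scipy.special import logsumexp

log_phi_row = np.array([-1.2, -0.3, -2.5])           # unnormalized log weights
log_phi_row = log_phi_row - logsumexp(log_phi_row)   # log-normalize the row
phi_row = np.exp(log_phi_row)                        # now sums to 1
print(phi_row, phi_row.sum())
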
ldaseqmodel.py (project: nonce2vec, author: minimalparts)
def compute_lda_lhood(self):
        """
        Compute the likelihood bound for this document.
        """
        num_topics = self.lda.num_topics
        gamma_sum = np.sum(self.gamma)

        # to be used in DIM
        # sigma_l = 0
        # sigma_d = 0 

        lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
        self.lhood[num_topics] = lhood

        # influence_term = 0
        digsum = digamma(gamma_sum)

        model = "DTM"
        for k in range(0, num_topics):
            # below code only to be used in DIM mode
            # if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
            #     influence_topic = ldapost.doc_weight[k]
            #     influence_term = - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))

            e_log_theta_k = digamma(self.gamma[k]) - digsum
            lhood_term = (self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
            # TODO: check why there's an IF
            n = 0
            for word_id, count in self.doc:
                if self.phi[n][k] > 0:
                    lhood_term += count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
                n += 1
            self.lhood[k] = lhood_term
            lhood += lhood_term
            # in case of DIM add influence term
            # lhood += influence_term

        return lhood
mmsb.py (project: pymake, author: dtrckd)
def limit_k(self, N, directed=True):
        alpha, gmma, delta = self.get_hyper()
        N = int(N)
        if directed is True:
            m = alpha * N * (digamma(N+alpha) - digamma(alpha))
        else:
            m = alpha * N * (digamma(2*N+alpha) - digamma(alpha))

        # Number of class in the CRF
        K = int(gmma * (digamma(m+gmma) - digamma(gmma)))
        return K
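
A standalone version of the same formula, convenient for inspecting how K grows with N. The expected_K function simply mirrors the method body above; the hyperparameter values are hypothetical.

from scipy.special import digamma

def expected_K(N, alpha, gmma, directed=True):
    n = N if directed else 2 * N
    # Expected mass m at the first level, then expected number of classes K
    # at the second level of the CRF, exactly as in limit_k above.
    m = alpha * N * (digamma(n + alpha) - digamma(alpha))
    return int(gmma * (digamma(m + gmma) - digamma(gmma)))

for N in (10, 100, 1000):
    print(N, expected_K(N, alpha=0.5, gmma=1.0))
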
estimator.py (project: CNValloc, author: m1m0r1)
def calc_lower_ll(self):
        g = digamma(self.gamma) - digamma(self.gamma.sum(axis=1)[:, np.newaxis])
        N = self._data.nsamples
        psi_d = np.exp(self.log_psi + self._data.log_d[:, :, :, np.newaxis])

        ll = self.ll_offset
        ll += (psi_d.sum(axis=0) * self.log_beta).sum()
        ll += (psi_d.sum(axis=1).sum(axis=1) * g).sum()
        ll += N * (gammaln(self.alpha.sum()) - gammaln(self.alpha).sum()) + (self.alpha * g.sum(axis=0)).sum()
        ll -= (gammaln(self.gamma.sum(axis=1)) - gammaln(self.gamma).sum(axis=1) + (self.gamma * g).sum(axis=1)).sum()
        ll -= (psi_d * self.log_psi).sum()
        return ll
TestExpFamBregman_ZeroMeanGauss.py (project: bnpy, author: bnpy)
def checkFacts_pdf_Phi(
        nu=0, tau=0, phi_grid=None, **kwargs):
    cPrior = c_Prior(nu=nu, tau=tau)
    if phi_grid is None:
        phi_grid = make_phi_grid(**kwargs)
    logpdf_grid = tau * phi_grid - nu * c_Phi(phi_grid) - cPrior
    pdf_grid = np.exp(logpdf_grid)
    mu_grid = phi2mu(phi_grid)

    IntegralVal = np.trapz(pdf_grid, phi_grid)
    E_phi_numeric = np.trapz(pdf_grid * phi_grid, phi_grid)
    E_phi_formula = -(0.5 * nu + 1) / tau
    E_mu_numeric = np.trapz(pdf_grid * mu_grid, phi_grid)
    E_mu_formula = tau/nu
    mode_phi_numeric = phi_grid[np.argmax(pdf_grid)]
    mode_phi_formula = mu2phi(tau/nu)

    E_c_numeric = np.trapz(pdf_grid * c_Phi(phi_grid), phi_grid)
    E_c_formula = - 0.5 * digamma(0.5 * nu + 1) + 0.5 * np.log(tau)

    print "nu=%7.3f tau=%7.3f" % (nu, tau)
    print "     Integral=% 7.3f   should be % 7.3f" % (IntegralVal, 1.0)
    print "        E[mu]=% 7.3f   should be % 7.3f" % (E_mu_numeric, E_mu_formula)
    print "       E[phi]=% 7.3f   should be % 7.3f" % (E_phi_numeric, E_phi_formula)
    print "    E[c(phi)]=% 7.3f   should be % 7.3f" % (E_c_numeric, E_c_formula)
    print "    mode[phi]=% 7.3f   should be % 7.3f" % (
        mode_phi_numeric, mode_phi_formula)
GaussRegressYFromFixedXObsModel.py (project: bnpy, author: bnpy)
def E_log_d(w_E=None, P_EE=None, pnu=None, ptau=None, **kwargs):
    ''' Expected value of log of precision parameter delta

    Returns
    -------
    Elogdelta : scalar
    '''
    return digamma(0.5 * pnu) - np.log(0.5 * ptau)
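
The identity used here, E[log delta] = psi(pnu/2) - log(ptau/2), holds when delta follows a Gamma distribution with shape pnu/2 and rate ptau/2. A quick Monte Carlo check, with hypothetical values for pnu and ptau:

import numpy as np
from scipy.special import digamma

rng = np.random.default_rng(0)
pnu, ptau = 7.0, 3.0
# numpy's gamma sampler takes shape and scale, where scale = 1 / rate.
delta = rng.gamma(shape=0.5 * pnu, scale=2.0 / ptau, size=200000)
print(np.mean(np.log(delta)))                   # Monte Carlo estimate
print(digamma(0.5 * pnu) - np.log(0.5 * ptau))  # closed form
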
MultObsModel.py (project: bnpy, author: bnpy)
def _E_logphi(self, k=None):
        if k is None or k == 'prior':
            lam = self.Prior.lam
            Elogphi = digamma(lam) - digamma(np.sum(lam))
        elif k == 'all':
            AMat = self.Post.lam
            Elogphi = digamma(AMat) \
                - digamma(np.sum(AMat, axis=1))[:, np.newaxis]
        else:
            Elogphi = digamma(self.Post.lam[k]) - \
                digamma(self.Post.lam[k].sum())
        return Elogphi
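
Each branch above is the standard Dirichlet identity: for phi ~ Dirichlet(lam), E[log phi_k] = psi(lam_k) - psi(sum(lam)). A quick numeric check with a hypothetical lam vector:

import numpy as np
from scipy.special import digamma

rng = np.random.default_rng(0)
lam = np.array([2.0, 0.5, 5.0])
phi = rng.dirichlet(lam, size=200000)
print(np.log(phi).mean(axis=0))            # Monte Carlo estimate of E[log phi]
print(digamma(lam) - digamma(lam.sum()))   # closed form
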
BernObsModel.py (project: bnpy, author: bnpy)
def _E_logphi(self, k=None):
        if k is None or k == 'prior':
            lam1 = self.Prior.lam1
            lam0 = self.Prior.lam0
        elif k == 'all':
            lam1 = self.Post.lam1
            lam0 = self.Post.lam0
        else:
            lam1 = self.Post.lam1[k]
            lam0 = self.Post.lam0[k]
        Elogphi = digamma(lam1) - digamma(lam1 + lam0)
        return Elogphi
BernObsModel.py (project: bnpy, author: bnpy)
def _E_logphiT_log1mphiT(self, k=None):
        if k == 'all':
            lam1T = self.Post.lam1.T.copy()
            lam0T = self.Post.lam0.T.copy()
            digammaBoth = digamma(lam1T + lam0T)
            ElogphiT = digamma(lam1T) - digammaBoth
            Elog1mphiT = digamma(lam0T) - digammaBoth
        else:
            ElogphiT = self._E_logphiT(k)
            Elog1mphiT = self._E_log1mphiT(k)
        return ElogphiT, Elog1mphiT
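
The same pattern for the Beta case used by these two methods: with phi ~ Beta(lam1, lam0), E[log phi] = psi(lam1) - psi(lam1 + lam0) and E[log(1 - phi)] = psi(lam0) - psi(lam1 + lam0). A quick numeric check with hypothetical lam1, lam0:

import numpy as np
from scipy.special import digamma

rng = np.random.default_rng(0)
lam1, lam0 = 2.5, 4.0
phi = rng.beta(lam1, lam0, size=200000)
print(np.log(phi).mean(), np.log(1.0 - phi).mean())   # Monte Carlo
print(digamma(lam1) - digamma(lam1 + lam0),
      digamma(lam0) - digamma(lam1 + lam0))           # closed form
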
LocalStepSingleDocSimpler.py (project: bnpy, author: bnpy)
def calcELBOForSingleDocFromCountVec(
        DocTopicCount_K=None, 
        cts_U=None,
        sumResp_U=None,
        alphaEbeta_K=None,
        logLik_UK=None,
        L_max=0.0):
    ''' Compute ELBO for single doc as function of doc-topic counts.

    Returns
    -------
    L : scalar float
        equals ELBO as function of local parameters of single document
        up to an additive constant independent of DocTopicCount
    '''
    theta_K = DocTopicCount_K + alphaEbeta_K
    logPrior_K = digamma(theta_K)
    L_theta = np.sum(gammaln(theta_K)) - np.inner(DocTopicCount_K, logPrior_K)
    explogPrior_K = np.exp(logPrior_K)
    if sumResp_U is None:
        maxlogLik_U = np.max(logLik_UK, axis=1)
        explogLik_UK = logLik_UK - maxlogLik_U[:,np.newaxis]
        np.exp(explogLik_UK, out=explogLik_UK)
        sumResp_U = np.dot(explogLik_UK, explogPrior_K)
        L_max = np.inner(cts_U, maxlogLik_U)
    L_resp = np.inner(cts_U, np.log(sumResp_U))
    return L_theta + L_resp + L_max
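
A toy call of the function above, assuming it is in scope together with numpy (np) and with digamma and gammaln imported from scipy.special, as the snippet assumes. All counts and likelihoods below are hypothetical.

import numpy as np

# U=3 unique word types, K=2 topics
cts_U = np.array([2.0, 1.0, 1.0])                # token counts per word type
logLik_UK = np.log(np.array([[0.7, 0.1],
                             [0.2, 0.6],
                             [0.1, 0.3]]))       # log p(word u | topic k)
DocTopicCount_K = np.array([3.0, 1.0])
alphaEbeta_K = np.array([0.5, 0.5])

L = calcELBOForSingleDocFromCountVec(
    DocTopicCount_K=DocTopicCount_K,
    cts_U=cts_U,
    alphaEbeta_K=alphaEbeta_K,
    logLik_UK=logLik_UK)
print(L)
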
LocalStepManyDocs.py (project: bnpy, author: bnpy)
def updateLPGivenDocTopicCount(LP, DocTopicCount,
                               alphaEbeta, alphaEbetaRem=None):
    ''' Update local parameters given doc-topic counts for many docs.

    Returns for FiniteTopicModel (alphaEbetaRem is None)
    --------
    LP : dict of local params, with updated fields
        * theta : 2D array, nDoc x K
        * ElogPi : 2D array, nDoc x K

    Returns for HDPTopicModel (alphaEbetaRem is not None)
    --------
        * theta : 2D array, nDoc x K
        * ElogPi : 2D array, nDoc x K
        * thetaRem : scalar
        * ElogPiRem : scalar
    '''
    theta = DocTopicCount + alphaEbeta

    if alphaEbetaRem is None:
        # FiniteTopicModel
        digammaSumTheta = digamma(theta.sum(axis=1))
    else:
        # HDPTopicModel
        digammaSumTheta = digamma(theta.sum(axis=1) + alphaEbetaRem)
        LP['thetaRem'] = alphaEbetaRem
        LP['ElogPiRem'] = digamma(alphaEbetaRem) - digammaSumTheta
        LP['digammaSumTheta'] = digammaSumTheta  # Used for merges

    ElogPi = digamma(theta) - digammaSumTheta[:, np.newaxis]
    LP['theta'] = theta
    LP['ElogPi'] = ElogPi
    return LP
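
A minimal usage sketch for the FiniteTopicModel branch, assuming the function above is in scope along with numpy (np) and scipy.special's digamma; the toy arrays are hypothetical.

import numpy as np

DocTopicCount = np.array([[3.0, 0.0, 1.0],
                          [0.0, 5.0, 2.0]])   # nDoc=2 documents, K=3 topics
alphaEbeta = np.array([0.4, 0.4, 0.2])        # alpha * E[beta_k]

LP = updateLPGivenDocTopicCount(dict(), DocTopicCount, alphaEbeta)
print(LP['theta'])    # DocTopicCount + alphaEbeta
print(LP['ElogPi'])   # E[log pi_dk] under q(pi_d) = Dirichlet(theta_d)
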

