Python psi() usage examples (scipy.special.psi, the digamma function)

hdpmodel.py (project: paragraph2vec, author: thunlp; the same function appears verbatim in thunlp's topical_word_embeddings)
def update_expectations(self):
        """
        Since we're doing lazy updates on lambda, at any given moment
        the current state of lambda may not be accurate. This function
        updates all of the elements of lambda and Elogbeta
        so that if (for example) we want to print out the
        topics we've learned we'll get the correct behavior.
        """
        for w in xrange(self.m_W):
            self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
                                          self.m_r[self.m_timestamp[w]])
        self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
            sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])

        self.m_timestamp[:] = self.m_updatect
        self.m_status_up_to_date = True
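The multiplier np.exp(self.m_r[-1] - self.m_r[self.m_timestamp[w]]) applies, in one shot, every decay factor a column of lambda has missed since it was last touched, because m_r stores the running sum of per-update log decay factors. A toy illustration of that identity (standalone, with hypothetical decay values):

import numpy as np

log_decays = np.log([0.9, 0.8, 0.95])                 # per-update decay factors
m_r = np.concatenate([[0.0], np.cumsum(log_decays)])  # running log scale, as in m_r
t_w = 1                                               # column last touched at step 1
print(np.exp(m_r[-1] - m_r[t_w]))                     # 0.8 * 0.95 = 0.76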
scalemixture.py (project: CSB, author: csb-toolbox)
def inv_digamma_minus_log(y, tol=1e-10, n_iter=100):
    """
    Solve y = psi(alpha) - log(alpha) for alpha by fixed-point
    iteration.
    """
    if y >= -log(6.):
        x = 1 / (2 * (1 - exp(y)))
    else:
        x = 1.e-10
    for _i in range(n_iter):
        z = approx_psi(x) - log(x) - y
        if abs(z) < tol:
            break
        x -= x * z / (x * d_approx_psi(x) - 1)
        x = abs(x)
    return x
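As a quick self-check, the same iteration recovers a known alpha if we substitute scipy.special.psi and polygamma for CSB's approx_psi and d_approx_psi (an assumption; the originals are cheap series approximations):

import numpy as np
from scipy.special import psi, polygamma

alpha_true = 3.7
y = psi(alpha_true) - np.log(alpha_true)
x = 1 / (2 * (1 - np.exp(y))) if y >= -np.log(6.) else 1e-10  # same initial guess
for _ in range(100):
    z = psi(x) - np.log(x) - y           # residual of psi(x) - log(x) = y
    if abs(z) < 1e-10:
        break
    x -= z / (polygamma(1, x) - 1 / x)   # Newton step; polygamma(1, .) is psi'
    x = abs(x)
print(x)                                 # ~3.7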
scalemixture.py (project: CSB, author: csb-toolbox)
def initial_values(self, tol=1e-10):
        """
        Generate initial values by doing fixed point
        iterations to solve for alpha
        """
        n, a, b = self.n, self.a, self.b

        if abs(b) < 1e-10:
            alpha = inv_psi(a / n)
        else:
            alpha = 1.

        z = tol + 1.

        while abs(z) > tol:

            z = n * psi(alpha) - \
                b / numpy.clip(alpha, 1e-300, 1e300) - a

            alpha -= z / (n * d_approx_psi(alpha) - b
                          / (alpha ** 2 + 1e-300))
            alpha = numpy.clip(alpha, 1e-100, 1e300)

        return numpy.clip(alpha - 1 / (n + 1e-300), 1e-100, 1e300), \
               alpha + 1 / (n + 1e-300), alpha
scalemixture.py (project: CSB, author: csb-toolbox)
def estimate(self, context, data):
        pdf = ScaleMixture()
        alpha = context.prior.alpha
        beta = context.prior.beta
        d = context._d

        if len(data.shape) == 1:
            data = data[:, numpy.newaxis]

        a = alpha + 0.5 * d * len(data.shape)
        b = beta + 0.5 * data.sum(-1) ** 2

        s = a / b
        log_s = psi(a) - log(b)

        pdf.scales = s
        context.prior.estimate([s, log_s])
        pdf.prior = context.prior

        return pdf
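The pair (s, log_s) are the standard Gamma expectations: for s ~ Gamma(shape=a, rate=b), E[s] = a/b and E[log s] = psi(a) - log(b). A quick Monte Carlo check with hypothetical parameters:

import numpy as np
from scipy.special import psi

a, b = 2.5, 4.0
s = np.random.gamma(a, 1.0 / b, size=200000)  # numpy takes scale = 1/rate
print(s.mean(), a / b)                        # both ~0.625
print(np.log(s).mean(), psi(a) - np.log(b))   # both ~ -0.683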
yacht_regression.py (project: SEVDL_MGP, author: AMLab-Amsterdam)
def loglike(nn, sample_preds, y):
    """Return the Avg. Test Log-Likelihood
    """
    if y.shape[1] == 1:
        y = y.ravel()
    sample_ll = np.zeros((sample_preds.shape[1], sample_preds.shape[0]))
    a, b = np.exp(nn.extra_inf['a1'].get_value()), np.exp(nn.extra_inf['b1'].get_value())
    etau, elogtau = (a / b).astype(np.float32), (psi(a) - np.log(b)).astype(np.float32)
    for sample in xrange(sample_preds.shape[0]):
        ypred = sample_preds[sample].astype(np.float32)
        if len(y.shape) > 1:
            sll = -.5 * np.sum(etau * (y - ypred)**2, axis=1)
        else:
            sll = -.5 * etau * (y - ypred)**2
        sample_ll[:, sample] = sll

    return np.mean(logsumexp(sample_ll, axis=1) - np.log(sample_preds.shape[0]) - .5 * np.log(2*np.pi) + .5 * elogtau.sum())
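The last line averages the per-sample predictive densities in log space, using the identity log((1/S) * sum_s p_s) = logsumexp(log p_s) - log(S). A minimal illustration of why the log-space form is preferred:

import numpy as np
from scipy.special import logsumexp  # lived in scipy.misc on older SciPy

log_p = np.log([0.2, 0.4, 0.1])               # hypothetical per-sample densities
print(np.log(np.mean([0.2, 0.4, 0.1])))       # direct mean; underflows for tiny p
print(logsumexp(log_p) - np.log(len(log_p)))  # same value, computed stably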
hdpmodel.py (project: paragraph2vec, author: thunlp; this and the next two hdpmodel.py functions also appear verbatim in thunlp's topical_word_embeddings)
def dirichlet_expectation(alpha):
    """
    For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
    """
    if (len(alpha.shape) == 1):
        return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
    return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
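A quick Monte Carlo check of the identity E[log theta_k] = psi(alpha_k) - psi(sum_j alpha_j), with a hypothetical alpha:

import numpy as np
from scipy import special as sp

alpha = np.array([0.5, 1.0, 3.0])
analytic = sp.psi(alpha) - sp.psi(alpha.sum())
mc = np.log(np.random.dirichlet(alpha, size=200000)).mean(axis=0)
print(analytic)  # approx. [-3.35, -1.97, -0.47]
print(mc)        # should agree to about two decimals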
hdpmodel.py (project: paragraph2vec, author: thunlp)
def expect_log_sticks(sticks):
    """
    For a stick-breaking HDP, return E[log(sticks)].
    """
    dig_sum = sp.psi(np.sum(sticks, 0))
    ElogW = sp.psi(sticks[0]) - dig_sum
    Elog1_W = sp.psi(sticks[1]) - dig_sum

    n = len(sticks[0]) + 1
    Elogsticks = np.zeros(n)
    Elogsticks[0: n - 1] = ElogW
    Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
    return Elogsticks
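Here sticks is a (2, T-1) array of Beta variational parameters for the T-1 stick proportions, row 0 holding the a-parameters and row 1 the b-parameters (the layout implied by sticks[0], sticks[1], and the axis-0 sum). A usage sketch with hypothetical values:

import numpy as np

sticks = np.vstack([np.ones(4),        # a_1..a_4
                    np.full(4, 2.0)])  # b_1..b_4
print(expect_log_sticks(sticks))       # length-5 vector of E[log pi_k]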
hdpmodel.py (project: paragraph2vec, author: thunlp)
def update_chunk(self, chunk, update=True, opt_o=True):
        # Find the unique words in this chunk...
        unique_words = dict()
        word_list = []
        for doc in chunk:
            for word_id, _ in doc:
                if word_id not in unique_words:
                    unique_words[word_id] = len(unique_words)
                    word_list.append(word_id)

        Wt = len(word_list)  # number of unique word types in these documents

        # ...and do the lazy updates on the necessary columns of lambda
        rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
        self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
        self.m_Elogbeta[:, word_list] = \
            sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
            sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])

        ss = SuffStats(self.m_T, Wt, len(chunk))

        Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks

        # run variational inference on some new docs
        score = 0.0
        count = 0
        for doc in chunk:
            if len(doc) > 0:
                doc_word_ids, doc_word_counts = zip(*doc)
                doc_score = self.doc_e_step(doc, ss, Elogsticks_1st,
                    word_list, unique_words, doc_word_ids,
                    doc_word_counts, self.m_var_converge)
                count += sum(doc_word_counts)
                score += doc_score

        if update:
            self.update_lambda(ss, word_list, opt_o)

        return (score, count)
scalemixture.py (project: CSB, author: csb-toolbox)
def __call__(self, x):

        from scipy.special import gammaln

        return self.a * x - \
               self.n * gammaln(numpy.clip(x, 1e-308, 1e308)) + \
               self.b * log(x), \
               self.a - self.n * psi(x) + self.b / x
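The call returns the objective and its gradient as a pair; a finite-difference check (with hypothetical a, n, b) confirms that a - n*psi(x) + b/x is the derivative of a*x - n*gammaln(x) + b*log(x):

import numpy as np
from scipy.special import gammaln, psi

a, n, b = 2.0, 5.0, 1.5
f = lambda x: a * x - n * gammaln(x) + b * np.log(x)
g = lambda x: a - n * psi(x) + b / x           # d/dx gammaln(x) = psi(x)
x, h = 3.0, 1e-6
print((f(x + h) - f(x - h)) / (2 * h), g(x))   # agree to ~1e-9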
onlineldavb.py (project: DataScience-And-MachineLearning-Handbook-For-Coders, author: wxyyxc1992; the same function appears verbatim in xiegonghai's Apply-LDA)
def dirichlet_expectation(alpha):
    """
    For a vector theta ~ Dir(alpha), computes E[log(theta)] given alpha.
    """
    if (len(alpha.shape) == 1):
        return(psi(alpha) - psi(n.sum(alpha)))
    return(psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis])
base_c.py (project: adversarial-variational-bayes, author: gdikov)
def estimation(self, y1, y2):
        """ Estimate cross-entropy.

        Parameters
        ----------
        y1 : (number of samples1, dimension)-ndarray
             One row of y1 corresponds to one sample.
        y2 : (number of samples2, dimension)-ndarray
             One row of y2 corresponds to one sample.

        Returns
        -------
        c : float
            Estimated cross-entropy.

        References
        ----------
        Nikolai Leonenko, Luc Pronzato, and Vippal Savani. A class of
        Renyi information estimators for multidimensional densities.
        Annals of Statistics, 36(5):2153-2182, 2008.

        Examples
        --------
        c = co.estimation(y1,y2)  

        """

        # verification:
        self.verification_equal_d_subspaces(y1, y2)

        num_of_samples2, dim = y2.shape  # number of samples, dimension

        # computation:
        v = volume_of_the_unit_ball(dim)
        distances_y2y1 = knn_distances(y2, y1, False, self.knn_method,
                                       self.k, self.eps, 2)[0]
        c = log(v) + log(num_of_samples2) - psi(self.k) + \
            dim * mean(log(distances_y2y1[:, -1]))

        return c
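A self-contained sketch of the same k-NN cross-entropy estimate, substituting scipy's cKDTree for the package's knn_distances helper (an assumption: knn_method, eps, and the helper's exact argument order are abstracted away here):

import numpy as np
from math import pi, gamma
from scipy.spatial import cKDTree
from scipy.special import psi

def cross_entropy_knn(y1, y2, k=3):
    num_of_samples2, dim = y2.shape
    v = pi ** (dim / 2.0) / gamma(dim / 2.0 + 1.0)  # volume of the unit d-ball
    d_k = cKDTree(y2).query(y1, k=k)[0]             # k-NN distances from y1 into y2
    d_k = d_k[:, -1] if k > 1 else d_k
    return np.log(v) + np.log(num_of_samples2) - psi(k) + dim * np.mean(np.log(d_k))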
PoissonMF.py (project: GraphicalModelForRecommendation, author: AlgorithmFan)
def _compute_expectations(gamma, rho):
    return (gamma/rho, special.psi(gamma) - np.log(rho))
mmsb_vb.py (project: pymake, author: dtrckd)
def run_e_step(self):
        """ compute variational expectations
        """
        ll = 0.

        for p in range(self.N):
            for q in range(self.N):
                new_phi = np.zeros(self.K)

                for g in range(self.K):
                    new_phi[g] = np.exp(psi(self.gamma[p,g])-psi(np.sum(self.gamma[p,:]))) * np.prod(( (self.B[g,:]**self.Y[p,q])
                        * ((1.-self.B[g,:])**(1.-self.Y[p,q])) )
                        ** self.phi[q,p,:] )
                self.phi[p,q,:] = new_phi/np.sum(new_phi)

                new_phi = np.zeros(self.K)
                for h in range(self.K):
                    new_phi[h] = np.exp(psi(self.gamma[q,h])-psi(np.sum(self.gamma[q,:]))) * np.prod(( (self.B[:,h]**self.Y[p,q])
                        * ((1.-self.B[:,h])**(1.-self.Y[p,q])) )
                        ** self.phi[p,q,:] )
                self.phi[q,p,:] = new_phi/np.sum(new_phi)

                for k in range(self.K):
                    self.gamma[p,k] = self.alpha[k] + np.sum(self.phi[p,:,k]) + np.sum(self.phi[:,p,k])
                    self.gamma[q,k] = self.alpha[k] + np.sum(self.phi[q,:,k]) + np.sum(self.phi[:,q,k])

        return ll
utils.py (project: stochasticLDA, author: qlai)
def dirichlet_expectation(alpha):
    '''see onlineldavb.py by Blei et al'''
    if (len(alpha.shape) == 1):
        return (psi(alpha) - psi(n.sum(alpha)))
    return (psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis])
utils.py (project: stochasticLDA, author: qlai)
def beta_expectation(a, b, k):
    mysum = psi(a + b)
    Elog_a = psi(a) - mysum
    Elog_b = psi(b) - mysum
    Elog_beta = n.zeros(k)
    Elog_beta[0] = Elog_a[0]
    for i in range(1, k):
        Elog_beta[i] = Elog_a[i] + n.sum(Elog_b[0:i])
    return Elog_beta
main.py (project: LDA-Variational-EM, author: laserwave)
def variationalInference(docs, d, gamma, phi):
    phisum = 0
    oldphi = np.zeros([K])
    digamma_gamma = np.zeros([K])

    for z in range(0, K):
        gamma[d][z] = alpha + docs[d].wordCount * 1.0 / K
        digamma_gamma[z] = psi(gamma[d][z])
        for w in range(0, len(docs[d].itemIdList)):
            phi[w, z] = 1.0 / K

    for iteration in range(0, iterInference):
        for w in range(0, len(docs[d].itemIdList)):
            phisum = 0
            for z in range(0, K):
                oldphi[z] = phi[w, z]
                phi[w, z] = digamma_gamma[z] + varphi[z, docs[d].itemIdList[w]]
                if z > 0:
                    phisum = math.log(math.exp(phisum) + math.exp(phi[w, z]))
                else:
                    phisum = phi[w, z]
            for z in range(0, K):
                phi[w, z] = math.exp(phi[w, z] - phisum)
                gamma[d][z] =  gamma[d][z] + docs[d].itemCountList[w] * (phi[w, z] - oldphi[z])
                digamma_gamma[z] = psi(gamma[d][z])
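The running phisum is a streaming log-sum-exp used to normalize phi; math.exp overflows once its argument exceeds about 709, so np.logaddexp computes the same update stably:

import numpy as np

a, b = 710.0, 709.0        # math.exp(710) raises OverflowError
print(np.logaddexp(a, b))  # ~710.31 = log(exp(710) + exp(709)), computed stably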


DPGMM.py (project: megamix, author: 14thibea)
def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each point in each cluster

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        """

        n_points,dim = points.shape

        log_gaussian = _log_normal_matrix(points,self.means,self.cov,'full',self.n_jobs)
        log_gaussian -= 0.5 * dim * np.log(self.nu)
        digamma_sum = np.sum(psi(.5 * (self.nu - np.arange(0, dim)[:,np.newaxis])),0)
        log_lambda = digamma_sum + dim * np.log(2)

        log_prob = self.log_weights + log_gaussian + 0.5 * (log_lambda - dim / self.beta)

        log_prob_norm = logsumexp(log_prob, axis=1)
        log_resp = log_prob - log_prob_norm[:,np.newaxis]

        return log_prob_norm,log_resp
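digamma_sum + dim * np.log(2) comes from the Wishart identity E[log det Lambda] = sum_{i=0}^{dim-1} psi((nu - i)/2) + dim*log(2) + log det(W); the log-det of the scale matrix is presumably carried through self.cov elsewhere in the class. A Monte Carlo check of the identity under an assumed Wishart(df=nu, scale=V) setup:

import numpy as np
from scipy.stats import wishart
from scipy.special import psi

dim, nu = 3, 7.0
V = np.eye(dim)
analytic = (psi(0.5 * (nu - np.arange(dim))).sum()
            + dim * np.log(2) + np.linalg.slogdet(V)[1])
samples = wishart.rvs(df=nu, scale=V, size=50000)
mc = np.mean([np.linalg.slogdet(S)[1] for S in samples])
print(analytic, mc)  # agree to about two decimals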
test_online_lda.py (project: Parallel-SGD, author: angadgill)
def test_dirichlet_expectation():
    """Test Cython version of Dirichlet expectation calculation."""
    x = np.logspace(-100, 10, 10000)
    expectation = np.empty_like(x)
    _dirichlet_expectation_1d(x, 0, expectation)
    assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
                    atol=1e-19)

    x = x.reshape(100, 100)
    assert_allclose(_dirichlet_expectation_2d(x),
                    psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
                    rtol=1e-11, atol=3e-9)

