Example source code for Python's det()

x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
from numpy import absolute, dot, exp, eye, sqrt
from numpy.linalg import det, inv


def analytical_value_k_expected(distr1, distr2, sigma, par1, par2):
    """ Analytical value of expected kernel for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Names of the distributions.
    sigma : float, >0
            Std parameter of the expected kernel.
    par1, par2 : dicts
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    k : float
        Analytical value of the expected kernel.

    References
    ----------
    Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard 
    Schölkopf. Learning from distributions via support measure machines.
    In Advances in Neural Information Processing Systems (NIPS), pages
    10-18, 2012.

    """

    if distr1 == 'normal' and distr2 == 'normal':

        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        gam = 1 / sigma**2
        diffm = m1 - m2 
        exp_arg = dot(dot(diffm, inv(c1 + c2 + eye(dim) / gam)), diffm)
        k = exp(-exp_arg / 2) / \
            sqrt(absolute(det(gam * c1 + gam * c2 + eye(dim))))
    else:
        raise ValueError('unknown distribution pair: {0}, {1}'.format(distr1, distr2))

    return k
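
A quick way to sanity-check this closed form is a Monte-Carlo estimate of the expected kernel: draw samples from the two Gaussians and average the Gaussian kernel over all pairs. The sketch below is an addition for illustration, not part of the original file; sample sizes and parameters are arbitrary.

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
dim, sigma = 2, 1.5
par1 = {'mean': np.zeros(dim), 'cov': np.eye(dim)}
par2 = {'mean': np.ones(dim), 'cov': 2 * np.eye(dim)}

k_exact = analytical_value_k_expected('normal', 'normal', sigma, par1, par2)

# Monte-Carlo estimate of E[k(X, Y)], k(x, y) = exp(-||x - y||^2 / (2 sigma^2))
x = rng.multivariate_normal(par1['mean'], par1['cov'], size=2000)
y = rng.multivariate_normal(par2['mean'], par2['cov'], size=2000)
k_mc = np.exp(-cdist(x, y, 'sqeuclidean') / (2 * sigma**2)).mean()

print(k_exact, k_mc)  # should agree to roughly two decimal places
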
x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
from numpy import absolute, dot, exp, log
from numpy.linalg import det, inv


def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
                                     par2):
    """ Analytical value of the Sharma-Mittal divergence.

    Parameters
    ----------    
    distr1, distr2 : str-s
                     Names of distributions.
    alpha : float, 0 < alpha, alpha != 1
            Parameter of the Sharma-Mittal divergence.
    beta : float, beta != 1
           Parameter of the Sharma-Mittal divergence.
    par1, par2 : dicts
                 Parameters of distributions.
                 If (distr1,distr2) = ('normal','normal'), then distr1 =
                 N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
                 distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
                 par2['cov'].

    Returns
    -------
    D : float
        Analytical value of the Sharma-Mittal divergence.

    References
    ----------          
    Frank Nielsen and Richard Nock. A closed-form expression for the 
    Sharma-Mittal entropy of exponential families. Journal of Physics A: 
    Mathematical and Theoretical, 45:032003, 2012.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        c = inv(alpha * inv(c1) + (1 - alpha) * inv(c2))
        diffm = m1 - m2

        # Jensen difference divergence:
        j = (log(absolute(det(c1))**alpha * absolute(det(c2))**(1 - alpha)
                 / absolute(det(c)))
             + alpha * (1 - alpha) * dot(dot(diffm, inv(c)), diffm)) / 2
        # exp(-j); use a fresh name so the covariance matrix c2 is not shadowed
        ej = exp(-j)

        d = (ej**((1 - beta) / (1 - alpha)) - 1) / (beta - 1)

    else:
        raise ValueError('unknown distribution pair: {0}, {1}'.format(distr1, distr2))

    return d
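
As a sanity check (again an addition, not from the file): in the limit alpha -> 1, beta -> 1 the Sharma-Mittal divergence reduces to the Kullback-Leibler divergence, which has its own closed form for Gaussians.

import numpy as np

m1, c1 = np.array([0.0, 0.0]), np.eye(2)
m2, c2 = np.array([1.0, -1.0]), np.array([[2.0, 0.3], [0.3, 1.0]])
par1, par2 = {'mean': m1, 'cov': c1}, {'mean': m2, 'cov': c2}

d_sm = analytical_value_d_sharma_mittal('normal', 'normal',
                                        0.999, 0.999, par1, par2)

# Gaussian KL divergence KL(N(m1, c1) || N(m2, c2)):
diff = m2 - m1
ic2 = np.linalg.inv(c2)
kl = 0.5 * (np.trace(ic2 @ c1) + diff @ ic2 @ diff - len(m1)
            + np.log(np.linalg.det(c2) / np.linalg.det(c1)))

print(d_sm, kl)  # close, since (alpha, beta) is near (1, 1)
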
rvm.py (project: prml, author: Yevgnen)
import scipy as sp
import scipy.linalg as spla

# note: this excerpt relies on SciPy re-exporting the NumPy namespace
# (sp.ones, sp.dot, sp.eye, ...), which is deprecated; with recent SciPy,
# use numpy for those calls instead.
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
        """Fit an RVM model with the training data ``(X, T)``."""
        # Initialize the hyperparameters
        self._init_hyperparameters(X, T)

        # Compute design matrix
        n_samples = X.shape[0]
        phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

        alpha = self.cov
        beta = self.beta

        log_evidence = -1e10
        for it in range(max_iter):  # 'iter' would shadow the builtin
            alpha[alpha >= bound] = bound
            rv_indices = sp.nonzero(alpha < bound)[0]
            rv_phi = phi[:, rv_indices]
            rv_alpha = alpha[rv_indices]

            # Compute the posterior distribution
            post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
            post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

            # Re-estimate the hyperparameters
            gamma = 1 - rv_alpha * post_cov.diagonal()
            rv_alpha = gamma / (post_mean * post_mean)
            beta = (n_samples + 1 - gamma.sum()) / spla.norm(T - sp.dot(rv_phi, post_mean))**2

            # Evaluate the log evidence and test the relative change
            C = sp.eye(rv_phi.shape[0]) / beta + rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
            log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot(T))
            diff = abs(log_evidence_new - log_evidence)
            if diff < tol * abs(log_evidence):
                break

            log_evidence = log_evidence_new
            alpha[rv_indices] = rv_alpha

        # Should re-compute the posterior distribution
        self.rv_indices = rv_indices
        self.cov = post_cov
        self.mean = post_mean
        self.beta = beta

        return self
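
One numerical caveat, added here as a note rather than taken from the repo: for larger designs spla.det(C) can overflow or underflow, so sp.log(spla.det(C)) returns inf or -inf even though the log-determinant itself is moderate. The standard remedy is np.linalg.slogdet, which computes the log-determinant directly; a minimal sketch:

import numpy as np

def safe_log_det(C):
    """log|C| for positive-definite C, without forming det(C) itself."""
    sign, logdet = np.linalg.slogdet(C)
    if sign <= 0:
        raise ValueError('matrix is not positive definite')
    return logdet

# drop-in for the evidence term above:
#   log_evidence_new = -0.5 * (safe_log_det(C) + T.dot(spla.inv(C)).dot(T))
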
johnson.py (project: PyDiatomic, author: stggh)
import numpy as np
from scipy import constants as const
from scipy import linalg
# sph_jn/sph_yn were removed in SciPy 1.0; with modern SciPy use
# scipy.special.spherical_jn / spherical_yn instead.
from scipy.special import sph_jn, sph_yn


def amplitude(wf, R, edash, mu):
    # Mies    F ~ JA + NB       J ~ sin(kR)/kR
    # normalization sqrt(2 mu / (pi hbar^2)) = zz
    zz = np.sqrt(2*mu/const.pi)/const.hbar

    oo, n, nopen = wf.shape

    # two asymptotic points on wavefunction wf[:, j]
    i1 = oo-5
    i2 = i1-1
    x1 = R[i1]*1.0e-10
    x2 = R[i2]*1.0e-10

    A = np.zeros((nopen, nopen))
    B = np.zeros((nopen, nopen))
    oc = 0
    for j in range(n):
        if edash[j] < 0:
            continue
        # open channel
        ke = np.sqrt(2*mu*edash[j]*const.e)/const.hbar
        rtk = np.sqrt(ke)
        kex1 = ke*x1
        kex2 = ke*x2

        j1 = sph_jn(0, kex1)[0]*x1*rtk*zz
        y1 = sph_yn(0, kex1)[0]*x1*rtk*zz

        j2 = sph_jn(0, kex2)[0]*x2*rtk*zz
        y2 = sph_yn(0, kex2)[0]*x2*rtk*zz

        # 2x2 determinant (Cramer's rule) for matching wf to j*A + y*B
        # at the two grid points x1, x2
        det = j1*y2 - j2*y1

        for k in range(nopen):
            A[oc, k] = (y2*wf[i1, j, k] - y1*wf[i2, j, k])/det
            B[oc, k] = (j1*wf[i2, j, k] - j2*wf[i1, j, k])/det

        oc += 1

    AI = linalg.inv(A)
    K = B @ AI

    return K, AI, B
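
The inner loop is Cramer's rule on a 2x2 system: at the two outer grid points the numerical wavefunction is matched to j*A + y*B. The check below (with made-up scalar values, purely illustrative) confirms the loop's formulas against np.linalg.solve:

import numpy as np

j1, y1, j2, y2 = 0.8, -0.3, 0.5, 0.7   # hypothetical basis values at x1, x2
w1, w2 = 0.42, 0.13                    # hypothetical wf values at x1, x2

# [[j1, y1], [[A],   [[w1],
#  [j2, y2]]  [B]] =  [w2]]
A, B = np.linalg.solve(np.array([[j1, y1], [j2, y2]]), np.array([w1, w2]))

det = j1*y2 - j2*y1                    # same determinant as in the loop
assert np.isclose(A, (y2*w1 - y1*w2) / det)
assert np.isclose(B, (j1*w2 - j2*w1) / det)
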
dpgmm.py (project: Parallel-SGD, author: angadgill)
# excerpt; the surrounding module provides numpy as np, scipy's linalg,
# digamma (scipy.special), pinvh, and the helper wishart_log_det.
def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions."""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in range(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in range(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                   - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in range(self.n_components):
                diff = X - self.means_[k]
                self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = pinvh(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = pinvh(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(
                    self.dof_[k], self.scale_[k], self.det_scale_[k],
                    n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
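
wishart_log_det above is a helper defined elsewhere in the module: it evaluates E[ln|Lambda|] for a Wishart-distributed precision matrix Lambda ~ W(scale, dof), for which the standard identity is sum_{i=1..d} psi((dof + 1 - i)/2) + d ln 2 + ln|scale|. A sketch reconstructed from that identity (not copied from the module, whose exact parameterization may differ):

import numpy as np
from scipy.special import digamma

def wishart_log_det(dof, scale, det_scale, n_features):
    """E[log |Lambda|], Lambda ~ Wishart(scale, dof) (standard identity)."""
    # scale itself is unused here; only its determinant det_scale enters
    out = np.sum(digamma(0.5 * (dof + 1 - np.arange(1, n_features + 1))))
    out += n_features * np.log(2)
    return out + np.log(det_scale)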

