Example source code for the Python psi() function
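psi is the digamma function (the derivative of the log-gamma function), exposed in SciPy as scipy.special.psi, an alias of scipy.special.digamma; it applies element-wise to array inputs. A minimal standalone example (the input values are purely illustrative):

import numpy as np
from scipy.special import psi

print(psi(1.0))                         # -0.5772..., minus the Euler-Mascheroni constant
print(psi(np.array([0.5, 1.0, 2.0])))   # element-wise digamma of an array

The snippets below show how several projects use psi() in maximum-likelihood score equations, k-nearest-neighbour information estimators, and variational Bayes updates.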

Source file: nn_utils.py (project: SEVDL_MGP, author: AMLab-Amsterdam)
def perform(self, node, inputs, output_storage):
        # Theano Op.perform: evaluate the digamma function element-wise on the
        # input array and write the result into the output storage cell.
        x = inputs[0]
        z = output_storage[0]
        z[0] = sp.psi(x)
Source file: distributions.py (project: NAIBR, author: raphael-group)
def mleFun(self, par, data, sm):
        # Score equations for the negative binomial MLE: par = (p, r),
        # data holds the observed counts and sm is the sample mean.
        p = par[0]
        r = par[1]
        n = len(data)
        f0 = sm/(r+sm)-p
        f1 = np.sum(special.psi(data+r)) - n*special.psi(r) + n*np.log(r/(r+sm))
        return np.array([f0, f1])
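A rough usage sketch for the score equations above, rewritten as a standalone function with illustrative data; solving them with scipy.optimize.fsolve is an assumption about intended use, not the NAIBR code path:

import numpy as np
from scipy import special, optimize

def nb_score(par, data, sm):
    # same two equations as mleFun above, without the class context
    p, r = par
    n = len(data)
    f0 = sm / (r + sm) - p
    f1 = np.sum(special.psi(data + r)) - n * special.psi(r) + n * np.log(r / (r + sm))
    return np.array([f0, f1])

data = np.array([3, 5, 2, 8, 4, 6])        # illustrative counts
sm = data.mean()
p_hat, r_hat = optimize.fsolve(nb_score, [0.5, 1.0], args=(data, sm))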
Source file: mi.py (project: mifs, author: danielhomola)
def _mi_dc(x, y, k):
    """
    Calculates the mutual information between a continuous vector x and a
    discrete class vector y.

    This implementation can calculate the MI between the joint distribution of
    one or more continuous variables (X[:, 1:3]) with a discrete variable (y).

    Thanks to Adam Pocock, the author of the FEAST package, for the idea.

    Brian C. Ross, 2014, PLOS ONE
    Mutual Information between Discrete and Continuous Data Sets
    """

    y = y.flatten()
    n = x.shape[0]
    classes = np.unique(y)
    knn = NearestNeighbors(n_neighbors=k)
    # distance to kth in-class neighbour
    d2k = np.empty(n)
    # number of points within each point's class
    Nx = []
    for yi in y:
        Nx.append(np.sum(y == yi))

    # find the distance of the kth in-class point
    for c in classes:
        mask = np.where(y == c)[0]
        knn.fit(x[mask, :])
        d2k[mask] = knn.kneighbors()[0][:, -1]

    # find the number of points within the distance of the kth in-class point
    knn.fit(x)
    m = knn.radius_neighbors(radius=d2k, return_distance=False)
    m = [i.shape[0] for i in m]

    # calculate MI based on Equation 2 in Ross 2014
    MI = psi(n) - np.mean(psi(Nx)) + psi(k) - np.mean(psi(m))
    return MI
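A minimal usage sketch on synthetic data. It assumes the imports the snippet relies on (numpy, scipy.special.psi, sklearn's NearestNeighbors) and a scikit-learn version that, like the snippet, accepts a per-sample radius array in radius_neighbors:

import numpy as np

rng = np.random.RandomState(0)
y = rng.randint(0, 2, size=200)        # discrete class labels
x = y[:, None] + rng.randn(200, 1)     # continuous feature shifted by its class
print(_mi_dc(x, y, k=3))               # larger value => x carries more information about y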
Source file: mi.py (project: mifs, author: danielhomola)
def _entropy(X, k=1):
    """
    Returns the entropy of X.

    Written by Gael Varoquaux:
    https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data whose entropy is computed
    k : int, optional
        number of nearest neighbors for density estimation

    References
    ----------
    Kozachenko, L. F. & Leonenko, N. N. 1987 Sample estimate of entropy
    of a random vector. Probl. Inf. Transm. 23, 95-101.
    See also: Evans, D. 2008 A computationally efficient estimator for
    mutual information, Proc. R. Soc. A 464 (2093), 1203-1215.
    and:

    Kraskov A, Stogbauer H, Grassberger P. (2004). Estimating mutual
    information. Phys Rev E 69(6 Pt 2):066138.

    F. Perez-Cruz, (2008). Estimation of Information Theoretic Measures
    for Continuous Random Variables. Advances in Neural Information
    Processing Systems 21 (NIPS). Vancouver (Canada), December.
    The estimator is d*mean(log(r)) + log(volume_unit_ball) + log(n-1) - log(k);
    the code below uses the digamma form psi(n) - psi(k) for the last two terms.

    """

    # Distance to kth nearest neighbor
    r = _nearest_distances(X, k)
    n, d = X.shape
    volume_unit_ball = (np.pi ** (.5 * d)) / gamma(.5 * d + 1)
    return (d * np.mean(np.log(r + np.finfo(X.dtype).eps)) +
            np.log(volume_unit_ball) + psi(n) - psi(k))
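_nearest_distances is not included in this excerpt; a plausible sketch, assuming it returns each sample's distance to its k-th nearest neighbour within X (excluding the sample itself):

from sklearn.neighbors import NearestNeighbors

def _nearest_distances(X, k=1):
    # hypothetical helper: distance from each point to its k-th nearest
    # neighbour inside X, excluding the point itself
    knn = NearestNeighbors(n_neighbors=k + 1).fit(X)  # +1: the closest "neighbour" is the point itself
    d, _ = knn.kneighbors(X)
    return d[:, -1]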
Source file: base_h.py (project: adversarial-variational-bayes, author: gdikov)
def estimation(self, y):
        """ Estimate Shannon entropy.

        Parameters
        ----------
        y : (number of samples, dimension)-ndarray
            One row of y corresponds to one sample.

        Returns
        -------
        h : float
            Estimated Shannon entropy.

        References
        ----------
        M. N. Goria, Nikolai N. Leonenko, V. V. Mergel, and P. L. Novi 
        Inverardi. A new class of random vector entropy estimators and its 
        applications in testing statistical hypotheses. Journal of 
        Nonparametric Statistics, 17: 277-297, 2005. (S={k})

        Harshinder Singh, Neeraj Misra, Vladimir Hnizdo, Adam Fedorowicz
        and Eugene Demchuk. Nearest neighbor estimates of entropy.
        American Journal of Mathematical and Management Sciences, 23,
        301-321, 2003. (S={k})

        L. F. Kozachenko and Nikolai N. Leonenko. A statistical estimate
        for the entropy of a random vector. Problems of Information
        Transmission, 23:9-16, 1987. (S={1})

        Examples
        --------
        h = co.estimation(y)

        """

        num_of_samples, dim = y.shape
        distances_yy = knn_distances(y, y, True, self.knn_method, self.k,
                                     self.eps, 2)[0]
        v = volume_of_the_unit_ball(dim)
        # guard against zero k-th neighbour distances before taking the log
        distances_yy[:, self.k - 1][distances_yy[:, self.k-1] == 0] = 1e-6
        h = log(num_of_samples - 1) - psi(self.k) + log(v) + \
            dim * sum(log(distances_yy[:, self.k-1])) / num_of_samples

        return h
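knn_distances and volume_of_the_unit_ball come from the package's utility module and are not shown here. The latter is the standard d-dimensional unit-ball volume, so a minimal sketch (an assumption, mirroring the volume_unit_ball line in the _entropy snippet above) would be:

import numpy as np
from scipy.special import gamma

def volume_of_the_unit_ball(d):
    # volume of the d-dimensional Euclidean unit ball: pi^(d/2) / Gamma(d/2 + 1)
    return np.pi ** (0.5 * d) / gamma(0.5 * d + 1)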
Source file: DPGMM.py (project: megamix, author: 14thibea)
def _step_M(self,points,log_resp):
        """
        In this step the algorithm updates the values of the parameters (means, covariances,
        alpha, beta, nu).

        Parameters
        ----------
        points : an array (n_points,dim)

        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.

        """

        n_points,dim = points.shape

        resp = np.exp(log_resp)

        # Convenient statistics
        N = np.sum(resp,axis=0) + 10*np.finfo(resp.dtype).eps            #Array (n_components,)
        X_barre = 1/N[:,np.newaxis] * np.dot(resp.T,points)              #Array (n_components,dim)
        S = _full_covariance_matrices(points,X_barre,N,resp,self.reg_covar,self.n_jobs)

        #Parameters update
        self.alpha = np.asarray([1.0 + N,
                                  self.alpha_0 + np.hstack((np.cumsum(N[::-1])[-2::-1], 0))]).T
        self.alpha += np.asarray([-self.pypcoeff * np.ones(self.n_components),
                                      self.pypcoeff * np.arange(self.n_components)]).T
        self.beta = self.beta_0 + N
        self.nu = self.nu_0 + N

        # Weights update (expected log stick-breaking weights)
        for i in range(self.n_components):
            self.log_weights[i] = psi(self.alpha[i][0]) - psi(np.sum(self.alpha[i]))
            if i > 0:
                self.log_weights[i] += self.log_weights[i-1] + psi(self.alpha[i-1][1]) - psi(self.alpha[i-1][0])

        # Means update
        means = self.beta_0 * self._means_prior + N[:,np.newaxis] * X_barre
        self.means = means * np.reciprocal(self.beta)[:,np.newaxis]
        self.means_estimated = self.means

        # Covariance update
        for i in range(self.n_components):
            diff = X_barre[i] - self._means_prior
            product = self.beta_0 * N[i]/self.beta[i] * np.outer(diff,diff)
            self._inv_prec[i] = self._inv_prec_prior + N[i] * S[i] + product

            det_inv_prec = np.linalg.det(self._inv_prec[i])
            self._log_det_inv_prec[i] = np.log(det_inv_prec)
            self.cov[i] = self._inv_prec[i] / self.nu[i]
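The weight update above computes the expected log mixture weights of the stick-breaking construction, E[log w_i] = E[log v_i] + sum_{j<i} E[log(1 - v_j)] with Beta-distributed sticks. A standalone, vectorised sketch of that same computation (assumed equivalent to the loop in the weights update):

import numpy as np
from scipy.special import psi

def expected_log_stick_weights(alpha):
    # alpha: (n_components, 2) Beta parameters of the sticks v_i
    log_v = psi(alpha[:, 0]) - psi(alpha.sum(axis=1))      # E[log v_i]
    log_1mv = psi(alpha[:, 1]) - psi(alpha.sum(axis=1))    # E[log(1 - v_i)]
    return log_v + np.concatenate(([0.0], np.cumsum(log_1mv[:-1])))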

