Python source-code examples of the multivariate_normal() class
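
All of the snippets below revolve around scipy.stats.multivariate_normal (and, in a few places, numpy.random.multivariate_normal). As a quick orientation before the project excerpts, here is a minimal sketch of the frozen-distribution API they rely on; the mean and covariance values are made up purely for illustration.

import numpy as np
from scipy.stats import multivariate_normal

# Freeze a 2-D Gaussian with an illustrative mean and covariance.
mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.3],
                [0.3, 0.5]])
rv = multivariate_normal(mean=mean, cov=cov)

x = np.array([0.5, 1.2])
print(rv.pdf(x))       # density at a single point
print(rv.logpdf(x))    # log-density, numerically safer for tiny values
print(rv.rvs(size=5))  # 5 random draws, shape (5, 2)
print(rv.entropy())    # differential entropy in nats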

Source file: model_analyzer.py (project: Generative-ConvACs, author: HUJI-Deep)
def precompute_marginals(self):
        sys.stderr.write('Precomputing marginals...\n')
        self._pdfs = [None] * self._num_instances
        # precomputing all possible marginals
        for i in xrange(self._num_instances):
            mean = self._corrected_means[i]
            cov = self._corrected_covs[i]
            self._pdfs[i] = [None] * (2 ** mean.shape[0])
            for marginal_pattern in itertools.product([False, True], repeat=mean.shape[0]):
                marginal_length = marginal_pattern.count(True)
                if marginal_length == 0:
                    continue
                m = np.array(marginal_pattern)
                marginal_mean = mean[m]
                mm = m[:, np.newaxis]
                marginal_cov = cov[np.dot(mm, mm.transpose())].reshape((marginal_length, marginal_length))
                self._pdfs[i][hash_bool_array(m)] = multivariate_normal(mean=marginal_mean, cov=marginal_cov)
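
For reference, the marginal of a multivariate Gaussian over any subset of coordinates is again Gaussian, with the corresponding sub-vector of the mean and sub-matrix of the covariance; that is what the boolean-mask indexing above computes. A minimal standalone sketch of the same idea, using np.ix_ instead of the outer-product mask (the names and numbers here are illustrative, not taken from the project):

import numpy as np
from scipy.stats import multivariate_normal

mean = np.zeros(3)
cov = np.diag([1.0, 2.0, 3.0])
keep = np.array([True, False, True])      # marginalize out the middle coordinate

marginal = multivariate_normal(mean=mean[keep],
                               cov=cov[np.ix_(keep, keep)])
print(marginal.pdf([0.1, -0.2]))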
Source file: norm_post_sim.py (project: pyflux, author: RJT1990)
def norm_post_sim(modes,cov_matrix):
    post = multivariate_normal(modes,cov_matrix)
    nsims = 30000
    phi = np.zeros([nsims,len(modes)])

    for i in range(0,nsims):
         phi[i] = post.rvs()

    chain = np.array([phi[i][0] for i in range(len(phi))])
    for m in range(1,len(modes)):
        chain = np.vstack((chain,[phi[i][m] for i in range(len(phi))]))

    mean_est = [np.mean(np.array([phi[i][j] for i in range(len(phi))])) for j in range(len(modes))]
    median_est = [np.median(np.array([phi[i][j] for i in range(len(phi))])) for j in range(len(modes))]
    upper_95_est = [np.percentile(np.array([phi[i][j] for i in range(len(phi))]),95) for j in range(len(modes))]
    lower_95_est = [np.percentile(np.array([phi[i][j] for i in range(len(phi))]),5) for j in range(len(modes))]

    return chain, mean_est, median_est, upper_95_est, lower_95_est
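
Note that rvs can draw all the samples in one call, which removes the Python-level loop and the per-column list comprehensions. A rough, hedged equivalent is sketched below (the function name is mine, the statistics come back as NumPy arrays rather than lists, and the random draws will of course differ):

import numpy as np
from scipy.stats import multivariate_normal

def norm_post_sim_vectorized(modes, cov_matrix, nsims=30000):
    # Draw every sample in a single call; phi has shape (nsims, len(modes)).
    phi = multivariate_normal(modes, cov_matrix).rvs(nsims)
    chain = phi.T                                  # one row per parameter
    mean_est = phi.mean(axis=0)
    median_est = np.median(phi, axis=0)
    upper_95_est = np.percentile(phi, 95, axis=0)
    lower_95_est = np.percentile(phi, 5, axis=0)
    return chain, mean_est, median_est, upper_95_est, lower_95_est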
Source file: ul_policies.py (project: bolero, author: rock-learning)
def probabilities(self, Y):
        """Probabilities of parameter vectors Y

        Parameters
        ----------
        Y : array-like, shape (num_samples, n_weights)
            2d array of parameter vectors

        Returns
        ----------
        resp : array-like, shape (num_samples)
            the probabilities of the samples under this policy

        """
        if not compatible_version("scipy", ">= 0.14"):
            raise ImportError(
                "SciPy >= 0.14 is required for "
                "'scipy.stats.multivariate_normal'.")
        from scipy.stats import multivariate_normal
        return multivariate_normal(mean=self.mean, cov=self.Sigma).pdf(Y)
Source file: ul_policies.py (project: bolero, author: rock-learning)
def __call__(self, context, explore=True):
        """Evaluates policy for given context.

        Samples weight vector from distribution if explore is true, otherwise
        return the distribution's mean (which depends on the context).

        Parameters
        ----------
        context: array-like, (n_context_dims,)
            context vector

        explore: bool
            if true, weight vector is sampled from distribution. otherwise the
            distribution's mean is returned
        """
        if explore:
            return self.random_state.multivariate_normal(
                self.W.dot(context), self.Sigma, size=[1])[0]
        else:
            return self.W.dot(context)
Source file: data_generation.py (project: information-bottleneck, author: djstrouse)
def gen_blurred_diag_pxy(s):
    X = 1024
    Y = X

    # generate pdf
    from scipy.stats import multivariate_normal
    pxy = np.zeros((X,Y))
    rv = multivariate_normal(cov=s)
    for x in range(X):        
        pxy[x,:] = np.roll(rv.pdf(np.linspace(-X/2,X/2,X+1)[:-1]),int(X/2+x))
    pxy = pxy/np.sum(pxy)

    # plot p(x,y)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.contourf(pxy)
    plt.ion()
    plt.title("p(x,y)")
    plt.show()

    return pxy
Source file: data.py (project: kernel-gof, author: wittawatj)
def gmm_sample(self, mean=None, w=None, N=10000,n=10,d=2,seed=10):
        np.random.seed(seed)
        self.d = d
        if mean is None:
            mean = np.random.randn(n,d)*10
        if w is None:
            w = np.random.rand(n)
        w = w/sum(w)
        multi = np.random.multinomial(N,w)
        X = np.zeros((N,d))
        base = 0
        for i in range(n):
            X[base:base+multi[i],:] = np.random.multivariate_normal(mean[i,:], np.eye(self.d), multi[i])
            base += multi[i]

        llh = np.zeros(N)
        for i in range(n):
            llh += w[i] * stats.multivariate_normal.pdf(X, mean[i,:], np.eye(self.d))
        #llh = llh/sum(llh)
        return X, llh
Source file: community_embeddings.py (project: nodeembedding-to-communityembedding, author: andompesta)
def loss(self, nodes, model, beta, chunksize=150):
        """
        Forward function used to compute the o3 loss
        :param nodes: nodes in the current batch
        :param model: model containing all the shared data
        :param beta: trade-off parameter
        """
        ret_loss = 0
        for node_index in chunkize_serial(map(lambda x: model.vocab(x).index, nodes), chunksize):
            input = model.node_embedding[node_index]

            batch_loss = np.zeros(len(node_index), dtype=np.float32)
            for com in range(model.k):
                rd = multivariate_normal(model.centroid[com], model.covariance_mat[com])
                # check if can be done as matrix operation
                batch_loss += rd.logpdf(input).astype(np.float32) * model.pi[node_index, com]

            ret_loss = abs(batch_loss.sum())

        return ret_loss * (beta/model.k)
Source file: constrainted_hartigan.py (project: ecml17, author: gmum)
def init_link_labels(self, x0, x1, p):

        pdfs = [multivariate_normal(mean=m, cov=c, allow_singular=True) for m, c in zip(self.means, self.covs)]

        neglog_costs = np.zeros((2, self.means.shape[0]))

        for i in xrange(len(self.means)):
            neglog_costs[0, i] = -np.log(pdfs[i].pdf(x0))
            neglog_costs[1, i] = -np.log(pdfs[i].pdf(x1))

        neglog_link_costs = np.clip(neglog_costs, 0, 9999999999)

        def _cost(k, l):
            return (-np.log(self.weights[k]) * neglog_link_costs[0, k]) + (-np.log(self.weights[l]) * neglog_link_costs[1, l])

        must = must_consistent(self.partition)
        cannot = cannot_consistent(self.partition)

        if p == 1:  # must link
            return must[np.argmin([_cost(k, l) for k, l in must])]
        elif p == -1:  # cannot link
            return cannot[np.argmin([_cost(k, l) for k, l in cannot])]
        else:
            raise ValueError("At this point link is expected to be -1 or 1 only!")
Source file: bayesquad.py (project: icinco-code, author: jacobnzw)
def _int_var_rbf(self, X, hyp, jitter=1e-8):
        """
        Posterior integral variance of the Gaussian Process quadrature.
        X - vector (1, 2*xdim**2+xdim)
        hyp - kernel hyperparameters [s2, el_1, ... el_d]
        """
        # reshape X to SP matrix
        X = np.reshape(X, (self.n, self.d))
        # set kernel hyper-parameters
        s2, el = hyp[0], hyp[1:]
        self.kern.param_array[0] = s2  # variance
        self.kern.param_array[1:] = el  # lengthscale
        K = self.kern.K(X)
        L = np.diag(el ** 2)
        # posterior variance of the integral
        ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
        postvar = -ks.dot(solve(K + jitter * np.eye(self.n), ks.T))
        return postvar
Source file: bayesquad.py (project: icinco-code, author: jacobnzw)
def _int_var_rbf_hyp(self, hyp, X, jitter=1e-8):
        """
        Posterior integral variance as a function of hyper-parameters
        :param hyp: RBF kernel hyper-parameters [s2, el_1, ..., el_d]
        :param X: sigma-points
        :param jitter: numerical jitter (for stabilizing computations)
        :return: posterior integral variance
        """
        # reshape X to SP matrix
        X = np.reshape(X, (self.n, self.d))
        # set kernel hyper-parameters
        s2, el = 1, hyp  # sig_var hyper always set to 1
        self.kern.param_array[0] = s2  # variance
        self.kern.param_array[1:] = el  # lengthscale
        K = self.kern.K(X)
        L = np.diag(el ** 2)
        # posterior variance of the integral
        ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
        postvar = s2 * np.sqrt(det(2 * inv(L) + np.eye(self.d))) ** -1 - ks.dot(
            solve(K + jitter * np.eye(self.n), ks.T))
        return postvar
Source file: transformed_distribution_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testEntropy(self):
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              array_ops.zeros_like(shift),
              array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
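
The reference entropy used in this test has a closed form: for a d-dimensional Gaussian with covariance Sigma, the differential entropy is 0.5 * ln(det(2 * pi * e * Sigma)). A quick standalone check against SciPy (the diagonal covariance is illustrative):

import numpy as np
from scipy.stats import multivariate_normal

Sigma = np.diag(np.array([1.0, 2.0, 3.0]) ** 2)
closed_form = 0.5 * np.log(np.linalg.det(2 * np.pi * np.e * Sigma))
print(np.allclose(closed_form, multivariate_normal(cov=Sigma).entropy()))  # True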
Source file: mvn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _testPDFShapes(self, mvn_dist, mu, sigma):
    with self.test_session() as sess:
      mvn = mvn_dist(mu, sigma)
      x = 2 * array_ops.ones_like(mu)

      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)

      mu_value = np.ones([3, 3, 2])
      sigma_value = np.zeros([3, 3, 2, 2])
      sigma_value[:] = np.identity(2)
      x_value = 2. * np.ones([3, 3, 2])
      feed_dict = {mu: mu_value, sigma: sigma_value}

      scipy_mvn = stats.multivariate_normal(
          mean=mu_value[(0, 0)], cov=sigma_value[(0, 0)])
      expected_log_pdf = scipy_mvn.logpdf(x_value[(0, 0)])
      expected_pdf = scipy_mvn.pdf(x_value[(0, 0)])

      log_pdf_evaled, pdf_evaled = sess.run([log_pdf, pdf], feed_dict=feed_dict)
      self.assertAllEqual([3, 3], log_pdf_evaled.shape)
      self.assertAllEqual([3, 3], pdf_evaled.shape)
      self.assertAllClose(expected_log_pdf, log_pdf_evaled[0, 0])
      self.assertAllClose(expected_pdf, pdf_evaled[0, 0])
Source file: mvn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testLogPDFScalarBatch(self):
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      x = self._rng.rand(2)

      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)

      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)

      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((), log_pdf.get_shape())
      self.assertEqual((), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
Source file: mvn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testLogPDFXIsHigherRank(self):
    with self.test_session():
      mu = self._rng.rand(2)
      chol, sigma = self._random_chol(2, 2)
      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      x = self._rng.rand(3, 2)

      log_pdf = mvn.log_prob(x)
      pdf = mvn.prob(x)

      scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)

      expected_log_pdf = scipy_mvn.logpdf(x)
      expected_pdf = scipy_mvn.pdf(x)
      self.assertEqual((3,), log_pdf.get_shape())
      self.assertEqual((3,), pdf.get_shape())
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllClose(expected_pdf, pdf.eval())
Source file: mvn_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testSampleWithSampleShape(self):
    with self.test_session():
      mu = self._rng.rand(3, 5, 2)
      chol, sigma = self._random_chol(3, 5, 2, 2)

      mvn = distributions.MultivariateNormalCholesky(mu, chol)
      samples_val = mvn.sample((10, 11, 12), seed=137).eval()

      # Check sample shape
      self.assertEqual((10, 11, 12, 3, 5, 2), samples_val.shape)

      # Check sample means
      x = samples_val[:, :, :, 1, 1, :]
      self.assertAllClose(
          x.reshape(10 * 11 * 12, 2).mean(axis=0), mu[1, 1], atol=1e-2)

      # Check that log_prob(samples) works
      log_prob_val = mvn.log_prob(samples_val).eval()
      x_log_pdf = log_prob_val[:, :, :, 1, 1]
      expected_log_pdf = stats.multivariate_normal(
          mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).logpdf(x)
      self.assertAllClose(expected_log_pdf, x_log_pdf)
Source file: distributions.py (project: bayestsa, author: thalesians)
def __init__(self, mean, covariance, randomstate=None):
        self.__mean = npu.immutablecopyof(npu.tondim1(mean))
        self.__covariance = npu.immutablecopyof(npp.checkshapeissquare(npu.tondim2(covariance)))
        self.__randomstate = npu.getrandomstate() if randomstate is None else randomstate
        self.__impl = stats.multivariate_normal(self.__mean, self.__covariance)
Source file: distributions.py (project: bayestsa, author: thalesians)
def sample(self, size=1):
        return self.__randomstate.multivariate_normal(self.__mean, self.__covariance, size)
Source file: mv_gaussian.py (project: SourceFilterContoursMelody, author: juanjobosch)
def fit_gaussians(x_train_boxcox, y_train):
    """ Fit class-dependent multivariate gaussians on the training set.

    Parameters
    ----------
    x_train_boxcox : np.array [n_samples, n_features_trans]
        Transformed training features.
    y_train : np.array [n_samples]
        Training labels.

    Returns
    -------
    rv_pos : multivariate normal
        multivariate normal for melody class
    rv_neg : multivariate normal
        multivariate normal for non-melody class
    """
    pos_idx = np.where(y_train == 1)[0]
    mu_pos = np.mean(x_train_boxcox[pos_idx, :], axis=0)
    cov_pos = np.cov(x_train_boxcox[pos_idx, :], rowvar=0)

    neg_idx = np.where(y_train == 0)[0]
    mu_neg = np.mean(x_train_boxcox[neg_idx, :], axis=0)
    cov_neg = np.cov(x_train_boxcox[neg_idx, :], rowvar=0)
    rv_pos = multivariate_normal(mean=mu_pos, cov=cov_pos, allow_singular=True)
    rv_neg = multivariate_normal(mean=mu_neg, cov=cov_neg, allow_singular=True)
    return rv_pos, rv_neg
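
The two frozen distributions returned here are class-conditional likelihoods, so a natural follow-up is to score new samples with a log-likelihood ratio. A hedged usage sketch (x_test_boxcox is a hypothetical Box-Cox-transformed test matrix, and equal class priors are assumed):

rv_pos, rv_neg = fit_gaussians(x_train_boxcox, y_train)

# Positive scores favour the melody class; assumes equal class priors.
scores = rv_pos.logpdf(x_test_boxcox) - rv_neg.logpdf(x_test_boxcox)
predicted_melody = (scores > 0).astype(int)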
Source file: samplers.py (project: trendpy, author: RonsenbergVI)
def define_parameters(self):
        params=Parameters()

        params.append(Parameter("trend", multivariate_normal, (self.size,1)))
        params.append(Parameter("sigma2", invgamma, (1,1)))
        params.append(Parameter("lambda2", gamma, (1,1)))
        params.append(Parameter("omega", invgauss, (self.size-self.total_variation_order,1)))

        self.parameters = params
Source file: utils.py (project: gait-recognition, author: marian-margeta)
def get_gauss_pdf(sigma):
    n = sigma * 8

    x, y = np.mgrid[0:n, 0:n]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x
    pos[:, :, 1] = y

    rv = multivariate_normal([n / 2, n / 2], [[sigma ** 2, 0], [0, sigma ** 2]])
    pdf = rv.pdf(pos)

    heatmap = pdf / np.max(pdf)

    return heatmap
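
A heatmap like this is commonly used as a soft training target centred on a keypoint; a quick way to inspect the output (purely illustrative values):

import matplotlib.pyplot as plt

heatmap = get_gauss_pdf(sigma=4)   # 32x32 Gaussian bump centred in the patch
plt.imshow(heatmap, cmap='hot')
plt.colorbar()
plt.show()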
Source file: dynlin.py (project: pyflux, author: RJT1990)
def simulation_smoother(self,beta):
        """ Koopman's simulation smoother - simulates from states given
        model parameters and observations

        Parameters
        ----------

        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        - A simulated state evolution
        """         

        T, Z, R, Q, H = self._ss_matrices(beta)

        # Generate e_t+ and n_t+
        rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
        q_dist = ss.multivariate_normal([0.0, 0.0], Q)
        rnd_q = q_dist.rvs(self.data.shape[0]+1)

        # Generate a_t+ and y_t+
        a_plus = np.zeros((T.shape[0],self.data.shape[0]+1)) 
        a_plus[0,0] = np.mean(self.data[0:5])
        y_plus = np.zeros(self.data.shape[0])

        for t in range(0,self.data.shape[0]+1):
            if t == 0:
                a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
                y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
            else:
                if t != self.data.shape[0]:
                    a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
                    y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]

        alpha_hat, _ = self.smoothed_state(self.data,beta)
        alpha_hat_plus, _ = self.smoothed_state(y_plus,beta)
        alpha_tilde = alpha_hat - alpha_hat_plus + a_plus

        return alpha_tilde
Source file: dar.py (project: pyflux, author: RJT1990)
def simulation_smoother(self,beta):
        """ Koopman's simulation smoother - simulates from states given
        model parameters and observations

        Parameters
        ----------

        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        - A simulated state evolution
        """         

        T, Z, R, Q, H = self._ss_matrices(beta)

        # Generate e_t+ and n_t+
        rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
        q_dist = ss.multivariate_normal([0.0, 0.0], Q)
        rnd_q = q_dist.rvs(self.data.shape[0]+1)

        # Generate a_t+ and y_t+
        a_plus = np.zeros((T.shape[0],self.data.shape[0]+1)) 
        a_plus[0,0] = np.mean(self.data[0:5])
        y_plus = np.zeros(self.data.shape[0])

        for t in range(0,self.data.shape[0]+1):
            if t == 0:
                a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
                y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
            else:
                if t != self.data.shape[0]:
                    a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
                    y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]

        alpha_hat, _ = self.smoothed_state(self.data,beta)
        alpha_hat_plus, _ = self.smoothed_state(y_plus,beta)
        alpha_tilde = alpha_hat - alpha_hat_plus + a_plus

        return alpha_tilde
Source file: llm.py (project: pyflux, author: RJT1990)
def simulation_smoother(self,beta):
        """ Koopman's simulation smoother - simulates from states given
        model latent variables and observations

        Parameters
        ----------

        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        - A simulated state evolution
        """         

        T, Z, R, Q, H = self._ss_matrices(beta)

        # Generate e_t+ and n_t+
        rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
        q_dist = ss.multivariate_normal([0.0], Q)
        rnd_q = q_dist.rvs(self.data.shape[0]+1)

        # Generate a_t+ and y_t+
        a_plus = np.zeros((T.shape[0],self.data.shape[0]+1)) 
        a_plus[0,0] = np.mean(self.data[0:5])
        y_plus = np.zeros(self.data.shape[0])

        for t in range(0,self.data.shape[0]+1):
            if t == 0:
                a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t]
                y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
            else:
                if t != self.data.shape[0]:
                    a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t]
                    y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]

        alpha_hat,_ = self.smoothed_state(self.data,beta)
        alpha_hat_plus,_ = self.smoothed_state(y_plus,beta)
        alpha_tilde = alpha_hat - alpha_hat_plus + a_plus

        return alpha_tilde
Source file: llt.py (project: pyflux, author: RJT1990)
def simulation_smoother(self,beta):
        """ Koopman's simulation smoother - simulates from states given
        model latent variables and observations

        Parameters
        ----------

        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        - A simulated state evolution
        """         

        T, Z, R, Q, H = self._ss_matrices(beta)

        # Generate e_t+ and n_t+
        rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
        q_dist = ss.multivariate_normal([0.0, 0.0], Q)
        rnd_q = q_dist.rvs(self.data.shape[0]+1)

        # Generate a_t+ and y_t+
        a_plus = np.zeros((T.shape[0],self.data.shape[0]+1)) 
        a_plus[0,0] = np.mean(self.data[0:5])
        y_plus = np.zeros(self.data.shape[0])

        for t in range(0,self.data.shape[0]+1):
            if t == 0:
                a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
                y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
            else:
                if t != self.data.shape[0]:
                    a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
                    y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]

        alpha_hat,_ = self.smoothed_state(self.data,beta)
        alpha_hat_plus,_ = self.smoothed_state(y_plus,beta)
        alpha_tilde = alpha_hat - alpha_hat_plus + a_plus

        return alpha_tilde
Source file: mv_gaussian.py (project: motif, author: rabitt)
def fit(self, X, Y):
        """ Fit class-dependent multivariate gaussians on the training set.

        Parameters
        ----------
        X : np.array [n_samples, n_features]
            Training features; Box-Cox transformed internally before fitting.
        Y : np.array [n_samples]
            Training labels.

        Notes
        -----
        Stores the fitted class-conditional distributions as ``self.rv_pos``
        (melody class) and ``self.rv_neg`` (non-melody class); nothing is
        returned.
        """
        X_boxcox = self._fit_boxcox(X)
        pos_idx = np.where(Y == 1)[0]
        mu_pos = np.mean(X_boxcox[pos_idx, :], axis=0)
        cov_pos = np.cov(X_boxcox[pos_idx, :], rowvar=0)

        neg_idx = np.where(Y == 0)[0]
        mu_neg = np.mean(X_boxcox[neg_idx, :], axis=0)
        cov_neg = np.cov(X_boxcox[neg_idx, :], rowvar=0)
        rv_pos = multivariate_normal(
            mean=mu_pos, cov=cov_pos, allow_singular=True
        )
        rv_neg = multivariate_normal(
            mean=mu_neg, cov=cov_neg, allow_singular=True
        )
        self.rv_pos = rv_pos
        self.rv_neg = rv_neg
Source file: ul_policies.py (project: bolero, author: rock-learning)
def __call__(self, context=None, explore=True):
        """Evaluates policy.

        Samples weight vector from distribution if explore is true, otherwise
        return the distribution's mean.

        Parameters
        ----------
        context : array-like, (n_context_dims,)
            context vector (ignored by this policy, defaults to None)

        explore : bool
            if true, weight vector is sampled from distribution. otherwise the
            distribution's mean is returned

        Returns
        -------
        parameter_vector: array-like, (n_weights,)
            the selected parameters
        """
        # Note: Context is ignored
        if not explore:
            return self.mean
        else:
            # Sample from normal distribution
            return self.random_state.multivariate_normal(
                mean=self.mean, cov=self.Sigma, size=1)[0]
Source file: distributions.py (project: abcpy, author: eth-cscs)
def sample(self, k):
        #samples = self.distribution.rvs(k).reshape(k,p)
        samples = self.rng.multivariate_normal(self.mean, self.cov, k)
        return samples
Source file: distributions.py (project: abcpy, author: eth-cscs)
def pdf(self, x):
        return multivariate_normal(self.mean, self.cov).pdf(x)
Source file: distributions.py (project: abcpy, author: eth-cscs)
def sample(self, k):
        p = len(self.mean)
        if self.df == np.inf:
            chisq = 1.0
        else:
            chisq = self.rng.chisquare(self.df, k) / self.df
            chisq = chisq.reshape(-1,1).repeat(p, axis=1)
        mvn = self.rng.multivariate_normal(np.zeros(p), self.cov, k)
        return self.mean + np.divide(mvn, np.sqrt(chisq))
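
This sample method uses the standard construction of a multivariate Student-t draw: if Z ~ N(0, Sigma) and W ~ chi-square(df), then mean + Z / sqrt(W / df) follows a multivariate t with df degrees of freedom, and the df == np.inf branch recovers the plain Gaussian. A self-contained sketch of the same recipe (all values are illustrative):

import numpy as np

rng = np.random.RandomState(0)
mean, cov, df, k = np.zeros(2), np.eye(2), 5.0, 10000

z = rng.multivariate_normal(np.zeros(2), cov, k)       # Gaussian part, shape (k, 2)
chisq = rng.chisquare(df, k) / df                      # scale mixture, shape (k,)
samples = mean + z / np.sqrt(chisq)[:, None]           # multivariate t draws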
Source file: fisher.py (project: aesthetics, author: shubhamchaudhary)
def _likelihood_statistics(self, img_descriptors):
        """
        :param img_descriptors: X
        :return: 0th order, 1st order, 2nd order statistics
                 as described by equation 20, 21, 22 in reference [1]
        """

        def likelihood_moment(x, posterior_probability, moment):
            x_moment = np.power(np.float32(x), moment) if moment > 0 else np.float32([1])
            return x_moment * posterior_probability

        def zeros(like):
            return np.zeros(like.shape).tolist()

        means, covariances, weights = self.gmm.means, self.gmm.covariances, self.gmm.weights
        normals = [multivariate_normal(mean=means[k], cov=covariances[k]) for k in range(0, len(weights))]
        """ Gaussian Normals """
        gaussian_pdfs = [np.array([g_k.pdf(sample) for g_k in normals]) for sample in img_descriptors]
        """ u(x) for equation 15, page 4 in reference 1 """
        statistics_0_order, statistics_1_order, statistics_2_order = zeros(weights), zeros(weights), zeros(weights)
        for k in range(0, len(weights)):
            for index, sample in enumerate(img_descriptors):
                posterior_probability = FisherVector.posterior_probability(gaussian_pdfs[index], weights)
                statistics_0_order[k] = statistics_0_order[k] + likelihood_moment(sample, posterior_probability[k], 0)
                statistics_1_order[k] = statistics_1_order[k] + likelihood_moment(sample, posterior_probability[k], 1)
                statistics_2_order[k] = statistics_2_order[k] + likelihood_moment(sample, posterior_probability[k], 2)

        return np.array(statistics_0_order), np.array(statistics_1_order), np.array(statistics_2_order)
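
The helper FisherVector.posterior_probability is not shown in this excerpt. In a standard GMM it would return the responsibilities gamma_k = w_k * N_k(x) / sum_j w_j * N_j(x); the sketch below is an assumption about that omitted helper, not the project's actual implementation.

import numpy as np

def posterior_probability(gaussian_pdf_values, weights):
    # Assumed behaviour: standard GMM responsibilities for one sample, where
    # gaussian_pdf_values[k] = N(x | mu_k, Sigma_k). The real helper may differ.
    weighted = np.asarray(weights) * np.asarray(gaussian_pdf_values)
    return weighted / (weighted.sum() + 1e-12)   # epsilon guards against all-zero pdfs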

