Example source code for Python's inv()

x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
# Assumed imports for the x_analytical_values.py excerpts below (not shown in the excerpts):
from numpy import log, pi, trace, dot, prod, diag, sqrt, exp
from numpy.linalg import inv, det


def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Name of the distributions.
    par1, par2 : dictionaries
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)

        invc2 = inv(c2)
        diffm = m1 - m2

        c = 1/2 * (dim * log(2*pi) + log(det(c2)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)))
    else:
        raise Exception('Distribution=?')

    return c
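A minimal usage sketch (not from the original project): evaluating the cross-entropy between two 2-D Gaussians, with parameter dictionaries following the docstring above.

import numpy as np
par1 = {'mean': np.zeros(2), 'cov': np.eye(2)}
par2 = {'mean': np.ones(2), 'cov': 2 * np.eye(2)}
c = analytical_value_c_cross_entropy('normal', 'normal', par1, par2)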
x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
    """ Analytical value of the KL divergence for the given distributions.

    Parameters
    ----------    
    distr1, distr2 : str
                     Names of the distributions.
    par1, par2 : dictionaries
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Kullback-Leibler divergence.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        dim = len(m1)    

        invc2 = inv(c2)
        diffm = m1 - m2

        d = 1/2 * (log(det(c2)/det(c1)) + trace(dot(invc2, c1)) +
                   dot(diffm, dot(invc2, diffm)) - dim)
    else:
        raise Exception('Distribution=?')

    return d
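A quick sanity-check sketch (illustrative, not from the source): the KL divergence of a Gaussian from itself must vanish, which exercises every term of the formula above.

import numpy as np
par = {'mean': np.zeros(3), 'cov': np.eye(3)}
assert abs(analytical_value_d_kullback_leibler('normal', 'normal', par, par)) < 1e-12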
x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
def analytical_value_i_renyi(distr, alpha, par):
    """ Analytical value of the Renyi mutual information.

    Parameters
    ----------    
    distr : str
            Name of the distribution.
    alpha : float
            Parameter of the Renyi mutual information.
    par : dictionary
          Parameters of the distribution. If distr = 'normal': par["cov"]
          is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.

    """

    if distr == 'normal':
        c = par["cov"]        

        t1 = -alpha / 2 * log(det(c))
        t2 = -(1 - alpha) / 2 * log(prod(diag(c)))
        t3 = log(det(alpha * inv(c) + (1 - alpha) * diag(1 / diag(c)))) / 2
        i = 1 / (alpha - 1) * (t1 + t2 - t3)            
    else:
        raise Exception('Distribution=?')

    return i
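Another illustrative check: with a diagonal covariance the components are independent, so the Renyi mutual information should come out as zero for any alpha.

import numpy as np
i = analytical_value_i_renyi('normal', 0.7, {'cov': np.diag([1.0, 2.0, 3.0])})
assert abs(i) < 1e-10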
x_analytical_values.py (project: adversarial-variational-bayes, author: gdikov)
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Analytical value of Hellinger distance for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
                     Names of the distributions.
    par1, par2 : dictionaries
                 Parameters of the distributions. If distr1 = distr2 =
                 'normal': par1["mean"], par1["cov"] and par2["mean"],
                 par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.

    """

    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']

        # "https://en.wikipedia.org/wiki/Hellinger_distance": Examples:
        diffm = m1 - m2
        avgc = (c1 + c2) / 2
        inv_avgc = inv(avgc)
        d = 1 - det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(avgc)) * \
            exp(-dot(diffm, dot(inv_avgc, diffm))/8)  # D^2

        d = sqrt(d)
    else:
        raise Exception('Distribution=?')

    return d
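As above, an illustrative check (not from the source): identical Gaussians are at Hellinger distance zero.

import numpy as np
par = {'mean': np.zeros(2), 'cov': np.eye(2)}
assert analytical_value_d_hellinger('normal', 'normal', par, par) < 1e-7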
tStudentProcess.py (project: pyGPGO, author: hawk31)
# Assumed imports for this excerpt (not shown in the original snippet):
import numpy as np
from numpy.linalg import slogdet
from scipy.special import gammaln


def logpdf(x, df, mu, Sigma):
    """
    Marginal log-likelihood of a Student-t Process

    Parameters
    ----------
    x: array-like
        Point to be evaluated
    df: float
        Degrees of freedom (>2.0)
    mu: array-like
        Mean of the process.
    Sigma: array-like
        Covariance matrix of the process.

    Returns
    -------
    logp: float
        log-likelihood 

    """
    d = len(x)
    x = np.atleast_2d(x)
    xm = x - mu
    V = df * Sigma
    V_inv = np.linalg.inv(V)
    _, logdet = slogdet(np.pi * V)

    # The normalizer requires the log-gamma function (gammaln); a plain
    # gamma(...) here would make the formula incorrect.
    logz = -gammaln(df / 2.0 + d / 2.0) + gammaln(df / 2.0) + 0.5 * logdet
    logp = -0.5 * (df + d) * np.log(1 + np.sum(np.dot(xm, V_inv) * xm, axis=1))

    logp = logp - logz

    return logp[0]
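A cross-check sketch against SciPy's reference implementation (assumes SciPy >= 1.6, which added scipy.stats.multivariate_t); with the gammaln normalizer the two should agree to numerical precision.

import numpy as np
from scipy import stats
x = np.array([0.3, -0.2])
mu = np.zeros(2)
Sigma = np.eye(2)
ref = stats.multivariate_t(loc=mu, shape=Sigma, df=5.0).logpdf(x)
val = logpdf(x, 5.0, mu, Sigma)   # should match ref up to rounding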
tStudentProcess.py (project: pyGPGO, author: hawk31)
def predict(self, Xstar, return_std=False):
        """
        Returns mean and covariances for the posterior t-Student process.

        Parameters
        ----------
        Xstar: np.ndarray, shape=(nsamples, nfeatures)
            Testing instances to predict.
        return_std: bool
            Whether to return the standard deviation of the posterior process. Otherwise,
            it returns the whole covariance matrix of the posterior process.

        Returns
        -------
        np.ndarray
            Mean of the posterior process for testing instances.
        np.ndarray
            Covariance of the posterior process for testing instances.
        """
        Xstar = np.atleast_2d(Xstar)
        self.K21 = self.covfunc.K(Xstar, self.X)
        self.K22 = self.covfunc.K(Xstar, Xstar)
        self.K12 = self.covfunc.K(self.X, Xstar)
        K11_inv = inv(self.K11)
        self.K22_tilde = self.K22 - np.dot(np.dot(self.K21, K11_inv), self.K12)

        phi2 = np.dot(np.dot(self.K21, K11_inv), self.y)
        cov = (self.nu + self.beta1 - 2) / (self.nu + self.n1 - 2) * self.K22_tilde
        if return_std:
            return phi2, np.diag(cov)
        return phi2, cov
linear_regression.py (project: astrology, author: mattsgithub)
def train(self, X, y, **kwargs):
        features = kwargs.get('features')
        self.fit_intercept = kwargs.get('fit_intercept')

        self.y = y
        self.N = y.shape[0]
        # Prepend a column of ones so the intercept is fit as an extra coefficient
        if self.fit_intercept:
            self.features = ['bias'] + features
            X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
            self.p = X.shape[1] - (1 if self.fit_intercept else 0)
        else:
            self.features = features
            self.p = X.shape[1]

        self.X = X
        XT = X.T
        std_error_matrix = inv(XT.dot(X))
        self.beta = std_error_matrix.dot(XT).dot(y)

        # Prediction
        self.y_hat = X.dot(self.beta)

        # Residual sum of squares
        self.rss = np.sum((y - self.y_hat)**2)

        # Estimated variance
        self.df = (self.N - self.p - 1)
        self.pop_var = self.rss / self.df

        # Standard error
        self.std_error = np.sqrt(std_error_matrix.diagonal() * self.pop_var)

        # t scores
        self.t = self.beta / self.std_error
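For reference, a self-contained sketch of the same normal-equation algebra on synthetic data (the variable names here are illustrative, not part of the class above):

import numpy as np
rng = np.random.default_rng(0)
X = np.column_stack([np.ones(100), rng.normal(size=(100, 2))])
true_beta = np.array([1.0, 2.0, -1.0])
y = np.dot(X, true_beta) + rng.normal(scale=0.1, size=100)
xtx_inv = np.linalg.inv(np.dot(X.T, X))
beta = xtx_inv.dot(X.T).dot(y)
rss = np.sum((y - X.dot(beta))**2)
pop_var = rss / (100 - 2 - 1)                 # N - p - 1 with p = 2 predictors
std_error = np.sqrt(xtx_inv.diagonal() * pop_var)
t_scores = beta / std_error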
linear_regression.py (project: astrology, author: mattsgithub)
def plot_f_distrib_for_many_coefficients(self, features):
        from scipy.stats import f

        # Remove a particular subset of features
        X = np.delete(self.X, [self.features.index(_) for _ in features], 1)

        # Prediction from reduced model
        XT = X.T
        std_error_matrix = inv(XT.dot(X))
        beta = std_error_matrix.dot(XT).dot(self.y)
        y_hat = X.dot(beta)
        rss_reduced_model = np.sum((self.y - y_hat)**2)

        dfn = len(features)
        dfd = self.df

        # This should be distributed as chi squared
        # with degrees of freedom equal to number
        # of dropped features
        rss_diff = (rss_reduced_model - self.rss)
        chi_1 = rss_diff / dfn
        chi_2 = self.pop_var
        f_score = chi_1 / chi_2

        # 5th and 95th percentiles
        f_05, f_95 = f.ppf([0.05, 0.95], dfn, dfd)

        x = np.linspace(0.001, 5.0)

        plt.axvline(x=f_05)
        plt.axvline(x=f_95)

        plt.scatter(f_score, f.pdf(f_score, dfn, dfd), marker='o', color='red')
        plt.plot(x, f.pdf(x, dfn, dfd), color='gray', lw=5, alpha=0.6)
        plt.title('f-distribution for dropping features: {0}'.format(features))
        plt.show()
detect.py (project: pysptools, author: ctherien)
# Assumed imports for the detect.py excerpts below: import numpy as np; from numpy import linalg as lin
def ACE(M, t):
    """
    Performs the adaptive cosine/coherent estimator (ACE) algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
      X Jin, S Paswater, H Cline.  "A Comparative Study of Target Detection
      Algorithms for Hyperspectral Imagery."  SPIE Algorithms and Technologies
      for Multispectral, Hyperspectral, and Ultraspectral Imagery XV.  Vol
      7334.  2009.
    """
    N, p = M.shape
    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R_hat = np.cov(M.T)
    G = lin.inv(R_hat)

    results = np.zeros(N, dtype=np.float32)
    # From Broadwater's paper:
    # tmp = G*S*inv(S.'*G*S)*S.'*G
    tmp = np.array(np.dot(t.T, np.dot(G, t)))
    dot_G_M = np.dot(G, M.T)
    num = np.square(np.dot(t, dot_G_M))
    for k in range(N):
        denom = np.dot(tmp, np.dot(M[k], dot_G_M[:,k]))
        results[k] = num[k] / denom
    return results
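A hypothetical usage sketch with synthetic data (random spectra stand in for a real HSI cube):

import numpy as np
rng = np.random.default_rng(1)
M = rng.normal(size=(500, 10))    # 500 pixels, 10 bands
t = rng.normal(size=10)           # target signature
scores = ACE(M, t)                # one detector value per pixel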
detect.py (project: pysptools, author: ctherien)
def MatchedFilter(M, t):
    """
    Performs the matched filter algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
      X Jin, S Paswater, H Cline.  "A Comparative Study of Target Detection
      Algorithms for Hyperspectral Imagery."  SPIE Algorithms and Technologies
      for Multispectral, Hyperspectral, and Ultraspectral Imagery XV.  Vol
      7334.  2009.
    """

    N, p = M.shape
    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R_hat = np.cov(M.T)
    G = lin.inv(R_hat)

    tmp = np.array(np.dot(t.T, np.dot(G, t)))
    w = np.dot(G, t)
    return np.dot(w, M.T) / tmp
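The same synthetic-data pattern works for the matched filter (illustrative only):

import numpy as np
rng = np.random.default_rng(2)
mf_scores = MatchedFilter(rng.normal(size=(500, 10)), rng.normal(size=10))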
detect.py (project: pysptools, author: ctherien)
def CEM(M, t):
    """
    Performs the constrained energy minimization algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
        Qian Du, Hsuan Ren, and Chein-I Chang. A Comparative Study of
        Orthogonal Subspace Projection and Constrained Energy Minimization.
        IEEE TGRS. Volume 41. Number 6. June 2003.
    """
    def corr(M):
        p, N = M.shape
        return np.dot(M, M.T) / N

    N, p = M.shape
    R_hat = corr(M.T)
    Rinv = lin.inv(R_hat)
    denom = np.dot(t.T, np.dot(Rinv, t))
    t_Rinv = np.dot(t.T, Rinv)
    return np.dot(t_Rinv, M.T) / denom
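And for CEM (illustrative; note CEM uses the raw data correlation, so no mean removal happens inside):

import numpy as np
rng = np.random.default_rng(3)
cem_scores = CEM(rng.normal(size=(500, 10)), rng.normal(size=10))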
detect.py (project: pysptools, author: ctherien)
def GLRT(M, t):
    """
    Performs the generalized likelihood test ratio algorithm for target
    detection.

    Parameters:
        M: `numpy array`
            2d matrix of HSI data (N x p).

        t: `numpy array`
            A target endmember (p).

    Returns: `numpy array`
        Vector of detector output (N).

    References:
        T F AyouB, "Modified GLRT Signal Detection Algorithm," IEEE
        Transactions on Aerospace and Electronic Systems, Vol 36, No 3, July
        2000.
    """
    N, p = M.shape

    # Remove mean from data
    u = M.mean(axis=0)
    M = M - np.kron(np.ones((N, 1)), u)
    t = t - u

    R = lin.inv(np.cov(M.T))
    results = np.zeros(N, dtype=float)  # np.float is removed in modern NumPy

    t_R_t_dot = np.dot(t, np.dot(R, t.T))
    for k in range(N):
        x = M[k]
        R_x_dot = np.dot(R, x)
        num = np.dot(t, R_x_dot)**2
        denom = t_R_t_dot * (1 + np.dot(x.T, R_x_dot))
        results[k] = num / denom
    return results
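GLRT follows the same calling convention (illustrative):

import numpy as np
rng = np.random.default_rng(4)
glrt_scores = GLRT(rng.normal(size=(500, 10)), rng.normal(size=10))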
Solvers.py (project: ThermoCodeLib, author: longlevan)
#Assumed imports for this excerpt: import numpy as np; from numpy import array; from numpy.linalg import inv
def MultiDimNewtRaph(f,x0,dx=1e-6,args=(),ytol=1e-5,w=1.0,JustOneStep=False):
    """
    A Newton-Raphson solver where the Jacobian is always re-evaluated rather than
    re-using the information as in the fsolve method of scipy.optimize
    """
    #Convert to numpy array, force type conversion to float
    x=np.array(x0,dtype=float)
    error=999
    J=np.zeros((len(x),len(x)))

    #If a scalar is passed in for dx, broadcast it to an array the same shape
    #as x (the original int branch called float() on an array, which raises)
    if isinstance(dx,(int,float)):
        dx=dx*np.ones_like(x)

    r0=array(f(x,*args))
    while abs(error)>ytol:
        #Build the Jacobian matrix by columns
        for i in range(len(x)):
            epsilon=np.zeros_like(x)
            epsilon[i]=dx[i]
            J[:,i]=(array(f(x+epsilon,*args))-r0)/epsilon[i]
        v=np.dot(-inv(J),r0)
        x=x+w*v
        #Calculate the residual vector at the new step
        r0=f(x,*args)
        error = np.max(np.abs(r0))
        #Just do one step and stop
        if JustOneStep:
            return x
    return x
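A hypothetical usage sketch: solving the two-equation system x0^2 + x1^2 = 1, x0 = x1 (assuming the imports noted above the function):

import numpy as np

def residuals(x):
    return [x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]]

root = MultiDimNewtRaph(residuals, [1.0, 0.5])   # converges to ~(0.7071, 0.7071)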
HSICTestObject.py (project: kerpy, author: oxmlcs)
# Assumed imports for this excerpt: import numpy as np; from numpy.linalg import inv; from scipy.linalg import sqrtm
def compute_induced_kernel_matrix_on_data(self,data_x,data_y):
        '''Z follows the same distribution as X; W follows that of Y.
        The current data generating methods we use 
        generate X and Y at the same time. '''
        size_induced_set = max(self.num_inducex,self.num_inducey)
        #print "size_induce_set", size_induced_set
        if self.data_generator is None:
            subsample_idx = np.random.randint(self.num_samples, size=size_induced_set)
            self.data_z = data_x[subsample_idx,:]
            self.data_w = data_y[subsample_idx,:]
        else:
            self.data_z, self.data_w = self.data_generator(size_induced_set)
            # The two original lines here were no-op indexing expressions; the
            # apparent intent is to keep the first num_inducex / num_inducey rows:
            self.data_z = self.data_z[:self.num_inducex, :]
            self.data_w = self.data_w[:self.num_inducey, :]
        print('Induce Set')
        if self.kernelX_use_median:
            sigmax = self.kernelX.get_sigma_median_heuristic(data_x)
            self.kernelX.set_width(float(sigmax))
        if self.kernelY_use_median:
            sigmay = self.kernelY.get_sigma_median_heuristic(data_y)
            self.kernelY.set_width(float(sigmay))
        Kxz = self.kernelX.kernel(data_x,self.data_z)
        Kzz = self.kernelX.kernel(self.data_z)
        #R = inv(sqrtm(Kzz))
        R = inv(sqrtm(Kzz + np.eye(np.shape(Kzz)[0])*10**(-6)))
        phix = Kxz.dot(R)
        Kyw = self.kernelY.kernel(data_y,self.data_w)
        Kww = self.kernelY.kernel(self.data_w)
        #S = inv(sqrtm(Kww))
        S = inv(sqrtm(Kww + np.eye(np.shape(Kww)[0])*10**(-6)))
        phiy = Kyw.dot(S)
        return phix, phiy
CoordTf.py (project: Boxy-disky-parameter-of-E-galaxies, author: spica095)
# Assumed imports for this excerpt: import numpy as np; from numpy.linalg import eig, inv
def fitEllipse(x, y):
    # Direct least-squares conic fit (Fitzgibbon et al.): minimize ||D a|| under
    # the ellipse constraint a'Ca = 4ac - b^2 encoded in the matrix C.
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    a = V[:, n]
    if a[0] < 0:
        a = -a
    return a
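A hypothetical usage sketch: recovering an axis-aligned ellipse from noisy samples (a little noise keeps S well conditioned):

import numpy as np
rng = np.random.default_rng(5)
theta = np.linspace(0, 2 * np.pi, 200)
x = 3.0 * np.cos(theta) + 0.01 * rng.normal(size=200)
y = 1.5 * np.sin(theta) + 0.01 * rng.normal(size=200)
conic = fitEllipse(x, y)   # (a, b, c, d, e, f) of ax^2 + bxy + cy^2 + dx + ey + f = 0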
hk_price_singlebeliefs.py (project: QuantEcon.lectures.code, author: QuantEcon)
# Assumed imports for this excerpt: import numpy as np; import scipy.linalg as la (or numpy.linalg)
def price_single_beliefs(transition, dividend_payoff, β=.75):
    """
    Function to Solve Single Beliefs
    """
    # First compute inverse piece
    imbq_inv = la.inv(np.eye(transition.shape[0]) - β * transition)

    # Next compute prices
    prices = β * np.dot(np.dot(imbq_inv, transition), dividend_payoff)

    return prices
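A hypothetical usage sketch: a two-state Markov chain with state-dependent dividends.

import numpy as np
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
d = np.array([1.0, 0.5])
prices = price_single_beliefs(P, d)   # one price per state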
gaussian_contours.py (project: QuantEcon.lectures.code, author: QuantEcon)
def plot3():
    # x_hat, Σ, G, R, y, X, Y, ax, cm are module-level objects in the original lecture code
    Z = gen_gaussian_plot_vals(x_hat, Σ)
    cs1 = ax.contour(X, Y, Z, 6, colors="black")
    ax.clabel(cs1, inline=1, fontsize=10)
    M = Σ * G.T * linalg.inv(G * Σ * G.T + R)
    x_hat_F = x_hat + M * (y - G * x_hat)
    Σ_F = Σ - M * G * Σ
    new_Z = gen_gaussian_plot_vals(x_hat_F, Σ_F)
    cs2 = ax.contour(X, Y, new_Z, 6, colors="black")
    ax.clabel(cs2, inline=1, fontsize=10)
    ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
    ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black")
ivec.py (project: odin, author: imito)
def extract(self, N, F):
    # Posterior i-vector from zeroth-order (N) and first-order (F) statistics
    L = np.zeros((self.tv_dim, self.tv_dim))
    L[self.itril] = N.dot(self.T_iS_Tt)
    L += np.tril(L, -1).T + self.Im   # full precision matrix from its lower triangle
    Cxx = inv(L)                      # posterior covariance
    B = self.T_iS.dot(F)
    Ex = Cxx.dot(B)                   # posterior mean, i.e. the extracted i-vector
    return Ex
ivec.py (project: odin, author: imito)
def expectation_tv(self, data_list):
    N, F = read_data(data_list, self.nmix, self.ndim)
    nfiles = F.shape[0]
    nframes = N.sum()
    LU = np.zeros((self.nmix, self.tv_dim * (self.tv_dim + 1) // 2))
    RU = np.zeros((self.tv_dim, self.nmix * self.ndim))
    LLK = 0.
    T_invS = self.Tm / self.Sigma
    T_iS_Tt = self.comp_T_invS_Tt()
    parts = 2500  # modify this based on your HW resources (e.g., memory)
    nbatch = int(nfiles / parts + 0.99999)
    for batch in range(nbatch):
      start = batch * parts
      fin = min((batch + 1) * parts, nfiles)
      length = fin - start
      N1 = N[start:fin]
      F1 = F[start:fin]
      L1 = N1.dot(T_iS_Tt)
      B1 = F1.dot(T_invS.T)
      Ex = np.empty((length, self.tv_dim))
      Exx = np.empty((length, self.tv_dim * (self.tv_dim + 1) // 2))
      llk = np.zeros((length, 1))
      for ix in range(length):
        L = np.zeros((self.tv_dim, self.tv_dim))
        L[self.itril] = L1[ix]
        L += np.tril(L, -1).T + self.Im
        Cxx = inv(L)
        B = B1[ix][:, np.newaxis]
        this_Ex = Cxx.dot(B)
        llk[ix] = self.res_llk(this_Ex, B)
        Ex[ix] = this_Ex.T
        Exx[ix] = (Cxx + this_Ex.dot(this_Ex.T))[self.itril]
      RU += Ex.T.dot(F1)
      LU += N1.T.dot(Exx)
      LLK += llk.sum()
    self.Tm = None
    tmp_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=16))  # assumes: import random, string (Python >= 3.6)
    tmpfile = self.tmpdir + 'tmat_' + tmp_string + '.h5'
    h5write(tmpfile, LU, 'LU')
    return RU, LLK, nframes
kernel_model.py (project: prml, author: Yevgnen)
def predict_one(self, X, T, x, beta, C):
        """Predict one single new point ``x``.
        The hyperparameter ``beta`` and the Gram matrix ``C`` should be
        estimated and computed from the training data beforehand. """

        kXx = self.kernel.inner(X, x)
        kxx = self.kernel.inner(x, x)

        inv_C = linalg.inv(C)
        c = kxx + 1 / beta

        self.pred_mean = kXx.T.dot(inv_C).dot(T)
        self.pred_cov = c - kXx.T.dot(inv_C).dot(kXx)  # Why the cov so small ?

        return self.pred_mean, self.pred_cov

