Python diag() example source code
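numpy.diag does two jobs: given a 1-D array it builds a square matrix with that array on the main diagonal, and given a 2-D array it extracts a diagonal as a 1-D array. A minimal sketch of both directions (the arrays are purely illustrative):

import numpy as np

v = np.array([1.0, 2.0, 3.0])
D = np.diag(v)          # 3x3 matrix with v on the main diagonal
assert np.allclose(D, np.eye(3) * v)

A = np.arange(9).reshape(3, 3)
d0 = np.diag(A)         # main diagonal: [0, 4, 8]
d1 = np.diag(A, k=1)    # first superdiagonal: [1, 5]

The snippets below, collected from open-source projects, use both directions: building diagonal matrices (covariances, whitening and scaling matrices) from vectors, and reading diagonals out of confusion, kernel, and correlation matrices.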

Stick.py (project: F_UNCLE, author: fraserphysics)
def get_sigma(self, models):
        r"""Returns the variance matrix

        variance is

        .. math::

            \Sigma_i = \sigma_t^2 + \frac{\sigma_x^2}{V_{CJ}^2}

        **Where**
            - :math:`\sigma_t` is the error in time measurements
            - :math:`\sigma_x` is the error in sensor position
            - :math:`V_{CJ}` is the detonation velocity

        see :py:meth:`F_UNCLE.Utils.Experiment.Experiment.get_sigma`

        """

        eos = self.check_models(models)[0]

        return np.diag(np.ones(self.shape()))
runatilib.py (project: ababe, author: unkcpz)
def supcell(file, scale, outmode):
    from ababe.io.io import GeneralIO
    import os
    import numpy as np

    basefname = os.path.basename(file)

    gcell = GeneralIO.from_file(file)

    scale_matrix = np.diag(np.array(scale))
    sc = gcell.supercell(scale_matrix)

    out = GeneralIO(sc)

    print("PROCESSING: {:}".format(file))
    if outmode == 'stdio':
        out.write_file(fname=None, fmt='vasp')
    else:
        ofname = "{:}_SUPC.{:}".format(basefname.split('.')[0], outmode)
        out.write_file(ofname)
score.py (project: fcn, author: wkentaro)
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print '>>>', datetime.now(), 'Iteration', iter, 'loss', loss
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print '>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print '>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc)
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print '>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu)
    freq = hist.sum(1) / hist.sum()
    print '>>>', datetime.now(), 'Iteration', iter, 'fwavacc', \
            (freq[freq > 0] * iu[freq > 0]).sum()
    return hist
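For reference, a brief sketch of how the diagonal of the confusion matrix produces these metrics; hist below is a made-up 2-class confusion matrix, not output from the project:

import numpy as np

hist = np.array([[50., 10.],     # rows: ground truth, columns: prediction
                 [ 5., 35.]])
overall_acc = np.diag(hist).sum() / hist.sum()         # (50 + 35) / 100 = 0.85
per_class_acc = np.diag(hist) / hist.sum(1)            # [50/60, 35/40]
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
# class 0: 50 / (60 + 55 - 50); class 1: 35 / (40 + 45 - 35)
freq = hist.sum(1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()        # frequency-weighted IU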
utils.py (project: fcn, author: wkentaro)
def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.

      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc
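The helper _fast_hist is not shown on this page. A common way to implement this kind of confusion-matrix accumulation (a hedged sketch under that assumption, not necessarily the project's exact code) relies on np.bincount:

import numpy as np

def fast_hist_sketch(label_true, label_pred, n_class):
    # count (true, pred) pairs for valid labels and reshape into an n_class x n_class matrix
    mask = (label_true >= 0) & (label_true < n_class)
    combined = n_class * label_true[mask].astype(int) + label_pred[mask]
    return np.bincount(combined, minlength=n_class ** 2).reshape(n_class, n_class)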


# -----------------------------------------------------------------------------
# Visualization
# -----------------------------------------------------------------------------
img_augmentation.py (project: cancer, author: yancz1989)
def fit(self, X):
        '''Required for featurewise_center, featurewise_std_normalization
        and zca_whitening.

        # Arguments
            X: Numpy array, the data to fit on.
        '''
        if self.featurewise_center:
            self.mean = np.mean(X, axis=0)
            X -= self.mean

        if self.featurewise_std_normalization:
            self.std = np.std(X, axis=0)
            X /= (self.std + 1e-7)

        if self.zca_whitening:
            flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
            sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
            U, S, V = linalg.svd(sigma)
            self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
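The whitening step above scales each singular direction by 1 / sqrt(S + eps) via np.diag. A hedged sketch of the same construction on synthetic data (all names and shapes below are illustrative); here the second-moment matrix is normalized by the number of samples:

import numpy as np
from numpy import linalg

rng = np.random.RandomState(0)
flatX = rng.randn(256, 16)                       # 256 flattened samples, 16 features
flatX -= flatX.mean(axis=0)
sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
U, S, _ = linalg.svd(sigma)
zca = np.dot(np.dot(U, np.diag(1.0 / np.sqrt(S + 1e-7))), U.T)
white = np.dot(flatX, zca)
# the whitened data has (approximately) identity covariance
assert np.allclose(np.dot(white.T, white) / white.shape[0], np.eye(16), atol=1e-3)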
kalman_filter.py (project: kalman_filter_multi_object_tracking, author: srianant)
def __init__(self):
        """Initialize variable used by Kalman Filter class
        Args:
            None
        Return:
            None
        """
        self.dt = 0.005  # delta time

        self.A = np.array([[1, 0], [0, 1]])  # matrix in observation equations
        self.u = np.zeros((2, 1))  # previous state vector

        # (x,y) tracking object center
        self.b = np.array([[0], [255]])  # vector of observations

        self.P = np.diag((3.0, 3.0))  # covariance matrix
        self.F = np.array([[1.0, self.dt], [0.0, 1.0]])  # state transition mat

        self.Q = np.eye(self.u.shape[0])  # process noise matrix
        self.R = np.eye(self.b.shape[0])  # observation noise matrix
        self.lastResult = np.array([[0], [255]])
toy2d_intractable.py (project: zhusuan, author: thu-ml)
def draw(vmean, vlogstd):
        from scipy import stats
        plt.cla()
        xlimits = [-2, 2]
        ylimits = [-4, 2]

        def log_prob(z):
            z1, z2 = z[:, 0], z[:, 1]
            return stats.norm.logpdf(z2, 0, 1.35) + \
                stats.norm.logpdf(z1, 0, np.exp(z2))

        plot_isocontours(ax, lambda z: np.exp(log_prob(z)), xlimits, ylimits)

        def variational_contour(z):
            return stats.multivariate_normal.pdf(
                z, vmean, np.diag(np.exp(vlogstd)))

        plot_isocontours(ax, variational_contour, xlimits, ylimits)
        plt.draw()
        plt.pause(1.0 / 30.0)
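A short note on the covariance construction in variational_contour: np.diag turns the per-dimension vector np.exp(vlogstd) into a diagonal covariance that scipy's multivariate_normal accepts. If vlogstd holds log standard deviations, the matching variances would be np.exp(2 * vlogstd); if it already holds log variances, np.exp(vlogstd) is the right diagonal. A small illustrative sketch (the numbers are made up):

import numpy as np
from scipy import stats

vmean = np.array([0.0, -1.0])
vlogstd = np.array([-0.5, 0.3])
cov = np.diag(np.exp(vlogstd))                   # 2x2 diagonal covariance
z = np.array([[0.0, 0.0],
              [1.0, -1.0]])
density = stats.multivariate_normal.pdf(z, vmean, cov)   # one density per row of z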
ROI_cov_Qu_SigmaJ.py (project: MEEG_connectivity, author: YingYang)
def get_neg_log_post(Phi, sigma_J_list, ROI_list, G, MMT, q, Sigma_E,  GL,
                     nu, V, prior_on = False):
    eps = 1E-13
    p = Phi.shape[0]
    n_ROI = len(sigma_J_list)
    Qu = Phi.dot(Phi.T)
    G_Sigma_G = np.zeros(MMT.shape)
    for i in range(n_ROI):
        G_Sigma_G += sigma_J_list[i]**2 * np.dot(G[:,ROI_list[i]], G[:,ROI_list[i]].T)
    cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T) 
    inv_cov = np.linalg.inv(cov)    
    eigs = np.real(np.linalg.eigvals(cov)) + eps
    log_det_cov = np.sum(np.log(eigs))  
    result = q*log_det_cov + np.trace(MMT.dot(inv_cov))
    if prior_on:
        inv_Q = np.linalg.inv(Qu)
        #det_Q = np.linalg.det(Qu)
        log_det_Q = np.sum(np.log(np.diag(Phi)**2))
        result =  result + np.float(nu+p+1)*log_det_Q+ np.trace(V.dot(inv_Q))
    return result
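The prior term uses the fact that when Phi is lower-triangular with a positive diagonal and Qu = Phi Phi^T, then log det(Qu) = sum_i log(Phi_ii^2), so the determinant never has to be formed explicitly. A quick numerical check (Phi below is an arbitrary illustrative factor):

import numpy as np

Phi = np.tril(np.random.RandomState(0).randn(4, 4))
Phi[np.diag_indices(4)] = np.abs(np.diag(Phi)) + 0.5     # enforce a positive diagonal
Qu = Phi.dot(Phi.T)
log_det_direct = np.linalg.slogdet(Qu)[1]
log_det_from_diag = np.sum(np.log(np.diag(Phi) ** 2))
assert np.allclose(log_det_direct, log_det_from_diag)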

#==============================================================================
# update both Qu and Sigma_J, gradient of Qu and Sigma J
nn.py (project: GELUs, author: hendrycks)
def fit(self, x):
        s = x.shape
        x = x.copy().reshape((s[0],np.prod(s[1:])))
        m = np.mean(x, axis=0)
        x -= m
        sigma = np.dot(x.T,x) / x.shape[0]
        U, S, V = linalg.svd(sigma)
        tmp = np.dot(U, np.diag(1./np.sqrt(S+self.regularization)))
        tmp2 = np.dot(U, np.diag(np.sqrt(S+self.regularization)))
        self.ZCA_mat = th.shared(np.dot(tmp, U.T).astype(th.config.floatX))
        self.inv_ZCA_mat = th.shared(np.dot(tmp2, U.T).astype(th.config.floatX))
        self.mean = th.shared(m.astype(th.config.floatX))
pylspm.py (project: pylspm, author: lseman)
def htmt(self):

        htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
                             index=self.manifests, columns=self.manifests)

        mean = []
        allBlocks = []
        for i in range(self.lenlatent):
            block_ = self.Variables['measurement'][
                self.Variables['latent'] == self.latent[i]]
            allBlocks.append(list(block_.values))
            block = htmt_.ix[block_, block_]
            mean_ = (block - np.diag(np.diag(block))).values
            mean_[mean_ == 0] = np.nan
            mean.append(np.nanmean(mean_))

        comb = [[k, j] for k in range(self.lenlatent)
                for j in range(self.lenlatent)]

        comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
                 for i in range(self.lenlatent ** 2)]

        comb__ = []
        for i in range(self.lenlatent ** 2):
            block = (htmt_.ix[allBlocks[comb[i][1]],
                              allBlocks[comb[i][0]]]).values
#            block[block == 1] = np.nan
            comb__.append(np.nanmean(block))

        htmt__ = np.divide(comb__, comb_)
        where_are_NaNs = np.isnan(htmt__)
        htmt__[where_are_NaNs] = 0

        htmt = pd.DataFrame(np.tril(htmt__.reshape(
            (self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)

        return htmt
uwb_tracker_node.py (project: uwb_tracker_ros, author: eth-ait)
def _solve_equation_least_squares(self, A, B):
        """Solve system of linear equations A X = B.
        Currently using Pseudo-inverse because it also allows for singular matrices.

        Args:
             A (numpy.ndarray): Left-hand side of equation.
             B (numpy.ndarray): Right-hand side of equation.

        Returns:
             X (numpy.ndarray): Solution of equation.
        """
        # Pseudo-inverse
        X = np.dot(np.linalg.pinv(A), B)
        # LU decomposition
        # lu, piv = scipy.linalg.lu_factor(A)
        # X = scipy.linalg.lu_solve((lu, piv), B)
        # Vanilla least-squares from numpy
        # X, _, _, _ = np.linalg.lstsq(A, B)
        # QR decomposition
        # Q, R, P = scipy.linalg.qr(A, mode='economic', pivoting=True)
        # # Find first zero element in R
        # out = np.where(np.diag(R) == 0)[0]
        # if out.size == 0:
        #     i = R.shape[0]
        # else:
        #     i = out[0]
        # B_prime = np.dot(Q.T, B)
        # X = np.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
        # X[P[:i], :] = scipy.linalg.solve_triangular(R[:i, :i], B_prime[:i, :])
        return X
utils.py (project: spyking-circus, author: spyking-circus)
def get_whitening_matrix(X, fudge=1E-18):
   from numpy.linalg import eigh
   Xcov = numpy.dot(X.T, X)/X.shape[0]
   d,V  = eigh(Xcov)
   D    = numpy.diag(1./numpy.sqrt(d+fudge))
   W    = numpy.dot(numpy.dot(V,D), V.T)
   return W
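Because W = V diag(1/sqrt(d + fudge)) V^T is built from the eigendecomposition of the data covariance, multiplying the data by W decorrelates the features. A hedged verification sketch on synthetic data (shapes and seed are illustrative):

import numpy
from numpy.linalg import eigh

X = numpy.random.RandomState(1).randn(500, 8)
Xcov = numpy.dot(X.T, X) / X.shape[0]
d, V = eigh(Xcov)
W = numpy.dot(numpy.dot(V, numpy.diag(1. / numpy.sqrt(d + 1e-18))), V.T)
Xw = numpy.dot(X, W)
# the whitened covariance is approximately the identity
assert numpy.allclose(numpy.dot(Xw.T, Xw) / Xw.shape[0], numpy.eye(8), atol=1e-6)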
mklmm.py (project: MKLMM, author: omerwe)
def fit(self, X, C, y, regions, kernelType, reml=True, maxiter=100):

        #construct a list of kernel names (one for each region) 
        if (kernelType == 'adapt'): kernelNames = self.buildKernelAdapt(X, C, y, regions, reml, maxiter)
        else: kernelNames = [kernelType] * len(regions)         

        #perform optimization
        kernelObj, hyp_kernels, sig2e, fixedEffects = self.optimize(X, C, y, kernelNames, regions, reml, maxiter)

        #compute posterior distribution
        Ktraintrain = kernelObj.getTrainKernel(hyp_kernels)
        post = self.infExact_scipy_post(Ktraintrain, C, y, sig2e, fixedEffects)

        #fix intercept if phenotype is binary
        if (len(np.unique(y)) == 2):            
            controls = (y<y.mean())
            cases = ~controls
            meanVec = C.dot(fixedEffects)
            mu, var = self.getPosteriorMeanAndVar(np.diag(Ktraintrain), Ktraintrain, post, meanVec)                                     
            fixedEffects[0] -= optimize.minimize_scalar(self.getNegLL, args=(mu, np.sqrt(sig2e+var), controls, cases), method='brent').x                

        #construct trainObj
        trainObj = dict([])
        trainObj['sig2e'] = sig2e
        trainObj['hyp_kernels'] = hyp_kernels
        trainObj['fixedEffects'] = fixedEffects     
        trainObj['kernelNames'] = kernelNames

        return trainObj
mklmm.py (project: MKLMM, author: omerwe)
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1]    #set counter to index of last processed data point

        return fmu, fs2
kernels.py (project: MKLMM, author: omerwe)
def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)
        if (len(Xtest) != len(self.kernels)): raise Exception('Xtest should be a list with length equal to #kernels!')
        diag = 0
        params_ind = 0
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            diag += k.getTestKernelDiag(params[params_ind:params_ind+numHyp], Xtest[k_i])
            params_ind += numHyp
        return diag




#the only parameter here is the bias term c...
kernels.py (project: MKLMM, author: omerwe)
def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)
        b = np.exp(params[0])
        k = np.exp(params[1])
        M = 1.0 / (b + np.exp(k*self.D))
        Xtest_scaled = Xtest/np.sqrt(Xtest.shape[1])
        return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))




#LD kernel with exponential decay but no bias term
kernels.py (project: MKLMM, author: omerwe)
def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)        
        k = np.exp(params[0])
        M = 1.0 / (1.0 + k*self.D)
        Xtest_scaled = Xtest/np.sqrt(Xtest.shape[1])
        return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))





#LD kernel with polynomial decay
kernels.py (project: MKLMM, author: omerwe)
def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)        
        p = np.exp(params[0])
        k = np.exp(params[1])
        M = 1.0 / (1.0 + k*(self.D**p))
        Xtest_scaled = Xtest/np.sqrt(Xtest.shape[1])
        return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))
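All three kernel variants above form the full test-by-test matrix only to keep its diagonal. When the number of test points is large, an equivalent computation (a hedged sketch, not the project's code) avoids materializing that matrix:

import numpy as np

rng = np.random.RandomState(0)
Xtest_scaled = rng.randn(200, 50)            # illustrative test matrix
M = rng.randn(50, 50)
M = (M + M.T) / 2                            # illustrative symmetric weight matrix

full = np.diag(Xtest_scaled.dot(M).dot(Xtest_scaled.T))          # builds a 200x200 matrix
lean = np.einsum('ij,jk,ik->i', Xtest_scaled, M, Xtest_scaled)   # diagonal entries only
assert np.allclose(full, lean)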
gpUtils.py (project: MKLMM, author: omerwe)
def symmetrize(X): return X + X.T - np.diag(X.diagonal())
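A brief usage sketch: symmetrize mirrors one filled triangle onto the other without doubling the diagonal (the matrix below is illustrative, and the one-liner is copied from above so the sketch runs on its own):

import numpy as np

def symmetrize(X): return X + X.T - np.diag(X.diagonal())

L = np.array([[1.0, 0.0, 0.0],
              [2.0, 3.0, 0.0],
              [4.0, 5.0, 6.0]])      # only the lower triangle is populated
S = symmetrize(L)
# S == [[1, 2, 4], [2, 3, 5], [4, 5, 6]]; the diagonal stays [1, 3, 6]
assert np.allclose(S, S.T)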


#this code is directly translated from the GPML Matlab package (see attached license)
sgcrf.py (project: sgcrfpy, author: dswah)
def l1_norm_off_diag(A):
        "convenience method for l1 norm, excluding diagonal"
        # let's speed this up later
        # assume A symmetric, sparse too
        return np.linalg.norm(A - np.diag(A.diagonal()), ord=1)
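One caution worth noting about numpy itself (not a claim about the project's intent): for a 2-D array, np.linalg.norm(..., ord=1) returns the induced 1-norm, i.e. the maximum absolute column sum, not the sum of absolute entries. If an entrywise off-diagonal l1 sum is what is needed, a hedged alternative sketch looks like this:

import numpy as np

def l1_norm_off_diag_entrywise(A):
    # sum of absolute values of the off-diagonal entries (entrywise l1)
    return np.abs(A - np.diag(A.diagonal())).sum()

A = np.array([[1.0, -2.0],
              [3.0,  4.0]])
induced = np.linalg.norm(A - np.diag(A.diagonal()), ord=1)   # max column sum: 3.0
entrywise = l1_norm_off_diag_entrywise(A)                    # |-2| + |3| = 5.0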

