Example source code for Python's numpy.diag_indices()
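The snippets below are collected from open-source projects and all revolve around numpy.diag_indices, which returns a tuple of index arrays addressing the main diagonal of a square array. A minimal self-contained demonstration (not taken from any of the projects below):

import numpy as np

A = np.arange(16).reshape(4, 4)
rows, cols = np.diag_indices(A.shape[0])   # (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
A[rows, cols] = 0                          # zero out the main diagonal in place
print(A[np.diag_indices(4)])               # [0 0 0 0]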

Train_39_Node_Net.py (project: LearnGraphDiscovery, author: eugenium)
def toPartialCorr(Prec):
    # Partial correlation from a precision matrix: rho_ij = -Prec_ij / sqrt(Prec_ii * Prec_jj)
    D = Prec[np.diag_indices(Prec.shape[0])]
    P = Prec.copy()
    D = np.outer(D, D)
    return -P / np.sqrt(D)
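A quick check of the snippet above on a hand-built precision matrix (hypothetical values; assumes numpy is imported as np and toPartialCorr is in scope):

Prec = np.array([[ 2.0, -0.8],
                 [-0.8,  1.0]])
PC = toPartialCorr(Prec)
# Off-diagonal entries are -Prec_ij / sqrt(Prec_ii * Prec_jj); the diagonal comes out as -1.
print(PC)   # [[-1.          0.56568542]
            #  [ 0.56568542 -1.        ]]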
EvaluationTools.py (project: LearnGraphDiscovery, author: eugenium)
def toPartialCorr(Prec):
    D = Prec[np.diag_indices(Prec.shape[0])]
    P = Prec.copy()
    D = np.outer(D, D)
    # Report degenerate diagonal products before patching them up,
    # otherwise the checks below can never trigger.
    if np.sum(D == 0):
        print('Ds', np.sum(D == 0))
    if np.sum(D < 0):
        print('Dsminus', np.sum(D < 0))
    D[D == 0] = 1
    return -P / np.sqrt(D)
corrcoef_matrix.py (project: teneto, author: wiheto)
def corrcoef_matrix(matrix):
    # Code originating from http://stackoverflow.com/a/24547964 by http://stackoverflow.com/users/2455058/jingchao
    # Assumes numpy as np and a regularized incomplete beta function named betai
    # (scipy.special.betainc in current SciPy).

    r = np.corrcoef(matrix)
    rf = r[np.triu_indices(r.shape[0], 1)]
    df = matrix.shape[1] - 2
    ts = rf * rf * (df / (1 - rf * rf))
    pf = betai(0.5 * df, 0.5, df / (df + ts))
    p = np.zeros(shape=r.shape)
    p[np.triu_indices(p.shape[0], 1)] = pf
    # Mirror the upper triangle explicitly; assigning pf to tril_indices directly
    # would scramble the ordering for matrices larger than 3x3.
    p[np.tril_indices(p.shape[0], -1)] = p.T[np.tril_indices(p.shape[0], -1)]
    p[np.diag_indices(p.shape[0])] = np.ones(p.shape[0])
    return r, p
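A hedged usage sketch: the function relies on a module-level betai, which in the original project came from SciPy; scipy.special.betainc is the equivalent regularized incomplete beta function in current releases. The sketch assumes corrcoef_matrix and this betai live in the same module.

import numpy as np
from scipy.special import betainc as betai   # stand-in for the removed scipy.stats.betai

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 100))                # 5 variables, 100 observations each
r, p = corrcoef_matrix(X)
assert r.shape == p.shape == (5, 5)
assert np.allclose(np.diag(p), 1.0)          # diagonal p-values are set to 1 by construction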
FITC_network.py (project: deepGP_approxEP, author: thangbui)
def get_hypers(self):
        params = {'ls': [],
                  'sf': [],
                  'zu': [],
                  'sn': [],
                  'eta1_R': [],
                  'eta2': []}
        for i in range(self.nolayers):
            layeri = self.layers[i]
            Mi = layeri.M
            Dini = layeri.Din
            Douti = layeri.Dout
            params['ls'].append(layeri.ls.get_value())
            params['sf'].append(layeri.sf.get_value())
            params['sn'].append(layeri.sn.get_value())
            triu_ind = np.triu_indices(Mi)
            diag_ind = np.diag_indices(Mi)
            params_zu_i = []
            params_eta2_i = []
            params_eta1_Ri = []
            for d in range(Douti):
                params_zu_i.append(layeri.zu[d].get_value())
                params_eta2_i.append(layeri.theta_2[d])

                Rd = layeri.theta_1_R[d]
                Rd[diag_ind] = np.log(Rd[diag_ind])
                params_eta1_Ri.append(Rd[triu_ind].reshape((Mi * (Mi + 1) // 2, 1)))

            params['zu'].append(params_zu_i)
            params['eta1_R'].append(params_eta1_Ri)
            params['eta2'].append(params_eta2_i)
        return params
FITC_network.py (project: deepGP_approxEP, author: thangbui)
def get_hypers(self):
        params = {'ls': [],
                  'sf': [],
                  'zu': [],
                  'sn': [],
                  'eta1_R': [],
                  'eta2': []}
        for i in range(self.no_layers):
            Mi = self.no_pseudos[i]
            Dini = self.layer_sizes[i]
            Douti = self.layer_sizes[i+1]
            params['ls'].append(self.ls[i])
            params['sf'].append(self.sf[i])
            if not (self.no_output_noise and (i == self.no_layers-1)):
                params['sn'].append(self.sn[i])
            triu_ind = np.triu_indices(Mi)
            diag_ind = np.diag_indices(Mi)
            params_zu_i = []
            params_eta2_i = []
            params_eta1_Ri = []
            if self.zu_tied:
                params_zu_i = self.zu[i]
            else:
                for d in range(Douti):
                    params_zu_i.append(self.zu[i][d])

            for d in range(Douti):
                params_eta2_i.append(self.theta_2[i][d])
                Rd = self.theta_1_R[i][d]
                Rd[diag_ind] = np.log(Rd[diag_ind])
                params_eta1_Ri.append(Rd[triu_ind].reshape((Mi * (Mi + 1) // 2,)))

            params['zu'].append(params_zu_i)
            params['eta1_R'].append(params_eta1_Ri)
            params['eta2'].append(params_eta2_i)
        return params
FITC_network.py (project: deepGP_approxEP, author: thangbui)
def update_hypers(self, params):
        for i in range(self.no_layers):
            Mi = self.no_pseudos[i]
            Dini = self.layer_sizes[i]
            Douti = self.layer_sizes[i+1]
            self.ls[i] = params['ls'][i]
            self.ones_M_ls[i] = np.outer(self.ones_M[i], 1.0 / np.exp(2*self.ls[i]))
            self.sf[i] = params['sf'][i]
            if not ((self.no_output_noise) and (i == self.no_layers-1)):
                self.sn[i] = params['sn'][i]
            triu_ind = np.triu_indices(Mi)
            diag_ind = np.diag_indices(Mi)
            if self.zu_tied:
                zi = params['zu'][i]
                self.zu[i] = zi
            else:
                for d in range(Douti):
                    zid = params['zu'][i][d]
                    self.zu[i][d] = zid

            for d in range(Douti):
                theta_m_d = params['eta2'][i][d]
                theta_R_d = params['eta1_R'][i][d]
                R = np.zeros((Mi, Mi))
                R[triu_ind] = theta_R_d.reshape(theta_R_d.shape[0], )
                R[diag_ind] = np.exp(R[diag_ind])
                self.theta_1_R[i][d] = R
                self.theta_1[i][d] = np.dot(R.T, R)
                self.theta_2[i][d] = theta_m_d
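The methods above pack and unpack a per-output upper-triangular factor, storing the diagonal in log-space so the reconstructed R always has a strictly positive diagonal. A standalone sketch of that round trip (hypothetical helper names, not part of the project):

import numpy as np

def pack_R(R):
    M = R.shape[0]
    Rp = R.copy()
    Rp[np.diag_indices(M)] = np.log(Rp[np.diag_indices(M)])
    return Rp[np.triu_indices(M)]                 # vector of length M*(M+1)//2

def unpack_R(vec, M):
    R = np.zeros((M, M))
    R[np.triu_indices(M)] = vec
    R[np.diag_indices(M)] = np.exp(R[np.diag_indices(M)])
    return R

M = 4
R0 = np.triu(np.random.rand(M, M)) + np.eye(M)    # upper triangular, positive diagonal
assert np.allclose(unpack_R(pack_R(R0), M), R0)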
test_lyapunov.py (project: nengolib, author: arvoelke)
def test_balreal():
    isys = Lowpass(0.05)
    noise = 0.5*Lowpass(0.01) + 0.5*Alpha(0.005)
    p = 0.8
    sys = p*isys + (1-p)*noise

    T, Tinv, S = balanced_transformation(sys)
    assert np.allclose(inv(T), Tinv)
    assert np.allclose(S, hsvd(sys))

    balsys = sys.transform(T, Tinv)
    assert balsys == sys

    assert np.all(S >= 0)
    assert np.all(S[0] > 0.3)
    assert np.all(S[1:] < 0.05)
    assert np.allclose(sorted(S, reverse=True), S)

    P = control_gram(balsys)
    Q = observe_gram(balsys)

    diag = np.diag_indices(len(P))
    offdiag = np.ones_like(P, dtype=bool)
    offdiag[diag] = False
    offdiag = np.where(offdiag)

    assert np.allclose(P[diag], S)
    assert np.allclose(P[offdiag], 0)
    assert np.allclose(Q[diag], S)
    assert np.allclose(Q[offdiag], 0)
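The diag/offdiag masking used in this test generalizes to a small helper for checking that a matrix is numerically diagonal; a hypothetical sketch:

import numpy as np

def is_approx_diagonal(A, atol=1e-8):
    offdiag = np.ones_like(A, dtype=bool)
    offdiag[np.diag_indices(A.shape[0])] = False
    return np.allclose(A[offdiag], 0.0, atol=atol)

assert is_approx_diagonal(np.diag([1.0, 2.0, 3.0]))
assert not is_approx_diagonal(np.ones((3, 3)))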
ai.py (project: CrossboneGames, author: adrianstaniec)
def __add_third_in_line(matrix, player, win_or_defend):
    """Look for 2 in line and try to win or defend in 1 move"""
    done = False
    opponent = model.opponent(player)
    # = horizontals
    for row in range(3):
        if not done:
            done, line = win_or_defend(matrix[row, :], player)
            if done:
                matrix[row, :] = line
    # || verticals
    for col in range(3):
        if not done:
            done, line = win_or_defend(matrix[:, col], player)
            if done:
                matrix[:, col] = line
    if not done:
        # \ diagonal
        done, line = win_or_defend(matrix.diagonal(), player)
        if done:
            matrix[np.diag_indices(3)] = line
    if not done:
        # / diagonal
        done, line = win_or_defend(np.fliplr(matrix).diagonal(), player)
        if done:
            np.fliplr(matrix)[np.diag_indices(3)] = line
    if not done:
        return matrix, player
    else:
        return matrix, opponent
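A detail worth noting in the snippet above: np.fliplr returns a view, so assigning through np.fliplr(matrix)[np.diag_indices(3)] writes the anti-diagonal of the original array. A quick demonstration:

import numpy as np

m = np.zeros((3, 3), dtype=int)
np.fliplr(m)[np.diag_indices(3)] = 1   # writes through the view into m
print(m)
# [[0 0 1]
#  [0 1 0]
#  [1 0 0]]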
OptimizerHDPPE.py (project: bnpy, author: bnpy)
def _get_diagIDs(K):
  if K in diagIDsDict:
    return diagIDsDict[K]
  else:
    diagIDs = np.diag_indices(K)
    diagIDsDict[K] = diagIDs
    return diagIDs
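_get_diagIDs assumes a module-level diagIDsDict cache so that np.diag_indices(K) is only built once per K. A self-contained sketch of the same pattern (names chosen for illustration):

import numpy as np

diagIDsDict = dict()

def get_diag_ids(K):
    if K not in diagIDsDict:
        diagIDsDict[K] = np.diag_indices(K)
    return diagIDsDict[K]

assert get_diag_ids(5) is get_diag_ids(5)   # second call returns the cached tuple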
SeqOfWordsData.py (project: bnpy, author: bnpy)
def _calcWordTypeCooccurMatrix(self, Q, sameWordVec, nDoc):
        """ Transform building blocks into the final Q matrix

        Returns
        -------
        Q : 2D array, size W x W (where W is vocab_size)
        """
        Q /= np.float32(nDoc)
        sameWordVec /= np.float32(nDoc)
        diagIDs = np.diag_indices(self.vocab_size)
        Q[diagIDs] -= sameWordVec

        # Fix small numerical issues (like diag entries of -1e-15 instead of 0)
        np.maximum(Q, 1e-100, out=Q)
        return Q
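Outside the class, the diagonal correction performed above amounts to the following (hypothetical small arrays for illustration):

import numpy as np

vocab_size = 3
Q = np.full((vocab_size, vocab_size), 2.0)
sameWordVec = np.array([5.0, 2.0, 1.0])
nDoc = 10

Q /= np.float32(nDoc)
sameWordVec /= np.float32(nDoc)
Q[np.diag_indices(vocab_size)] -= sameWordVec
np.maximum(Q, 1e-100, out=Q)   # clamp tiny or negative diagonal entries to a positive floor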
BagOfWordsData.py (project: bnpy, author: bnpy)
def _calcWordTypeCooccurMatrix(self, Q, sameWordVec, nDoc):
        """ Transform building blocks into the final Q matrix

        Returns
        -------
        Q : 2D array, size W x W (where W is vocab_size)
        """
        Q /= np.float32(nDoc)
        sameWordVec /= np.float32(nDoc)
        diagIDs = np.diag_indices(self.vocab_size)
        Q[diagIDs] -= sameWordVec

        # Fix small numerical issues (like diag entries of -1e-15 instead of 0)
        np.maximum(Q, 1e-100, out=Q)
        return Q
OptimizerRhoOmegaBetter.py (project: bnpy, author: bnpy)
def _get_diagIDs(K):
    if K in diagIDsDict:
        return diagIDsDict[K]
    else:
        diagIDs = np.diag_indices(K)
        diagIDsDict[K] = diagIDs
        return diagIDs
OptimizerRhoOmega.py (project: bnpy, author: bnpy)
def _get_diagIDs(K):
    if K in diagIDsDict:
        return diagIDsDict[K]
    else:
        diagIDs = np.diag_indices(K)
        diagIDsDict[K] = diagIDs
        return diagIDs
Concurrent_AP.py (project: ProjectOfDataMining, author: IljaNovo)
def add_preference(hdf5_file, preference):
    """Assign the value 'preference' to the diagonal entries
        of the matrix of similarities stored in the HDF5 data structure 
        at 'hdf5_file'.
    """

    Worker.hdf5_lock.acquire()

    with tables.open_file(hdf5_file, 'r+') as fileh:
        S = fileh.root.aff_prop_group.similarities
        diag_ind = np.diag_indices(S.nrows)
        S[diag_ind] = preference

    Worker.hdf5_lock.release()
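The same operation on an in-memory similarity matrix, without HDF5 storage or locking (the median preference is only an illustrative choice):

import numpy as np

S = -np.abs(np.random.randn(5, 5))           # a toy similarity matrix
preference = np.median(S)
S[np.diag_indices(S.shape[0])] = preference  # same effect as add_preference above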
Concurrent_AP.py (project: ProjectOfDataMining, author: IljaNovo)
def check_convergence(hdf5_file, iteration, convergence_iter, max_iter):
    """If the estimated number of clusters has not changed for 'convergence_iter'
        consecutive iterations in a total of 'max_iter' rounds of message-passing, 
        the procedure herewith returns 'True'.
        Otherwise, returns 'False'.
        Parameter 'iteration' identifies the run of message-passing 
        that has just completed.
    """

    Worker.hdf5_lock.acquire()

    with tables.open_file(hdf5_file, 'r+') as fileh:
        A = fileh.root.aff_prop_group.availabilities
        R = fileh.root.aff_prop_group.responsibilities
        P = fileh.root.aff_prop_group.parallel_updates

        N = A.nrows
        diag_ind = np.diag_indices(N)

        E = (A[diag_ind] + R[diag_ind]) > 0
        P[:, iteration % convergence_iter] = E

        e_mat = P[:]
        K = E.sum(axis = 0)

    Worker.hdf5_lock.release()

    if iteration >= convergence_iter:
        se = e_mat.sum(axis = 1)
        unconverged = (np.sum((se == convergence_iter) + (se == 0)) != N)
        if (not unconverged and (K > 0)) or (iteration == max_iter):
            return True

    return False
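The convergence test boils down to: over the last convergence_iter rounds, every point's exemplar flag stayed constant and at least one exemplar exists. An in-memory sketch with hypothetical values:

import numpy as np

convergence_iter = 5
e_mat = np.ones((4, convergence_iter), dtype=int)   # each point flagged identically every round
se = e_mat.sum(axis=1)
K = e_mat[:, -1].sum()                              # number of exemplars in the latest round
unconverged = np.sum((se == convergence_iter) + (se == 0)) != e_mat.shape[0]
assert (not unconverged) and (K > 0)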
mds_with_anchors.py (project: openbadge-analysis, author: HumanDynamics)
def _gt_weights(W):
    """Computes the weights V for a Guttman transform V X = B(X) Z."""
    V = -W
    V[np.diag_indices(V.shape[0])] = W.sum(axis=1) - W.diagonal()

    return V
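For a symmetric weight matrix with zero diagonal, the V computed above is exactly the graph Laplacian diag(row sums of W) - W; a quick check under that assumption (assumes _gt_weights above is in scope):

import numpy as np

rng = np.random.default_rng(0)
W = rng.random((4, 4))
W = (W + W.T) / 2
np.fill_diagonal(W, 0.0)

V = _gt_weights(W)
assert np.allclose(V, np.diag(W.sum(axis=1)) - W)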
mds_with_anchors.py (project: openbadge-analysis, author: HumanDynamics)
def _gt_mapping(D, W, Z):
    """Computes the mapping B(X) for a Guttman transform V X = B(X) Z."""
    # Compute the Euclidean distances between all pairs of points
    Dz = distance.cdist(Z, Z)
    # Fill the diagonal of Dz, because *we don't want a division by zero*
    np.fill_diagonal(Dz, 1e-5)

    B = - W * D / Dz
    np.fill_diagonal(B, 0.0)
    B[np.diag_indices(B.shape[0])] = -np.sum(B, axis=1)

    return B
classification.py (project: inqbus.rainflow, author: Inqbus)
def binning_as_matrix(
        bin_count,
        array,
        minimum=None,
        maximum=None,
        axis=[],
        remove_small_cycles=True):
    """
    :param bin_count:
    :param array:
    :param minimum: minimum value to be recognized. Values smaller than min
        will be filtered
    :param maximum: maximum value to be recognized. Values bigger than max
        will be filtered
    :param axis: list of placements for axis. Possible list-elements are:
            * 'bottom'
            * 'left'
            * 'right'
            * 'top'
    :param remove_small_cycles: if True cycles where start and end are
        identical after binning will be removed
    :return: data matrix with start in rows and target in columns
    """

    # Building a 2D histogram

    if minimum is None:
        minimum = np.nanmin(array)
    if maximum is None:
        maximum = np.nanmax(array)

    minimum = float(minimum)
    maximum = float(maximum)

    start = array[:, 0]
    target = array[:, 1]

    classified_data = np.histogram2d(start, target, bins=bin_count, range=[
                                     [minimum, maximum], [minimum, maximum]])

    res_matrix = classified_data[0]
    intervall_edges = classified_data[1]

    axis_value = np.diff(intervall_edges) / 2.0 + intervall_edges[0:-1]

    if remove_small_cycles:
        indexes = np.diag_indices(bin_count)
        res_matrix[indexes] = 0.0

    if axis:
        res_matrix = append_axis(res_matrix, horizontal_axis=axis_value,
                                 vertical_axis=axis_value, axis=axis)

    return res_matrix
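A hedged usage sketch with a toy two-column (start, target) array, assuming binning_as_matrix above is in scope; cycles whose start and target fall into the same bin land on the diagonal and are zeroed out by remove_small_cycles:

import numpy as np

cycles = np.array([[0.1, 0.9],
                   [0.2, 0.8],
                   [0.5, 0.5]])                # the last pair is a "small cycle"
matrix = binning_as_matrix(4, cycles)
assert matrix[np.diag_indices(4)].sum() == 0   # the small cycle was removed
assert matrix.sum() == 2                       # the two genuine cycles remain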
system.py (project: describe, author: SINGROUP)
def get_displacement_tensor(self):
        """A matrix where the entry A[i, j, :] is the vector
        self.cartesian_pos[i] - self.cartesian_pos[j].

        For periodic systems the distance of an atom from itself is the
        smallest displacement of an atom from one of its periodic copies, and
        the distance of two different atoms is the distance of two closest
        copies.

        Returns:
            np.array: 3D matrix containing the pairwise distance vectors.
        """
        if self._displacement_tensor is None:

            if self.pbc.any():
                pos = self.get_scaled_positions()
                disp_tensor = pos[:, None, :] - pos[None, :, :]

                # Take periodicity into account by wrapping coordinate elements
                # that are bigger than 0.5 or smaller than -0.5
                indices = np.where(disp_tensor > 0.5)
                disp_tensor[indices] = 1 - disp_tensor[indices]
                indices = np.where(disp_tensor < -0.5)
                disp_tensor[indices] = disp_tensor[indices] + 1

                # Transform to cartesian
                disp_tensor = self.to_cartesian(disp_tensor)

                # Figure out the smallest basis vector and set it as
                # displacement for diagonal
                cell = self.get_cell()
                basis_lengths = np.linalg.norm(cell, axis=1)
                min_index = np.argmin(basis_lengths)
                min_basis = cell[min_index]
                diag_indices = np.diag_indices(len(disp_tensor))
                disp_tensor[diag_indices] = min_basis

            else:
                pos = self.get_positions()
                disp_tensor = pos[:, None, :] - pos[None, :, :]

            self._displacement_tensor = disp_tensor

        return self._displacement_tensor
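For the non-periodic branch, the core construction reduces to a broadcasted difference; a minimal standalone sketch:

import numpy as np

pos = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 2.0, 0.0]])
disp = pos[:, None, :] - pos[None, :, :]        # disp[i, j] == pos[i] - pos[j]
assert np.allclose(disp[np.diag_indices(len(disp))], 0.0)
assert np.allclose(disp[0, 1], [-1.0, 0.0, 0.0])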
test_pta.py (project: enterprise, author: nanograv)
def test_parameterized_orf(self):
        T1 = 3.16e8
        pl = utils.powerlaw(log10_A=parameter.Uniform(-18,-12),
                            gamma=parameter.Uniform(1,7))
        orf = hd_orf_generic(a=parameter.Uniform(0,5),
                             b=parameter.Uniform(0,5),
                             c=parameter.Uniform(0,5))
        rn = gp_signals.FourierBasisGP(spectrum=pl, Tspan=T1, components=30)
        crn = gp_signals.FourierBasisCommonGP(spectrum=pl, orf=orf,
                                              components=30, name='gw',
                                              Tspan=T1)

        model = rn + crn
        pta = model(self.psrs[0]) + model(self.psrs[1])

        lA1, gamma1 = -13, 1e-15
        lA2, gamma2 = -13.3, 1e-15
        lAc, gammac = -13.1, 1e-15
        a, b, c = 1.9, 0.4, 0.23

        params = {'gw_log10_A': lAc, 'gw_gamma': gammac,
                  'gw_a': a, 'gw_b':b, 'gw_c':c,
                  'B1855+09_log10_A': lA1, 'B1855+09_gamma': gamma1,
                  'J1909-3744_log10_A': lA2, 'J1909-3744_gamma': gamma2}

        phi = pta.get_phi(params)
        phiinv = pta.get_phiinv(params)

        F1, f1 = utils.createfourierdesignmatrix_red(
            self.psrs[0].toas, nmodes=30, Tspan=T1)
        F2, f2 = utils.createfourierdesignmatrix_red(
            self.psrs[1].toas, nmodes=30, Tspan=T1)

        msg = 'F matrix incorrect'
        assert np.allclose(pta.get_basis(params)[0], F1, rtol=1e-10), msg
        assert np.allclose(pta.get_basis(params)[1], F2, rtol=1e-10), msg

        nftot = 120
        phidiag = np.zeros(nftot)
        phit = np.zeros((nftot, nftot))

        phidiag[:60] = utils.powerlaw(f1, lA1, gamma1)
        phidiag[:60] += utils.powerlaw(f1, lAc, gammac)
        phidiag[60:] = utils.powerlaw(f2, lA2, gamma2)
        phidiag[60:] += utils.powerlaw(f2, lAc, gammac)

        phit[np.diag_indices(nftot)] = phidiag
        orf = hd_orf_generic(self.psrs[0].pos, self.psrs[1].pos, a=a, b=b, c=c)
        spec = utils.powerlaw(f1, log10_A=lAc, gamma=gammac)
        phit[:60, 60:] = np.diag(orf*spec)
        phit[60:, :60] = phit[:60, 60:]

        msg = '{} {}'.format(np.diag(phi), np.diag(phit))
        assert np.allclose(phi, phit, rtol=1e-15, atol=1e-17), msg
        msg = 'PTA Phi inverse is incorrect {}.'.format(params)
        assert np.allclose(phiinv, np.linalg.inv(phit),
                           rtol=1e-15, atol=1e-17), msg
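The phi matrix assembled in this test has per-pulsar spectra plus the common spectrum on its diagonal and the ORF-scaled common spectrum on the off-diagonal blocks; a small sketch of that block structure with hypothetical sizes and values:

import numpy as np

n = 3                                            # modes per pulsar (hypothetical)
spec1, spec2 = np.ones(n), 2.0 * np.ones(n)      # intrinsic red-noise spectra
common = 0.5 * np.ones(n)                        # common-process spectrum
orf = 0.3                                        # overlap reduction function value

phi = np.zeros((2 * n, 2 * n))
phi[np.diag_indices(2 * n)] = np.concatenate([spec1 + common, spec2 + common])
phi[:n, n:] = np.diag(orf * common)
phi[n:, :n] = phi[:n, n:]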

