Example source code for Python's einsum()

UVShape.py (project: Modeling-Cloth, author: the3dadvantage)
def total_length_selected(ed='empty', coords='empty', ob='empty'):
    '''Returns the total length of all selected edge segments'''
    if ob == 'empty':
        ob = bpy.context.object
    if coords == 'empty':    
        coords = get_coords(ob)
    if ed == 'empty':    
        ed = get_edge_idx(ob)
    edc = coords[ed]
    e1 = edc[:, 0]
    e2 = edc[:, 1]
    ee1 = e1 - e2
    sel = get_selected_edges(ob)    
    ee = ee1[sel]    
    leng = np.einsum('ij,ij->i', ee, ee)
    return np.sum(np.sqrt(leng))
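
The np.einsum('ij,ij->i', ee, ee) call returns the squared length of each selected edge vector. A minimal standalone sketch of that pattern with made-up vectors (not Blender data):

import numpy as np

vecs = np.array([[3.0, 4.0, 0.0], [1.0, 2.0, 2.0]])   # two edge vectors
sq = np.einsum('ij,ij->i', vecs, vecs)                 # per-row squared length: [25., 9.]
assert np.allclose(sq, (vecs * vecs).sum(axis=1))      # same thing, spelled out
print(np.sum(np.sqrt(sq)))                             # 8.0 == 5 + 3
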
puzzle.py (project: latplan, author: guicho271828)
def transitions_old(width, height, configs=None, one_per_state=False):
    digit = width * height
    if configs is None:
        configs = generate_configs(digit)
    if one_per_state:
        def pickone(thing):
            index = np.random.randint(0,len(thing))
            return thing[index]
        transitions = np.array([
            generate(
                [c1,pickone(successors(c1,width,height))],width,height)
            for c1 in configs ])
    else:
        transitions = np.array([ generate([c1,c2],width,height)
                                 for c1 in configs for c2 in successors(c1,width,height) ])
    return np.einsum('ab...->ba...',transitions)
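
The closing np.einsum('ab...->ba...', transitions) only swaps the first two axes and leaves any trailing dimensions untouched. A quick standalone check of that pattern:

import numpy as np

t = np.arange(24).reshape(2, 3, 4)
swapped = np.einsum('ab...->ba...', t)
assert swapped.shape == (3, 2, 4)
assert np.array_equal(swapped, np.swapaxes(t, 0, 1))   # identical to swapaxes
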
plot.py (project: latplan, author: guicho271828)
def puzzle_plot(p):
    p.setup()
    def name(template):
        return template.format(p.__name__)
    from itertools import islice
    configs = list(islice(p.generate_configs(9), 1000)) # be careful, islice is not immutable!!!
    import numpy.random as random
    random.shuffle(configs)
    configs = configs[:10]
    puzzles = p.generate(configs, 3, 3)
    print(puzzles.shape, "mean", puzzles.mean(), "stdev", np.std(puzzles))
    plot_image(puzzles[-1], name("{}.png"))
    plot_image(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1),name("{}+noise.png"))
    plot_image(np.round(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1)),name("{}+noise+round.png"))
    plot_grid(puzzles, name("{}s.png"))
    _transitions = p.transitions(3,3,configs=configs)
    print(_transitions.shape)
    transitions_for_show = \
        np.einsum('ba...->ab...',_transitions) \
          .reshape((-1,)+_transitions.shape[2:])
    print(transitions_for_show.shape)
    plot_grid(transitions_for_show, name("{}_transitions.png"))
dump_significance.py (project: latplan, author: guicho271828)
def run(ae,xs):
    zs = ae.encode_binary(xs)
    ys = ae.decode_binary(zs)
    mod_ys = []
    correlations = []
    print(ys.shape)
    print("corrlations:")
    print("bit \ image  {}".format(range(len(xs))))
    for i in range(ae.N):
        mod_zs = np.copy(zs)
        # increase the latent value from 0 to 1 and check the difference
        for j in range(11):
            mod_zs[:,i] = j / 10.0
            mod_ys.append(ae.decode_binary(mod_zs))
        zero_zs,one_zs = np.copy(zs),np.copy(zs)
        zero_zs[:,i] = 0.
        one_zs[:,i] = 1.
        correlation = np.mean(np.square(ae.decode_binary(zero_zs) - ae.decode_binary(one_zs)),
                              axis=(1,2))
        correlations.append(correlation)
        print("{:>5} {}".format(i,correlation))
    plot_grid2(np.einsum("ib...->bi...",np.array(mod_ys)).reshape((-1,)+ys.shape[1:]),
               w=11,path=ae.local("dump_significance.png"))
    return np.einsum("ib->bi",correlations)
scf.py (project: McMurchie-Davidson, author: jjgoings)
def buildFock(self):
        """Routine to build the AO basis Fock matrix"""
        if self.direct:
            if self.incFockRst: # restart incremental fock build?
                self.G = formPT(self.P,np.zeros_like(self.P),self.bfs,
                                self.nbasis,self.screen,self.scrTol)
                self.G = 0.5*(self.G + self.G.T) 
                self.F = self.Core.astype('complex') + self.G
            else:
                self.G = formPT(self.P,self.P_old,self.bfs,self.nbasis,
                                self.screen,self.scrTol)
                self.G = 0.5*(self.G + self.G.T) 
                self.F = self.F_old + self.G

        else:
            self.J = np.einsum('pqrs,sr->pq', self.TwoE.astype('complex'),self.P)
            self.K = np.einsum('psqr,sr->pq', self.TwoE.astype('complex'),self.P)
            self.G = 2.*self.J - self.K
            self.F = self.Core.astype('complex') + self.G
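
In the non-direct branch, the Coulomb matrix J and exchange matrix K are contractions of the stored two-electron integrals with the density matrix. A hedged sketch with random stand-in arrays (not real integrals) that also shows the tensordot equivalent of the J contraction:

import numpy as np

rng = np.random.default_rng(0)
n = 4
eri = rng.standard_normal((n, n, n, n))   # stand-in for self.TwoE
P = rng.standard_normal((n, n))           # stand-in density matrix
J = np.einsum('pqrs,sr->pq', eri, P)      # J_pq = sum_rs (pq|rs) P_sr
K = np.einsum('psqr,sr->pq', eri, P)      # K_pq = sum_rs (ps|qr) P_sr
assert np.allclose(J, np.tensordot(eri, P, axes=([2, 3], [1, 0])))
G = 2.0 * J - K                           # closed-shell two-electron part, as above
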
polyCrystal.py (project: Graphene, author: ashivni)
def pointsInRegion(regNum, vor, p, overlap=0.0):
    """
    returns the subset of points p that are inside the regNum region of the voronoi object
    vor. The boundaries of the region are extended by an amount given by 'overlap'.
    """
    reg = vor.regions[vor.point_region[regNum]]  # region associated with the point
    if -1 in reg:
        raise Exception('Open region associated with generator')
    nVerts = len(reg)  # number of vertices in the region
    p0 = vor.points[regNum]

    for i in range(len(reg)):
        vert1, vert2 = vor.vertices[reg[i]], vor.vertices[reg[(i + 1) % len(reg)]]
        dr = vert1 - vert2  # edge
        dr = dr / numpy.linalg.norm(dr)  # normalize
        dn = numpy.array([dr[1], -dr[0]])  # normal to edge
        dn = dn if numpy.dot(dn, vert2 - p0[:2]) > 0 else -dn  # orient so that the normal is outwards
        d1 = numpy.einsum('i,ji', dn, vert2 + dn * overlap - p[:, :2])
        p = p[d1 * numpy.dot(dn, vert2 - p0[:2]) > 0]

    return p
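
The numpy.einsum('i,ji', dn, X) pattern with implicit output is a matrix-vector product: it projects every row of X onto dn, which is what decides on which side of the (shifted) edge each point lies. A tiny check:

import numpy

dn = numpy.array([0.0, 1.0])
pts = numpy.array([[1.0, 2.0], [3.0, -1.0], [0.5, 0.0]])
d = numpy.einsum('i,ji', dn, pts)          # signed components along dn
assert numpy.allclose(d, pts @ dn)         # [2., -1., 0.]
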
test_einsum.py (project: radar, author: amoose136)
def test_einsum_misc(self):
        # This call used to crash because of a bug in
        # PyArray_AssignZero
        a = np.ones((1, 2))
        b = np.ones((2, 2, 1))
        assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])

        # The iterator had an issue with buffering this reduction
        a = np.ones((5, 12, 4, 2, 3), np.int64)
        b = np.ones((5, 12, 11), np.int64)
        assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
                        np.einsum('ijklm,ijn->', a, b))

        # Issue #2027, was a problem in the contiguous 3-argument
        # inner loop implementation
        a = np.arange(1, 3)
        b = np.arange(1, 5).reshape(2, 2)
        c = np.arange(1, 9).reshape(4, 2)
        assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
                    [[[1,  3], [3,  9], [5, 15], [7, 21]],
                    [[8, 16], [16, 32], [24, 48], [32, 64]]])
test_einsum.py (project: radar, author: amoose136)
def test_einsum_all_contig_non_contig_output(self):
        # Issue gh-5907, tests that the all contiguous special case
        # actually checks the contiguity of the output
        x = np.ones((5, 5))
        out = np.ones(10)[::2]
        correct_base = np.ones(10)
        correct_base[::2] = 5
        # Always worked (inner iteration is done with 0-stride):
        np.einsum('mi,mi,mi->m', x, x, x, out=out)
        assert_array_equal(out.base, correct_base)
        # Example 1:
        out = np.ones(10)[::2]
        np.einsum('im,im,im->m', x, x, x, out=out)
        assert_array_equal(out.base, correct_base)
        # Example 2, buffering causes x to be contiguous but
        # special cases do not catch the operation before:
        out = np.ones((2, 2, 2))[..., 0]
        correct_base = np.ones((2, 2, 2))
        correct_base[..., 0] = 2
        x = np.ones((2, 2), np.float32)
        np.einsum('ij,jk->ik', x, x, out=out)
        assert_array_equal(out.base, correct_base)
augmentation.py (project: gconv_experiments, author: tscohen)
def dihedral_transform_batch(x):

    g = np.random.randint(low=0, high=8, size=x.shape[0])

    h, w = x.shape[-2:]
    hh = (h - 1) / 2.
    hw = (w - 1) / 2.

    I, J = np.meshgrid(np.linspace(-hh, hh, x.shape[-2]), np.linspace(-hw, hw, x.shape[-1]))
    C = np.r_[[I, J]]
    D4C = np.einsum('...ij,jkl->...ikl', D4, C)
    D4C[:, 0] += hh
    D4C[:, 1] += hw
    D4C = D4C.astype(int)

    x_out = np.empty_like(x)
    for i in range(x.shape[0]):
        I, J = D4C[g[i]]
        x_out[i, :] = x[i][:, J, I]

    return x_out
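
D4 is defined elsewhere in gconv_experiments. A hedged, hypothetical reconstruction (so the '...ij,jkl->...ikl' contraction, which applies every group element to the stacked coordinate grids C, can be run standalone): the eight 2x2 matrices of the dihedral group of the square.

import numpy as np

R = np.array([[0, -1], [1, 0]])            # 90-degree rotation
F = np.array([[1, 0], [0, -1]])            # reflection
D4 = np.array([np.linalg.matrix_power(R, k) @ M
               for M in (np.eye(2, dtype=int), F) for k in range(4)])
print(D4.shape)                            # (8, 2, 2)
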
tools.py (project: quadpy, author: nschloe)
def get_vol(simplex):
    # Compute the volume via the Cayley-Menger determinant
    # <http://mathworld.wolfram.com/Cayley-MengerDeterminant.html>. One
    # advantage is that it can compute the volume of the simplex independently
    # of the dimension of the space in which it is embedded.

    # compute all edge lengths
    edges = numpy.subtract(simplex[:, None], simplex[None, :])
    ei_dot_ej = numpy.einsum('...k,...k->...', edges, edges)

    j = simplex.shape[0] - 1
    a = numpy.empty((j+2, j+2) + ei_dot_ej.shape[2:])
    a[1:, 1:] = ei_dot_ej
    a[0, 1:] = 1.0
    a[1:, 0] = 1.0
    a[0, 0] = 0.0

    a = numpy.moveaxis(a, (0, 1), (-2, -1))
    det = numpy.linalg.det(a)

    vol = numpy.sqrt((-1.0)**(j+1) / 2**j / math.factorial(j)**2 * det)
    return vol
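
A quick usage check of get_vol (assuming numpy and math are imported as in the module): the Cayley-Menger determinant reproduces the area of a 3-4-5 right triangle.

tri = numpy.array([[0.0, 0.0], [3.0, 0.0], [3.0, 4.0]])
print(get_vol(tri))                        # ~6.0, the triangle's area
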
_python_core.py (project: jitcdde, author: neurophysik)
def scalar_product_interval(anchors, indizes_1, indizes_2):
    q = (anchors[1][0]-anchors[0][0])

    vector_1 = np.vstack([
        anchors[0][1][indizes_1],     # a_1
        anchors[0][2][indizes_1] * q, # b_1
        anchors[1][1][indizes_1],     # c_1
        anchors[1][2][indizes_1] * q, # d_1
    ])

    vector_2 = np.vstack([
        anchors[0][1][indizes_2],     # a_2
        anchors[0][2][indizes_2] * q, # b_2
        anchors[1][1][indizes_2],     # c_2
        anchors[1][2][indizes_2] * q, # d_2
    ])

    return np.einsum(
        vector_1, [0,2],
        sp_matrix, [0,1],
        vector_2, [1,2]
        )*q
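
This call uses einsum's interleaved operand/subscript-list form; every label appears in two operands, so all axes are contracted and a single scalar comes back. A hedged sketch with random stand-in arrays (sp_matrix itself is defined elsewhere in the module):

import numpy as np

rng = np.random.default_rng(1)
v1 = rng.standard_normal((4, 3))           # plays the role of vector_1
M = rng.standard_normal((4, 4))            # plays the role of sp_matrix
v2 = rng.standard_normal((4, 3))           # plays the role of vector_2
a = np.einsum(v1, [0, 2], M, [0, 1], v2, [1, 2])
b = np.einsum('ik,ij,jk->', v1, M, v2)     # same full contraction in string form
assert np.allclose(a, b)
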
_python_core.py (project: jitcdde, author: neurophysik)
def scalar_product_partial(anchors, indizes_1, indizes_2, start):
    q = (anchors[1][0]-anchors[0][0])
    z = (start-anchors[1][0]) / q

    vector_1 = np.vstack([
        anchors[0][1][indizes_1],     # a_1
        anchors[0][2][indizes_1] * q, # b_1
        anchors[1][1][indizes_1],     # c_1
        anchors[1][2][indizes_1] * q, # d_1
    ])

    vector_2 = np.vstack([
        anchors[0][1][indizes_2],     # a_2
        anchors[0][2][indizes_2] * q, # b_2
        anchors[1][1][indizes_2],     # c_2
        anchors[1][2][indizes_2] * q, # d_2
    ])

    return np.einsum(
        vector_1, [0,2],
        partial_sp_matrix(z), [0,1],
        vector_2, [1,2]
        )*q
methods.py (project: tensorpac, author: EtienneCmb)
def mvl(pha, amp, optimize):
    """Mean Vector Length (Canolty, 2006).

    Parameters
    ----------
    pha : array_like
        Array of phases of shape (npha, ..., npts)

    amp : array_like
        Array of amplitudes of shape (namp, ..., npts)

    Returns
    -------
    pac : array_like
        PAC of shape (namp, npha, ...)
    """
    # Number of time points :
    npts = pha.shape[-1]
    return np.abs(np.einsum('i...j, k...j->ik...', amp, np.exp(1j * pha),
                            optimize=optimize)) / npts
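
Stripped of the ellipsis dimensions, the 'i...j, k...j->ik...' pattern is a matrix product over the time axis. A small sketch with random stand-in phases and amplitudes:

import numpy as np

rng = np.random.default_rng(2)
pha = rng.uniform(-np.pi, np.pi, (2, 100))     # (npha, npts)
amp = rng.random((3, 100))                     # (namp, npts)
pac = np.abs(np.einsum('i...j, k...j->ik...', amp, np.exp(1j * pha))) / pha.shape[-1]
assert np.allclose(pac, np.abs(amp @ np.exp(1j * pha).T) / pha.shape[-1])
print(pac.shape)                               # (3, 2): one value per (amp, pha) pair
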
methods.py (project: tensorpac, author: EtienneCmb)
def ps(pha, amp, optimize):
    """Phase Synchrony (Penny, 2008; Cohen, 2008).

    Parameters
    ----------
    pha : array_like
        Array of phases of shape (npha, ..., npts)

    amp : array_like
        Array of amplitudes of shape (namp, ..., npts)

    Returns
    -------
    pac : array_like
        PAC of shape (namp, npha, ...)
    """
    # Number of time points :
    npts = pha.shape[-1]
    pac = np.einsum('i...j, k...j->ik...', np.exp(-1j * amp), np.exp(1j * pha),
                    optimize=optimize)
    return np.abs(pac) / npts
acquisition.py (project: xdesign, author: tomography)
def half_space(self):
        """Return the half space polytope respresentation of the infinite
        beam."""
        # add half beam width along the normal direction to each of the points
        half = self.normal * self.size / 2
        edges = [Line(self.p1 + half, self.p2 + half),
                 Line(self.p1 - half, self.p2 - half)]

        A = np.ndarray((len(edges), self.dim))
        B = np.ndarray(len(edges))

        for i in range(0, 2):
            A[i, :], B[i] = edges[i].standard

            # test for positive or negative side of line
            if np.einsum('i, i', self.p1._x, A[i, :]) > B[i]:
                A[i, :] = -A[i, :]
                B[i] = -B[i]

        p = pt.Polytope(A, B)
        return p
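
The np.einsum('i, i', ...) call is a plain dot product, used here to test which side of each shifted edge the point p1 falls on. A one-line check of the pattern:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
assert np.isclose(np.einsum('i, i', a, b), np.dot(a, b))   # 32.0
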
gpr_autoreg_example.py (project: geepee, author: thangbui)
def forward_prop_random_thru_post_mm(self, model, mx, vx, mu, Su):
        Kuu_noiseless = compute_kernel(
            2 * model.ls, 2 * model.sf, model.zu, model.zu)
        Kuu = Kuu_noiseless + np.diag(jitter * np.ones((self.M, )))
        # TODO: remove inv
        Kuuinv = np.linalg.inv(Kuu)
        A = np.dot(Kuuinv, mu)
        Smm = Su + np.outer(mu, mu)
        B_sto = np.dot(Kuuinv, np.dot(Smm, Kuuinv)) - Kuuinv
        psi0 = np.exp(2.0 * model.sf)
        psi1, psi2 = compute_psi_weave(
            2 * model.ls, 2 * model.sf, mx, vx, model.zu)
        mout = np.einsum('nm,md->nd', psi1, A)
        Bpsi2 = np.einsum('ab,nab->n', B_sto, psi2)[:, np.newaxis]
        vout = psi0 + Bpsi2 - mout**2
        return mout, vout
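
Two contractions recur throughout these geepee snippets: 'nm,md->nd' is an ordinary matrix product, and 'ab,nab->n' sums each psi2 slice elementwise against B. A standalone sketch with random stand-in arrays:

import numpy as np

rng = np.random.default_rng(3)
psi1 = rng.standard_normal((7, 5))         # (N, M)
A = rng.standard_normal((5, 2))            # (M, D)
assert np.allclose(np.einsum('nm,md->nd', psi1, A), psi1 @ A)

B = rng.standard_normal((5, 5))            # stand-in for B_sto, (M, M)
psi2 = rng.standard_normal((7, 5, 5))      # (N, M, M)
out = np.einsum('ab,nab->n', B, psi2)
assert np.allclose(out, (B * psi2).sum(axis=(1, 2)))
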
base_models.py (project: geepee, author: thangbui)
def _forward_prop_deterministic_thru_post(self, x, return_info=False):
        """Propagate deterministic inputs thru posterior

        Args:
            x (float): input values, size K x Din
            return_info (bool, optional): Description

        Returns:
            float, size K x Dout: output means
            float, size K x Dout: output variances
        """
        psi0 = np.exp(2 * self.sf)
        psi1 = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        mout = np.einsum('nm,dm->nd', psi1, self.A)
        Bpsi2 = np.einsum('dab,na,nb->nd', self.B_det, psi1, psi1)
        vout = psi0 + Bpsi2
        if return_info:
            return mout, vout, psi1
        else:
            return mout, vout
base_models.py (project: geepee, author: thangbui)
def _forward_prop_random_thru_post_mm(self, mx, vx, return_info=False):
        """Propagate uncertain inputs thru posterior, using Moment Matching

        Args:
            mx (float): input means, size K x Din
            vx (TYPE): input variances, size K x Din
            return_info (bool, optional): Description

        Returns:
            float, size K x Dout: output means
            float, size K x Dout: output variances
        """
        psi0 = np.exp(2.0 * self.sf)
        psi1, psi2 = compute_psi_weave(
            2 * self.ls, 2 * self.sf, mx, vx, self.zu)
        mout = np.einsum('nm,dm->nd', psi1, self.A)
        Bpsi2 = np.einsum('dab,nab->nd', self.B_sto, psi2)
        vout = psi0 + Bpsi2 - mout**2
        if return_info:
            return mout, vout, psi1, psi2
        else:
            return mout, vout
base_models.py (project: geepee, author: thangbui)
def sample(self, x):
        """Summary

        Args:
            x (TYPE): Description

        Returns:
            TYPE: Description
        """
        Su = self.Su
        mu = self.mu
        Lu = np.linalg.cholesky(Su)
        epsilon = np.random.randn(self.Dout, self.M)
        u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)

        kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
        kff += np.diag(JITTER * np.ones(x.shape[0]))
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        qfu = np.dot(kfu, self.Kuuinv)
        mf = np.einsum('nm,dm->nd', qfu, u_sample)
        vf = kff - np.dot(qfu, kfu.T)
        Lf = np.linalg.cholesky(vf)
        epsilon = np.random.randn(x.shape[0], self.Dout)
        f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
        return f_sample
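
The 'dab,db->da' contraction draws one sample per output dimension: a batched matrix-vector product of each Cholesky factor with its noise vector. A sketch showing the matmul equivalent:

import numpy as np

rng = np.random.default_rng(4)
Lu = rng.standard_normal((3, 4, 4))        # (Dout, M, M) stand-in factors
eps = rng.standard_normal((3, 4))          # (Dout, M) noise
u = np.einsum('dab,db->da', Lu, eps)
assert np.allclose(u, np.matmul(Lu, eps[..., None])[..., 0])
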
aep_models.py (project: geepee, author: thangbui)
def _forward_prop_deterministic_thru_cav(self, x):
        """Propagate deterministic inputs thru cavity

        Args:
            x (float): input values, size K x Din

        Returns:
            float, size K x Dout: output means
            float, size K x Dout: output variances
            float, size K x M: cross covariance matrix
        """
        kff = np.exp(2 * self.sf)
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        mout = np.einsum('nm,dm->nd', kfu, self.Ahat)
        Bkfukuf = np.einsum('dab,na,nb->nd', self.Bhat_det, kfu, kfu)
        vout = kff + Bkfukuf
        return mout, vout, kfu
aep_models.py (project: geepee, author: thangbui)
def _forward_prop_random_thru_cav_mm(self, mx, vx):
        """Propagate uncertain inputs thru cavity, using simple Moment Matching

        Args:
            mx (float): input means, size K x Din
            vx (TYPE): input variances, size K x Din

        Returns:
            output means and variances, and intermediate info for backprop
        """
        psi0 = np.exp(2 * self.sf)
        psi1, psi2 = compute_psi_weave(
            2 * self.ls, 2 * self.sf, mx, vx, self.zu)
        mout = np.einsum('nm,dm->nd', psi1, self.Ahat)
        Bhatpsi2 = np.einsum('dab,nab->nd', self.Bhat_sto, psi2)
        vout = psi0 + Bhatpsi2 - mout**2
        return mout, vout, psi1, psi2
kernels.py (project: geepee, author: thangbui)
def psi1compDer(dL_dpsi1, _psi1, variance, lengthscale, Z, mu, S):
    # here are the "statistics" for psi1
    # Produced intermediate results: dL_dparams w.r.t. psi1
    # _dL_dvariance     1
    # _dL_dlengthscale  Q
    # _dL_dZ            MxQ
    # _dL_dgamma        NxQ
    # _dL_dmu           NxQ
    # _dL_dS            NxQ

    lengthscale2 = np.square(lengthscale)

    Lpsi1 = dL_dpsi1 * _psi1
    Zmu = Z[None, :, :] - mu[:, None, :]  # NxMxQ
    denom = 1. / (S + lengthscale2)
    Zmu2_denom = np.square(Zmu) * denom[:, None, :]  # NxMxQ
    _dL_dvar = Lpsi1.sum() / variance
    _dL_dmu = np.einsum('nm,nmq,nq->nq', Lpsi1, Zmu, denom)
    _dL_dS = np.einsum('nm,nmq,nq->nq', Lpsi1, (Zmu2_denom - 1.), denom) / 2.
    _dL_dZ = -np.einsum('nm,nmq,nq->mq', Lpsi1, Zmu, denom)
    _dL_dl = np.einsum('nm,nmq,nq->q', Lpsi1, (Zmu2_denom +
                                               (S / lengthscale2)[:, None, :]), denom * lengthscale)

    return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
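
The three-operand pattern 'nm,nmq,nq->nq' sums only over m, so it factors into a two-operand contraction followed by an elementwise scaling. A quick numerical check with stand-in arrays:

import numpy as np

rng = np.random.default_rng(5)
N, M, Q = 6, 4, 3
Lpsi1 = rng.standard_normal((N, M))
Zmu = rng.standard_normal((N, M, Q))
denom = rng.standard_normal((N, Q))
g = np.einsum('nm,nmq,nq->nq', Lpsi1, Zmu, denom)
assert np.allclose(g, denom * (Lpsi1[:, :, None] * Zmu).sum(axis=1))
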
kernels.py (project: geepee, author: thangbui)
def kfucompDer(dL_dkfu, kfu, variance, lengthscale, Z, mu, grad_x):
    # here are the "statistics" for psi1
    # Produced intermediate results: dL_dparams w.r.t. psi1
    # _dL_dvariance     1
    # _dL_dlengthscale  Q
    # _dL_dZ            MxQ

    lengthscale2 = np.square(lengthscale)

    Lpsi1 = dL_dkfu * kfu
    Zmu = Z[None, :, :] - mu[:, None, :]  # NxMxQ
    _dL_dvar = Lpsi1.sum() / variance
    _dL_dZ = -np.einsum('nm,nmq->mq', Lpsi1, Zmu / lengthscale2)
    _dL_dl = np.einsum('nm,nmq->q', Lpsi1, np.square(Zmu) / lengthscale**3)
    if grad_x:
        _dL_dx = np.einsum('nm,nmq->nq', Lpsi1, Zmu / lengthscale2)
        return _dL_dvar, _dL_dl, _dL_dZ, _dL_dx
    else:
        return _dL_dvar, _dL_dl, _dL_dZ
pep_models.py (project: geepee, author: thangbui)
def _forward_prop_deterministic_thru_cav(self, n, x, alpha):
        """Summary

        Args:
            n (TYPE): Description
            x (TYPE): Description
            alpha (TYPE): Description

        Returns:
            TYPE: Description
        """
        muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha)
        Kuuinv = self.Kuuinv
        Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)
        Bhat = np.einsum(
            'ab,ndbc->ndac',
            Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv
        kff = np.exp(2 * self.sf)
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        mout = np.einsum('nm,ndm->nd', kfu, Ahat)
        Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu)
        vout = kff + Bkfukuf
        extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat]
        return mout, vout, extra_res
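
The 'ab,ndb->nda' contraction applies Kuuinv along the trailing axis of muhat, i.e. a broadcasted matrix product with Kuuinv transposed. A sketch with stand-in arrays:

import numpy as np

rng = np.random.default_rng(6)
Kuuinv = rng.standard_normal((4, 4))
muhat = rng.standard_normal((5, 2, 4))     # (N, Dout, M)
Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)
assert np.allclose(Ahat, muhat @ Kuuinv.T)
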
pep_models.py (project: geepee, author: thangbui)
def _forward_prop_deterministic_thru_post(self, x):
        """Summary

        Args:
            x (TYPE): Description

        Returns:
            TYPE: Description
        """
        Kuuinv = self.Kuuinv
        A = np.einsum('ab,db->da', Kuuinv, self.mu)
        B = np.einsum(
            'ab,dbc->dac',
            Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
        kff = np.exp(2 * self.sf)
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        mout = np.einsum('nm,dm->nd', kfu, A)
        Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
        vout = kff + Bpsi2
        return mout, vout

    # TODO
pep_models.py (project: geepee, author: thangbui)
def _forward_prop_random_thru_post_mm(self, mx, vx):
        """Summary

        Args:
            mx (TYPE): Description
            vx (TYPE): Description

        Returns:
            TYPE: Description
        """
        Kuuinv = self.Kuuinv
        A = np.einsum('ab,db->da', Kuuinv, self.mu)
        Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu)
        B = np.einsum(
            'ab,dbc->dac',
            Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv
        psi0 = np.exp(2.0 * self.sf)
        psi1, psi2 = compute_psi_weave(
            2 * self.ls, 2 * self.sf, mx, vx, self.zu)
        mout = np.einsum('nm,dm->nd', psi1, A)
        Bpsi2 = np.einsum('dab,nab->nd', B, psi2)
        vout = psi0 + Bpsi2 - mout**2
        return mout, vout
pep_models.py (project: geepee, author: thangbui)
def sample(self, x):
        """Summary

        Args:
            x (TYPE): Description

        Returns:
            TYPE: Description
        """
        Su = self.Su
        mu = self.mu
        Lu = np.linalg.cholesky(Su)
        epsilon = np.random.randn(self.Dout, self.M)
        u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)

        kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
        kff += np.diag(JITTER * np.ones(x.shape[0]))
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        qfu = np.dot(kfu, self.Kuuinv)
        mf = np.einsum('nm,dm->nd', qfu, u_sample)
        vf = kff - np.dot(qfu, kfu.T)
        Lf = np.linalg.cholesky(vf)
        epsilon = np.random.randn(x.shape[0], self.Dout)
        f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
        return f_sample
pep_models.py (project: geepee, author: thangbui)
def compute_cavity(self, n, alpha=1.0):
        """Summary

        Args:
            n (TYPE): Description
            alpha (float, optional): Description

        Returns:
            TYPE: Description
        """
        # compute the leave-one-out moments
        t1n = self.t1[n, :, :]
        t2n = self.t2[n, :, :, :]
        Suinvhat = self.Suinv - alpha * t2n
        SuinvMuhat = self.SuinvMu - alpha * t1n
        Suhat = np.linalg.inv(Suinvhat)
        muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat)
        return muhat, Suhat, SuinvMuhat, Suinvhat
pep_models.py (project: geepee, author: thangbui)
def forward_prop_thru_post(self, x):
        """Summary

        Args:
            x (TYPE): Description

        Returns:
            TYPE: Description
        """
        Kuuinv = self.Kuuinv
        A = np.einsum('ab,db->da', Kuuinv, self.mu)
        B = np.einsum(
            'ab,dbc->dac',
            Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
        kff = np.exp(2 * self.sf)
        kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
        mout = np.einsum('nm,dm->nd', kfu, A)
        Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
        vout = kff + Bpsi2
        return mout, vout
pep_models.py (project: geepee, author: thangbui)
def update_posterior(self, x_train=None, new_hypers=False):
        """Summary

        Returns:
            TYPE: Description
        """
        # compute the posterior approximation
        if new_hypers and x_train is not None:
            Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu)
            KuuinvKuf = np.dot(self.Kuuinv, Kfu.T)
            self.Kfu = Kfu
            self.KuuinvKuf = KuuinvKuf
            self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train)

        KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances)
        T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf)
        T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances)
        Vinv = self.Kuuinv + T2u
        self.Suinv = Vinv
        self.Su = np.linalg.inv(Vinv)
        self.mu = np.einsum('dab,db->da', self.Su, T1u)
        self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu)
        self.beta = self.Kuuinv - np.einsum('ab,dbc->dac', 
            self.Kuuinv,
            np.einsum('dab,bc->dac', self.Su, self.Kuuinv))

