Python pinv() usage examples from open-source projects

math.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
import numpy as np
from numpy import linalg


def solve(a, b):
    """Returns the solution of A X = B."""
    try:
        return linalg.solve(a, b)
    except linalg.LinAlgError:
        # Singular system: fall back to the least-squares solution
        # given by the Moore-Penrose pseudo-inverse.
        return np.dot(linalg.pinv(a), b)
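
A minimal usage sketch (not from the project; assumes the `solve` above is in scope): on a rank-deficient system `linalg.solve` raises `LinAlgError`, and the fallback returns the least-squares solution instead.

import numpy as np
from numpy import linalg

a = np.array([[1.0, 2.0],
              [2.0, 4.0]])    # rank 1, so linalg.solve raises LinAlgError
b = np.array([1.0, 2.0])      # lies in the column space of a

x = solve(a, b)               # falls through to np.dot(linalg.pinv(a), b)
print(np.allclose(a @ x, b))  # True
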
math.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def inv(a):
    """Returns the inverse of A."""
    try:
        return np.linalg.inv(a)
    except np.linalg.LinAlgError:
        # Singular matrix: fall back to the pseudo-inverse.
        return np.linalg.pinv(a)
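
A quick check of the same fallback idea (illustrative, not project code): for a nonsingular matrix `pinv` agrees with `inv`, so the `except` branch only changes behaviour on singular input.

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 3.0]])    # nonsingular
print(np.allclose(np.linalg.inv(a), np.linalg.pinv(a)))  # True

s = np.array([[1.0, 2.0],
              [2.0, 4.0]])    # singular: np.linalg.inv raises
print(inv(s))                 # the fallback returns the pseudo-inverse
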
test_linalg.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def do(self, a, b):
        a_ginv = linalg.pinv(a)
        assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0]))
        assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
tools.py (project: parametrix, author: vincentchoqueuse)
import numpy as np
import numpy.linalg as lg


def estimate_X_LS(y, H):
    """Estimator of X for the linear model using the least-squares estimator.

        .. math::
            \\widehat{\\textbf{X}} = \\textbf{H}^{\\dagger}\\textbf{Y}

    """
    H = np.matrix(H)
    X = lg.pinv(H) * y  # H^dagger @ y: the least-squares solution
    return X
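
A hypothetical usage sketch (the data and names below are illustrative): simulate Y = H X + noise and recover X with the estimator above.

import numpy as np

np.random.seed(0)
N, L = 20, 3
H = np.random.randn(N, L)
x_true = np.array([[1.0], [-2.0], [0.5]])
y = H @ x_true + 0.01 * np.random.randn(N, 1)

x_hat = estimate_X_LS(y, H)
print(np.allclose(x_hat, x_true, atol=0.1))  # True for low noise
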
model.py (project: linearmodels, author: bashtage)
def estimate_parameters(x, y, z, kappa):
        """
        Parameter estimation without error checking

        Parameters
        ----------
        x : ndarray
            Regressor matrix (nobs by nvar)
        y : ndarray
            Regressand matrix (nobs by 1)
        z : ndarray
            Instrument matrix (nobs by ninstr)
        kappa : scalar
            Parameter value for k-class estimator

        Returns
        -------
        params : ndarray
            Estimated parameters (nvar by 1)

        Notes
        -----
        Exposed as a static method to facilitate estimation with other data,
        e.g., bootstrapped samples.  Performs no error checking.
        """
        pinvz = pinv(z)
        p1 = (x.T @ x) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ x))
        p2 = (x.T @ y) * (1 - kappa) + kappa * ((x.T @ z) @ (pinvz @ y))
        return inv(p1) @ p2
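
A sanity-check sketch (assumes `estimate_parameters` is callable as a plain function; the random data is illustrative): `kappa=0` collapses the expressions to OLS, and `kappa=1` gives two-stage least squares.

import numpy as np
from numpy.linalg import inv, pinv

np.random.seed(0)
n = 500
z = np.random.randn(n, 3)                          # instruments
x = z @ np.array([[1.0], [0.5], [-0.2]]) + np.random.randn(n, 1)
y = x @ np.array([[2.0]]) + np.random.randn(n, 1)

ols = inv(x.T @ x) @ (x.T @ y)
assert np.allclose(estimate_parameters(x, y, z, kappa=0), ols)

xhat = z @ (pinv(z) @ x)                           # first-stage fitted values
tsls = inv(xhat.T @ x) @ (xhat.T @ y)
assert np.allclose(estimate_parameters(x, y, z, kappa=1), tsls)
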
model.py (project: linearmodels, author: bashtage)
def _estimate_kappa(self):
        y, x, z = self._wy, self._wx, self._wz
        is_exog = self._regressor_is_exog
        e = c_[y, x[:, ~is_exog]]
        x1 = x[:, is_exog]
        if x1.shape[1] == 0:
            # No exogenous regressors
            return 1

        ez = e - z @ (pinv(z) @ e)
        ex1 = e - x1 @ (pinv(x1) @ e)

        vpmzv_sqinv = inv_sqrth(ez.T @ ez)
        q = vpmzv_sqinv @ (ex1.T @ ex1) @ vpmzv_sqinv
        return min(eigvalsh(q))
test_model.py (project: linearmodels, author: bashtage)
def data():
    n, q, k, p = 1000, 2, 5, 3
    np.random.seed(12345)
    clusters = np.random.randint(0, 10, n)
    rho = 0.5
    r = np.zeros((k + p + 1, k + p + 1))
    r.fill(rho)
    r[-1, 2:] = 0
    r[2:, -1] = 0
    r[-1, -1] = 0.5
    r += np.eye(9) * 0.5  # 9 == k + p + 1
    v = np.random.multivariate_normal(np.zeros(r.shape[0]), r, n)
    x = v[:, :k]
    z = v[:, k:k + p]
    e = v[:, [-1]]
    params = np.arange(1, k + 1) / k
    params = params[:, None]
    y = x @ params + e
    xhat = z @ np.linalg.pinv(z) @ x
    nobs, nvar = x.shape
    s2 = e.T @ e / nobs
    s2_debiased = e.T @ e / (nobs - nvar)
    v = xhat.T @ xhat / nobs
    vinv = np.linalg.inv(v)
    kappa = 0.99
    vk = (x.T @ x * (1 - kappa) + kappa * xhat.T @ xhat) / nobs
    return AttrDict(nobs=nobs, e=e, x=x, y=y, z=z, xhat=xhat,
                    params=params, s2=s2, s2_debiased=s2_debiased,
                    clusters=clusters, nvar=nvar, v=v, vinv=vinv, vk=vk,
                    kappa=kappa, dep=y, exog=x[:, q:], endog=x[:, :q],
                    instr=z)
test_model.py (project: linearmodels, author: bashtage)
def test_2sls_direct(data):
    mod = IV2SLS(data.dep, add_constant(data.exog), data.endog, data.instr)
    res = mod.fit()
    x = np.c_[add_constant(data.exog), data.endog]
    z = np.c_[add_constant(data.exog), data.instr]
    y = data.y
    xhat = z @ pinv(z) @ x
    params = pinv(xhat) @ y
    assert_allclose(res.params, params.ravel())
    # This is just a quick smoke check of results
    get_all(res)
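
The direct computation works because `xhat = Pz @ x` with `Pz` idempotent, so `xhat.T @ x == xhat.T @ xhat` and `pinv(xhat) @ y` equals the usual 2SLS estimator. A standalone numerical check (illustrative data):

import numpy as np
from numpy.linalg import inv, pinv

np.random.seed(1)
n = 200
z = np.random.randn(n, 4)
x = z[:, :2] + 0.5 * np.random.randn(n, 2)
y = x @ np.array([[1.0], [-1.0]]) + np.random.randn(n, 1)

xhat = z @ pinv(z) @ x
tsls = inv(xhat.T @ x) @ (xhat.T @ y)
assert np.allclose(pinv(xhat) @ y, tsls)
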
test_data.py (project: linearmodels, author: bashtage)
def test_demean_both_large_t():
    data = PanelData(pd.Panel(np.random.standard_normal((1, 100, 10))))
    demeaned = data.demean('both')

    df = data.dataframe
    no_index = df.reset_index()
    cat = pd.Categorical(no_index[df.index.levels[0].name])
    d1 = pd.get_dummies(cat, drop_first=False).astype(np.float64)
    cat = pd.Categorical(no_index[df.index.levels[1].name])
    d2 = pd.get_dummies(cat, drop_first=True).astype(np.float64)
    d = np.c_[d1.values, d2.values]
    dummy_demeaned = df.values - d @ pinv(d) @ df.values
    assert_allclose(1 + np.abs(demeaned.values2d),
                    1 + np.abs(dummy_demeaned))
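
The expression `df.values - d @ pinv(d) @ df.values` is the projection residual on the dummy matrix, which is exactly within-group demeaning. A minimal standalone illustration (names are illustrative):

import numpy as np
from numpy.linalg import pinv

np.random.seed(0)
x = np.random.randn(12, 1)
groups = np.repeat(np.arange(3), 4)                    # three groups of four
d = (groups[:, None] == np.arange(3)).astype(float)    # dummy matrix

resid = x - d @ (pinv(d) @ x)                          # projection residual
means = np.vstack([x[groups == g].mean(axis=0) for g in groups])
assert np.allclose(resid, x - means)                   # same as demeaning
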
test_data.py (project: linearmodels, author: bashtage)
def test_mean_weighted(data):
    x = PanelData(data.x)
    w = PanelData(data.w)
    missing = x.isnull | w.isnull
    x.drop(missing)
    w.drop(missing)
    entity_mean = x.mean('entity', weights=w)
    c = x.index.levels[0][x.index.labels[0]]
    d = pd.get_dummies(pd.Categorical(c, ordered=True))
    d = d[entity_mean.index]
    d = d.values
    root_w = np.sqrt(w.values2d)
    wx = root_w * x.values2d
    wd = d * root_w
    mu = np.linalg.lstsq(wd, wx)[0]
    assert_allclose(entity_mean, mu)

    time_mean = x.mean('time', weights=w)
    c = x.index.levels[1][x.index.labels[1]]
    d = pd.get_dummies(pd.Categorical(c, ordered=True))
    d = d[time_mean.index]
    d = d.values
    root_w = np.sqrt(w.values2d)
    wx = root_w * x.values2d
    wd = d * root_w
    mu = pinv(wd) @ wx
    assert_allclose(time_mean, mu)
explore.py (project: Power-Consumption-Prediction, author: YoungGod)
import numpy as np
from numpy.linalg import pinv
import matplotlib.pyplot as plt


def corr_fit_plot(s, k):
    """
    Scatter-plot the series against itself at lag k and fit a
    least-squares line; returns the fitted slope.
    """
    n = len(s)
    x = []; y = []
    for i in range(0,n-k):
        x.append([s[i]])
        y.append([s[i+k]])
    plt.scatter(x,y)

#    # using sklearn
#    re = LinearRegression()
#    re.fit(x,y)
#    pred = re.predict(x)
#    coef = re.coef_
#    plt.plot(x,pred,'r-')

    # least square by myself
    x = np.array(x)
    y = np.array(y)
    one = np.ones((x.shape[0],1))
    x = np.concatenate((one,np.array(x)),axis=1)
    coefs = np.dot(pinv(x),y)
    pred = coefs[0]*x[:,0] + coefs[1]*x[:,1]
    coef = coefs[1]
    plt.plot(x[:,1],pred,'r-')

    plt.title('Corr=%s'%coef+' Lag=%s'%k)
    plt.show()

    return coef

#corr_fit_plot(s_power_consumption.values,1)
#corr_fit_plot(s_power_consumption.values,3)
#corr_fit_plot(s_power_consumption.values,5)
#corr_fit_plot(s_power_consumption.values,7)
networks.py (project: onsager_deep_learning, author: mborgerding)
def build_LVAMP_dense(prob,T,shrink,iid=False):
    """ Builds the non-SVD (i.e. dense) parameterization of LVAMP
    and returns a list of trainable points(name,xhat_,newvars)
    """
    eta,theta_init = shrinkage.get_shrinkage_function(shrink)
    layers=[]
    A = prob.A
    M,N = A.shape

    Hinit = np.matmul(prob.xinit,la.pinv(prob.yinit) )
    H_ = tf.Variable(Hinit,dtype=tf.float32,name='H0')
    xhat_lin_ = tf.matmul(H_,prob.y_)
    layers.append( ('Linear',xhat_lin_,None) )

    if shrink=='pwgrid':
        theta_init = np.linspace(.01,.99,15).astype(np.float32)
    vs_def = np.array(1,dtype=np.float32)
    if not iid:
        theta_init = np.tile( theta_init ,(N,1,1))
        vs_def = np.tile( vs_def ,(N,1))

    theta_ = tf.Variable(theta_init,name='theta0',dtype=tf.float32)
    vs_ = tf.Variable(vs_def,name='vs0',dtype=tf.float32)
    rhat_nl_ = xhat_lin_
    rvar_nl_ = vs_ * tf.reduce_sum(prob.y_*prob.y_,0)/N

    xhat_nl_,alpha_nl_ = eta(rhat_nl_ , rvar_nl_,theta_ )
    layers.append( ('LVAMP-{0} T={1}'.format(shrink,1),xhat_nl_, None ) )
    for t in range(1,T):
        alpha_nl_ = tf.reduce_mean( alpha_nl_,axis=0) # each col average dxdr

        gain_nl_ = 1.0 /(1.0 - alpha_nl_)
        rhat_lin_ = gain_nl_ * (xhat_nl_ - alpha_nl_ * rhat_nl_)
        rvar_lin_ = rvar_nl_ * alpha_nl_ * gain_nl_

        H_ = tf.Variable(Hinit,dtype=tf.float32,name='H'+str(t))
        G_ = tf.Variable(.9*np.identity(N),dtype=tf.float32,name='G'+str(t))
        xhat_lin_ = tf.matmul(H_,prob.y_) + tf.matmul(G_,rhat_lin_)

        layers.append( ('LVAMP-{0} lin T={1}'.format(shrink,1+t),xhat_lin_, (H_,G_) ) )

        alpha_lin_ = tf.expand_dims(tf.diag_part(G_),1)

        eps = .5/N
        alpha_lin_ = tf.maximum(eps,tf.minimum(1-eps, alpha_lin_ ) )

        vs_ = tf.Variable(vs_def,name='vs'+str(t),dtype=tf.float32)

        gain_lin_ = vs_ * 1.0/(1.0 - alpha_lin_)
        rhat_nl_ = gain_lin_ * (xhat_lin_ - alpha_lin_ * rhat_lin_)
        rvar_nl_ = rvar_lin_ * alpha_lin_ * gain_lin_

        theta_ = tf.Variable(theta_init,name='theta'+str(t),dtype=tf.float32)

        xhat_nl_,alpha_nl_ = eta(rhat_nl_ , rvar_nl_,theta_ )
        alpha_nl_ = tf.maximum(eps,tf.minimum(1-eps, alpha_nl_ ) )
        layers.append( ('LVAMP-{0}  nl T={1}'.format(shrink,1+t),xhat_nl_, (vs_,theta_,) ) )

    return layers
test_ctypes.py (project: l1l2py, author: slipguru)
def main():

    n = 2000

    # X = np.array([[1,2,9],[3,4,8], [-1, 6, 2]], np.float32)

    X = np.random.normal(size = (n,n))
    X = np.array(X, np.float32)

    Xpinv = np.empty(X.T.shape, np.float32)

    tic = time.time()
    X_pinv1 = la.pinv(X)
    tac = time.time()

    dt1 = tac-tic
    print("la.pinv time: %f" % dt1)

    # print("la.pinv(X):")
    # print(X_pinv1)

    # print(la.eig(X))

    # return

    # print("Original matrix (python):")
    # print X

    # print X.dtype

    tic = time.time()
    X_pinv_cuda = c_bind(X, Xpinv)
    tac = time.time()

    dt2 = tac-tic
    print("cuda.pinv time: %f" % dt2)

    time_ratio = dt1/dt2

    print("Speedup: %f" % time_ratio)

    # print("cuda.pinv(X):")
    # print X_pinv_cuda
test_pinv.py (project: l1l2py, author: slipguru)
import numpy as np
from numpy.linalg import svd, pinv, inv


def main():
    """Compare inv, pinv and an explicit SVD-based pseudo-inverse."""

    A = np.array([[1, -2], [3, 5]])
    A = A.T
    n, p = A.shape

    # print(A)

    u, s, v = svd(A)  # note: numpy returns V^H in the last slot

    A_pinv = pinv(A)
    A_inv = inv(A)

    print(A_inv)

    return  # NOTE: early return; everything below is unreachable

    A_pinv2 = v.T.dot(np.diag(1 / s)).dot(u.T)

    print("intermediate")
    print(v.T.dot(np.diag(1 / s)))

    print("final")
    print(A_pinv2)

    # R1 = A.dot(A_inv).dot(A)
    # R2 = A.dot(A_pinv).dot(A)
    # R3 = A.dot(A_pinv2).dot(A)

    # R1 = A.dot(A_inv)
    # R2 = A.dot(A_pinv)
    # R3 = A.dot(A_pinv2)

    # print R1
    # print R2
    # print R3

    # print s

    # print u
    # print v
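
The unreachable tail reconstructs the pseudo-inverse from the SVD: for A = U S V^H, pinv(A) = V S^{-1} U^T when A has full rank. A standalone check of that identity (a sketch, independent of the script):

import numpy as np
from numpy.linalg import svd, pinv

A = np.array([[1.0, 3.0],
              [-2.0, 5.0]])       # the transposed matrix from the script
u, s, vh = svd(A)                 # numpy returns V^H, not V
assert np.allclose(pinv(A), vh.T @ np.diag(1.0 / s) @ u.T)
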
algorithms.py (project: l1l2py, author: slipguru)
def ridge_regression(data, labels, mu=0.0):
    r"""Implementation of the Regularized Least Squares solver.

    It solves the ridge regression problem with parameter ``mu`` on the
    `l2-norm`.

    Parameters
    ----------
    data : (N, P) ndarray
        Data matrix.
    labels : (N,)  or (N, 1) ndarray
        Labels vector.
    mu : float, optional (default is `0.0`)
        `l2-norm` penalty.

    Returns
    -------
    beta : (P, 1) ndarray
        Ridge regression solution.

    Examples
    --------
    >>> X = numpy.array([[0.1, 1.1, 0.3], [0.2, 1.2, 1.6], [0.3, 1.3, -0.6]])
    >>> beta = numpy.array([0.1, 0.1, 0.0])
    >>> Y = numpy.dot(X, beta)
    >>> beta = l1l2py.algorithms.ridge_regression(X, Y, 1e3).T
    >>> len(numpy.flatnonzero(beta))
    3

    """
    n, p = data.shape

    if n < p:
        tmp = np.dot(data, data.T)
        if mu:
            tmp += mu*n*np.eye(n)
        tmp = la.pinv(tmp)

        return np.dot(np.dot(data.T, tmp), labels.reshape(-1, 1))
    else:
        tmp = np.dot(data.T, data)
        if mu:
            tmp += mu*n*np.eye(p)
        tmp = la.pinv(tmp)

        return np.dot(tmp, np.dot(data.T, labels.reshape(-1, 1)))
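
A hedged usage sketch (random data, illustrative only): for `mu > 0` and `n >= p` the function computes `(X'X + mu*n*I)^{-1} X'y`, which the direct solve below reproduces.

import numpy as np
import numpy.linalg as la

np.random.seed(0)
n, p = 50, 5
X = np.random.randn(n, p)
y = np.random.randn(n, 1)
mu = 0.1

beta = ridge_regression(X, y, mu)
direct = la.solve(X.T @ X + mu * n * np.eye(p), X.T @ y)
print(np.allclose(beta, direct))  # True
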