def test_cholesky(self):
    x = torch.rand(10, 10) + 1e-1
    A = torch.mm(x, x.t())
    # default case
    C = torch.potrf(A)
    B = torch.mm(C.t(), C)
    self.assertEqual(A, B, 1e-14)
    # test upper triangular
    U = torch.potrf(A, True)
    B = torch.mm(U.t(), U)
    self.assertEqual(A, B, 1e-14, 'potrf (upper) did not allow rebuilding the original matrix')
    # test lower triangular
    L = torch.potrf(A, False)
    B = torch.mm(L, L.t())
    self.assertEqual(A, B, 1e-14, 'potrf (lower) did not allow rebuilding the original matrix')
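
# Note: potrf/potrs/potri are the early-PyTorch names for the Cholesky routines; in
# current releases they correspond to torch.linalg.cholesky, torch.cholesky_solve and
# torch.cholesky_inverse. A minimal sketch of the same rebuild check with the current
# API (assuming a recent PyTorch; torch.linalg.cholesky returns the lower factor, so
# its transpose plays the role of potrf's default upper factor):
import torch

x = torch.rand(10, 10, dtype=torch.float64) + 1e-1
A = x @ x.T                                # symmetric positive definite test matrix
L = torch.linalg.cholesky(A)               # lower-triangular factor, A = L @ L.T
assert torch.allclose(A, L @ L.T)
U = L.T.contiguous()                       # upper factor, as potrf(A, True) returned
assert torch.allclose(A, U.T @ U)
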
def test_potrs(self):
    a = torch.Tensor(((6.80, -2.11, 5.66, 5.97, 8.23),
                      (-6.05, -3.30, 5.36, -4.44, 1.08),
                      (-0.45, 2.58, -2.70, 0.27, 9.04),
                      (8.32, 2.71, 4.35, -7.17, 2.14),
                      (-9.67, -5.14, -7.26, 6.08, -6.87))).t()
    b = torch.Tensor(((4.02, 6.19, -8.22, -7.57, -3.03),
                      (-1.56, 4.00, -8.67, 1.75, 2.86),
                      (9.81, -4.09, -4.57, -8.61, 8.99))).t()
    # make sure 'a' is symmetric PSD
    a = torch.mm(a, a.t())
    # upper triangular test
    U = torch.potrf(a)
    x = torch.potrs(b, U)
    self.assertLessEqual(b.dist(torch.mm(a, x)), 1e-12)
    # lower triangular test
    L = torch.potrf(a, False)
    x = torch.potrs(b, L, False)
    self.assertLessEqual(b.dist(torch.mm(a, x)), 1e-12)
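
# For reference, torch.potrs corresponds to torch.cholesky_solve in current PyTorch.
# A self-contained sketch of the same solve-from-a-factor pattern (the random 5x5
# system below is only a stand-in for the hard-coded matrices above):
import torch

torch.manual_seed(0)
a = torch.rand(5, 5, dtype=torch.float64)
a = a @ a.T + 0.1 * torch.eye(5, dtype=torch.float64)    # symmetric positive definite
b = torch.rand(5, 3, dtype=torch.float64)

L = torch.linalg.cholesky(a)               # lower factor of a
x = torch.cholesky_solve(b, L)             # solves a @ x = b using the cached factor
assert torch.allclose(a @ x, b)
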
def __init__(self, nFeatures, args):
    super().__init__()
    nHidden, neq, nineq = 2*nFeatures-1, 0, 2*nFeatures-2
    assert neq == 0
    # self.fc1 = nn.Linear(nFeatures, nHidden)
    self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())
    # quadratic cost: identity on the first nFeatures coordinates, small jitter elsewhere
    Q = 1e-8*torch.eye(nHidden)
    Q[:nFeatures, :nFeatures] = torch.eye(nFeatures)
    self.L = Variable(torch.potrf(Q))  # (upper-triangular) Cholesky factor of Q
    self.D = Parameter(0.3*torch.randn(nFeatures-1, nFeatures))
    # self.lam = Parameter(20.*torch.ones(1))
    self.h = Variable(torch.zeros(nineq))
    self.nFeatures = nFeatures
    self.nHidden = nHidden
    self.neq = neq
    self.nineq = nineq
    self.args = args
def test_potri(self):
    a = torch.Tensor(((6.80, -2.11, 5.66, 5.97, 8.23),
                      (-6.05, -3.30, 5.36, -4.44, 1.08),
                      (-0.45, 2.58, -2.70, 0.27, 9.04),
                      (8.32, 2.71, 4.35, -7.17, 2.14),
                      (-9.67, -5.14, -7.26, 6.08, -6.87))).t()
    # make sure 'a' is symmetric PSD
    a = torch.mm(a, a.t())
    # compute the inverse directly
    inv0 = torch.inverse(a)
    # default case
    chol = torch.potrf(a)
    inv1 = torch.potri(chol)
    self.assertLessEqual(inv0.dist(inv1), 1e-12)
    # upper triangular test
    chol = torch.potrf(a, True)
    inv1 = torch.potri(chol, True)
    self.assertLessEqual(inv0.dist(inv1), 1e-12)
    # lower triangular test
    chol = torch.potrf(a, False)
    inv1 = torch.potri(chol, False)
    self.assertLessEqual(inv0.dist(inv1), 1e-12)
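
# torch.potri corresponds to torch.cholesky_inverse in current PyTorch: given a
# Cholesky factor, it returns the inverse of the original matrix. A minimal sketch
# (the random matrix and seed are illustrative, not from the test above):
import torch

torch.manual_seed(0)
a = torch.rand(5, 5, dtype=torch.float64)
a = a @ a.T + 0.1 * torch.eye(5, dtype=torch.float64)    # symmetric positive definite

L = torch.linalg.cholesky(a)
inv_from_factor = torch.cholesky_inverse(L)               # inverse of a via its factor
assert torch.allclose(inv_from_factor, torch.inverse(a))
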
def forward(ctx, a, upper=True):
    # Cholesky factorization as an autograd Function forward:
    # factor `a` and save the factor for the backward pass.
    ctx.upper = upper
    fact = torch.potrf(a, upper)
    ctx.save_for_backward(fact)
    return fact
def pre_factor_kkt(Q, G, A):
    """Perform all one-time factorizations and cache relevant matrix products."""
    nineq, nz, neq, _ = get_sizes(G, A)

    # S = [ A Q^{-1} A^T    A Q^{-1} G^T           ]
    #     [ G Q^{-1} A^T    G Q^{-1} G^T + D^{-1}  ]
    U_Q = torch.potrf(Q)

    # partial Cholesky of the S matrix
    U_S = torch.zeros(neq + nineq, neq + nineq).type_as(Q)
    G_invQ_GT = torch.mm(G, torch.potrs(G.t(), U_Q))
    R = G_invQ_GT
    if neq > 0:
        invQ_AT = torch.potrs(A.t(), U_Q)
        A_invQ_AT = torch.mm(A, invQ_AT)
        G_invQ_AT = torch.mm(G, invQ_AT)

        # TODO: torch.potrf sometimes says the matrix is not PSD but
        # numpy does? I filed an issue at
        # https://github.com/pytorch/pytorch/issues/199
        try:
            U11 = torch.potrf(A_invQ_AT)
        except Exception:
            # np.linalg.cholesky returns the lower factor; transpose it so that
            # U11 is the upper factor the rest of this function expects.
            L11 = np.linalg.cholesky(A_invQ_AT.cpu().numpy())
            U11 = torch.Tensor(L11.T.copy()).type_as(A_invQ_AT)

        # TODO: torch.trtrs is currently not implemented on the GPU
        # and we are using gesv as a workaround.
        U12 = torch.gesv(G_invQ_AT.t(), U11.t())[0]

        U_S[:neq, :neq] = U11
        U_S[:neq, neq:] = U12
        R -= torch.mm(U12.t(), U12)

    return U_Q, U_S, R
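
# The comment block above describes S as the Schur complement of the KKT system after
# eliminating the primal variables through Q. A small self-contained check of the
# cached product G Q^{-1} G^T, written with the current torch.linalg/cholesky_solve
# names rather than the potrf/potrs calls used above (sizes are arbitrary):
import torch

torch.manual_seed(0)
nz, nineq = 6, 4
Q = torch.rand(nz, nz, dtype=torch.float64)
Q = Q @ Q.T + 0.1 * torch.eye(nz, dtype=torch.float64)    # positive definite cost
G = torch.rand(nineq, nz, dtype=torch.float64)

L_Q = torch.linalg.cholesky(Q)
G_invQ_GT = G @ torch.cholesky_solve(G.T, L_Q)            # same quantity pre_factor_kkt caches

assert torch.allclose(G_invQ_GT, G @ torch.inverse(Q) @ G.T)
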
def factor_kkt(U_S, R, d):
    """Factor the U22 block that we can only do after we know D."""
    nineq = R.size(0)
    U_S[-nineq:, -nineq:] = torch.potrf(R + torch.diag(1 / d.cpu()).type_as(d))
def factor_solve_kkt(Q, D, G, A, rx, rs, rz, ry):
    nineq, nz, neq, _ = get_sizes(G, A)

    if neq > 0:
        H_ = torch.cat([torch.cat([Q, torch.zeros(nz, nineq).type_as(Q)], 1),
                        torch.cat([torch.zeros(nineq, nz).type_as(Q), D], 1)], 0)
        A_ = torch.cat([torch.cat([G, torch.eye(nineq).type_as(Q)], 1),
                        torch.cat([A, torch.zeros(neq, nineq).type_as(Q)], 1)], 0)
        g_ = torch.cat([rx, rs], 0)
        h_ = torch.cat([rz, ry], 0)
    else:
        H_ = torch.cat([torch.cat([Q, torch.zeros(nz, nineq).type_as(Q)], 1),
                        torch.cat([torch.zeros(nineq, nz).type_as(Q), D], 1)], 0)
        A_ = torch.cat([G, torch.eye(nineq).type_as(Q)], 1)
        g_ = torch.cat([rx, rs], 0)
        h_ = rz

    U_H_ = torch.potrf(H_)

    invH_A_ = torch.potrs(A_.t(), U_H_)
    invH_g_ = torch.potrs(g_.view(-1, 1), U_H_).view(-1)

    S_ = torch.mm(A_, invH_A_)
    U_S_ = torch.potrf(S_)
    t_ = torch.mv(A_, invH_g_).view(-1, 1) - h_
    w_ = -torch.potrs(t_, U_S_).view(-1)
    v_ = torch.potrs(-g_.view(-1, 1) - torch.mv(A_.t(), w_), U_H_).view(-1)

    return v_[:nz], v_[nz:], w_[:nineq], w_[nineq:] if neq > 0 else None
def test_potrf(self):
    root = Variable(torch.tril(torch.rand(S, S)), requires_grad=True)

    def run_test(upper):
        def func(root):
            x = torch.mm(root, root.t())
            return torch.potrf(x, upper)

        gradcheck(func, [root])
        gradgradcheck(func, [root])

    run_test(upper=True)
    run_test(upper=False)
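
# gradcheck compares analytic gradients against finite differences, so it needs
# float64 inputs to be reliable; S above is a size constant defined elsewhere in the
# test suite. A self-contained variant using the current torch.linalg.cholesky name
# (the size 4 and the +I shift are arbitrary choices, not from the original test):
import torch
from torch.autograd import gradcheck, gradgradcheck

root = torch.tril(torch.rand(4, 4, dtype=torch.float64)) + torch.eye(4, dtype=torch.float64)
root.requires_grad_(True)

def func(root):
    x = root @ root.T                      # symmetric positive definite by construction
    return torch.linalg.cholesky(x)

assert gradcheck(func, [root])
assert gradgradcheck(func, [root])
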
def __init__(self, nFeatures, args):
    super(OptNet, self).__init__()
    nHidden, neq, nineq = 2*nFeatures-1, 0, 2*nFeatures-2
    assert neq == 0

    self.fc1 = nn.Linear(nFeatures, nHidden)
    self.M = Variable(torch.tril(torch.ones(nHidden, nHidden)).cuda())

    if args.tvInit:
        Q = 1e-8*torch.eye(nHidden)
        Q[:nFeatures, :nFeatures] = torch.eye(nFeatures)
        self.L = Parameter(torch.potrf(Q))

        D = torch.zeros(nFeatures-1, nFeatures)
        D[:nFeatures-1, :nFeatures-1] = torch.eye(nFeatures-1)
        D[:nFeatures-1, 1:nFeatures] -= torch.eye(nFeatures-1)
        G_ = block((( D, -torch.eye(nFeatures-1)),
                    (-D, -torch.eye(nFeatures-1))))
        self.G = Parameter(G_)
        self.s0 = Parameter(torch.ones(2*nFeatures-2) + 1e-6*torch.randn(2*nFeatures-2))
        G_pinv = (G_.t().mm(G_) + 1e-5*torch.eye(nHidden)).inverse().mm(G_.t())
        self.z0 = Parameter(-G_pinv.mv(self.s0.data) + 1e-6*torch.randn(nHidden))

        lam = 21.21
        W_fc1, b_fc1 = self.fc1.weight, self.fc1.bias
        W_fc1.data[:, :] = 1e-3*torch.randn((2*nFeatures-1, nFeatures))
        # W_fc1.data[:,:] = 0.0
        W_fc1.data[:nFeatures, :nFeatures] += -torch.eye(nFeatures)
        # b_fc1.data[:] = torch.zeros(2*nFeatures-1)
        b_fc1.data[:] = 0.0
        b_fc1.data[nFeatures:2*nFeatures-1] = lam
    else:
        self.L = Parameter(torch.tril(torch.rand(nHidden, nHidden)))
        self.G = Parameter(torch.Tensor(nineq, nHidden).uniform_(-1, 1))
        self.z0 = Parameter(torch.zeros(nHidden))
        self.s0 = Parameter(torch.ones(nineq))

    self.nFeatures = nFeatures
    self.nHidden = nHidden
    self.neq = neq
    self.nineq = nineq
    self.args = args
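
# In the tvInit branch above, D appears to be a first-difference operator (row i
# computes x[i] - x[i+1]), and G stacks +/-D against identity blocks, which is the
# usual way to bound absolute differences with slack variables. A hypothetical
# stand-alone check of the D construction (nFeatures=5 is an arbitrary value):
import torch

nFeatures = 5
D = torch.zeros(nFeatures - 1, nFeatures)
D[:nFeatures - 1, :nFeatures - 1] = torch.eye(nFeatures - 1)
D[:nFeatures - 1, 1:nFeatures] -= torch.eye(nFeatures - 1)

x = torch.arange(float(nFeatures))          # 0, 1, 2, 3, 4
assert torch.allclose(D @ x, x[:-1] - x[1:])  # each row yields one consecutive difference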