def testMatmulBatchMatrix(self):
  with self.test_session():
    batch_shape = (2, 3)
    for k in [1, 4]:
      x_shape = batch_shape + (k, 5)
      x = self._rng.rand(*x_shape)
      chol_shape = batch_shape + (k, k)
      chol = self._random_cholesky_array(chol_shape)
      matrix = math_ops.matmul(chol, chol, adjoint_b=True)
      operator = operator_pd_cholesky.OperatorPDCholesky(chol)
      expected = math_ops.matmul(matrix, x)
      self.assertEqual(expected.get_shape(), operator.matmul(x).get_shape())
      self.assertAllClose(expected.eval(), operator.matmul(x).eval())
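The test above reduces to a plain linear-algebra identity: if A = L L^T for a lower-triangular Cholesky factor L, multiplying by A directly and multiplying through the factor must agree. A minimal standalone NumPy sketch of that identity (illustrative only, not part of the test file):

import numpy as np

rng = np.random.RandomState(0)
k = 4
L = np.tril(rng.rand(k, k)) + np.eye(k)   # lower triangular with positive diagonal
A = L @ L.T                               # the positive-definite matrix the operator represents
x = rng.rand(k, 5)
np.testing.assert_allclose(A @ x, L @ (L.T @ x))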
Source file: operator_pd_cholesky_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def testMatmulBatchMatrixWithTranspose(self):
  with self.test_session():
    batch_shape = (2, 3)
    for k in [1, 4]:
      x_shape = batch_shape + (5, k)
      x = self._rng.rand(*x_shape)
      chol_shape = batch_shape + (k, k)
      chol = self._random_cholesky_array(chol_shape)
      matrix = math_ops.matmul(chol, chol, adjoint_b=True)
      operator = operator_pd_cholesky.OperatorPDCholesky(chol)
      operator_times_x = operator.matmul(x, transpose_x=True)
      # tf.batch_matmul is defined x * y, so "y" is on the right, not "x".
      expected = math_ops.matmul(matrix, x, adjoint_b=True)
      self.assertEqual(expected.get_shape(), operator_times_x.get_shape())
      self.assertAllClose(expected.eval(), operator_times_x.eval())
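The same identity with the operand transposed: for x of shape (5, k), operator.matmul(x, transpose_x=True) corresponds to A x^T. A quick standalone NumPy sketch (illustrative, not the test harness):

import numpy as np

rng = np.random.RandomState(1)
k = 4
L = np.tril(rng.rand(k, k)) + np.eye(k)
A = L @ L.T
x = rng.rand(5, k)
np.testing.assert_allclose(A @ x.T, L @ (L.T @ x.T))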
Source file: operator_pd_vdvt_update_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _updated_mat(self, mat, v, diag):
  # Get the dense matrix defined by its square root, which is an update of `mat`:
  #   A = (mat + v D v^T) (mat + v D v^T)^T
  # D is the diagonal matrix with `diag` on the diagonal.
  # If diag is None, then it defaults to the identity matrix, so D v^T = v^T.
  if diag is None:
    diag_vt = array_ops.matrix_transpose(v)
  else:
    diag_mat = array_ops.matrix_diag(diag)
    diag_vt = math_ops.matmul(diag_mat, v, adjoint_b=True)
  v_diag_vt = math_ops.matmul(v, diag_vt)
  sqrt = mat + v_diag_vt
  a = math_ops.matmul(sqrt, sqrt, adjoint_b=True)
  return a.eval()
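For reference, a rough NumPy paraphrase of what `_updated_mat` returns (illustrative only; `updated_mat_np` is a hypothetical helper): the square of the low-rank-updated square root, A = (mat + V D V^T)(mat + V D V^T)^T.

import numpy as np

def updated_mat_np(mat, v, diag=None):
  # D defaults to the identity when `diag` is None, so v D v^T = v v^T.
  d = np.eye(v.shape[-1]) if diag is None else np.diag(diag)
  sqrt = mat + v @ d @ v.T    # the updated square root
  return sqrt @ sqrt.T        # A = sqrt sqrt^T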
def _sqrt_matmul(self, x, transpose_x=False):
  v = self._v
  m = self._operator
  d = self._diag_operator
  # The operators call the appropriate matmul/batch_matmul automatically. We
  # cannot override.
  # matmul is defined as: a * b, so transpose_a and transpose_b are used to
  # transpose the left and right operands.
  mx = m.matmul(x, transpose_x=transpose_x)
  vt_x = math_ops.matmul(v, x, transpose_a=True, transpose_b=transpose_x)
  d_vt_x = d.matmul(vt_x)
  v_d_vt_x = math_ops.matmul(v, d_vt_x)
  return mx + v_d_vt_x
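A plain NumPy sketch of the factorization used above (hypothetical helper, with a dense `m_mat` standing in for the operator M): (M + V D V^T) x is assembled as M x + V (D (V^T x)), so the dense update V D V^T is never formed.

import numpy as np

def sqrt_matmul_np(m_mat, v, d_diag, x):
  vt_x = v.T @ x                    # V^T x
  d_vt_x = np.diag(d_diag) @ vt_x   # D V^T x
  return m_mat @ x + v @ d_vt_x     # M x + V D V^T x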
def _sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  # TODO(jvdillon): Determine if recursively applying rank-1 updates is more
  # efficient. May not be possible because a general n x n matrix can be
  # represented as n rank-1 updates, and solving with this matrix is always
  # done in O(n^3) time.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=False)
  # The operators will use batch/singleton mode automatically. We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, transpose_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # (M^{-1} - M^{-1} V C^{-1} V^T M^{-1}) rhs
  return minv_rhs - minv_v_cinv_vt_minv_rhs
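A small standalone NumPy check of the Woodbury identity the solve relies on (illustrative only): (M + V D V^T)^{-1} = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}, with capacitance C = D^{-1} + V^T M^{-1} V.

import numpy as np

rng = np.random.RandomState(2)
n, r = 5, 2
M = np.diag(rng.rand(n) + 1.0)            # well-conditioned positive-definite M
V = rng.rand(n, r)
D = np.diag(rng.rand(r) + 1.0)
Minv = np.linalg.inv(M)
C = np.linalg.inv(D) + V.T @ Minv @ V     # capacitance matrix
woodbury = Minv - Minv @ V @ np.linalg.inv(C) @ V.T @ Minv
np.testing.assert_allclose(woodbury, np.linalg.inv(M + V @ D @ V.T), atol=1e-10)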
def _sqrt_to_dense(self):
  v = self._v
  d = self._diag_operator
  m = self._operator
  d_vt = d.matmul(v, transpose_x=True)
  # Batch op won't be efficient for singletons. Currently we don't break
  # to_dense into batch/singleton methods.
  v_d_vt = math_ops.batch_matmul(v, d_vt)
  m_plus_v_d_vt = m.to_dense() + v_d_vt
  return m_plus_v_d_vt
def _matmul(self, x, transpose_x=False):
  # tf.matmul is defined a * b.
  chol = array_ops.matrix_band_part(self._chol, -1, 0)
  chol_times_x = math_ops.matmul(
      chol, x, transpose_a=True, transpose_b=transpose_x)
  return math_ops.matmul(chol, chol_times_x)
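The NumPy equivalent of the two-step product above (illustrative; `pd_matmul_np` is a hypothetical helper and ignores the transpose flag): only the lower triangle of the stored factor is used, and A x is built as L (L^T x) without materializing A.

import numpy as np

def pd_matmul_np(chol, x):
  L = np.tril(chol)        # matrix_band_part(chol, -1, 0): keep the lower triangle
  return L @ (L.T @ x)     # (L L^T) x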
def _sqrt_matmul(self, x, transpose_x=False):
  chol = array_ops.matrix_band_part(self._chol, -1, 0)
  # tf.matmul is defined a * b.
  return math_ops.matmul(chol, x, transpose_b=transpose_x)
def _variance(self):
  x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
  d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
  v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
  if self.cholesky_input_output_matrices:
    return linalg_ops.cholesky(v)
  return v
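Elementwise, the variance above is Var(W)[i, j] = df * (Sigma[i, j]^2 + Sigma[i, i] * Sigma[j, j]), the standard Wishart variance. A NumPy paraphrase (illustrative, assuming a dense scale matrix `scale` and scalar `df`):

import numpy as np

def wishart_variance_np(df, scale):
  x = np.sqrt(df) * scale
  d = np.diag(x)[:, np.newaxis]      # column of diagonal entries
  return np.square(x) + d @ d.T      # df * (Sigma_ij^2 + Sigma_ii * Sigma_jj)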
def _batch_sqrt_matmul(self, x, transpose_x=False):
  v = self._v
  m = self._operator
  d = self._diag_operator
  # The operators call the appropriate matmul/batch_matmul automatically.
  # We cannot override.
  # batch_matmul is defined as: x * y, so adjoint_a and adjoint_b are the
  # ways to transpose the left and right.
  mx = m.matmul(x, transpose_x=transpose_x)
  vt_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=transpose_x)
  d_vt_x = d.matmul(vt_x)
  v_d_vt_x = math_ops.matmul(v, d_vt_x)
  return mx + v_d_vt_x
def _to_dense(self):
  sqrt = self.sqrt_to_dense()
  return math_ops.matmul(sqrt, sqrt, adjoint_b=True)
def _sqrt_to_dense(self):
  v = self._v
  d = self._diag_operator
  m = self._operator
  d_vt = d.matmul(v, transpose_x=True)
  # Batch op won't be efficient for singletons. Currently we don't break
  # to_dense into batch/singleton methods.
  v_d_vt = math_ops.matmul(v, d_vt)
  m_plus_v_d_vt = m.to_dense() + v_d_vt
  return m_plus_v_d_vt
def _batch_matmul(self, x, transpose_x=False):
  # tf.matmul is defined x * y, so "y" is on the right, not "x".
  chol = array_ops.matrix_band_part(self._chol, -1, 0)
  chol_times_x = math_ops.matmul(
      chol, x, adjoint_a=True, adjoint_b=transpose_x)
  return math_ops.matmul(chol, chol_times_x)
def _sqrt_matmul(self, x, transpose_x=False):
  chol = array_ops.matrix_band_part(self._chol, -1, 0)
  # tf.matmul is defined a * b.
  return math_ops.matmul(chol, x, adjoint_b=transpose_x)
def _to_dense(self):
  chol = array_ops.matrix_band_part(self._chol, -1, 0)
  return math_ops.matmul(chol, chol, adjoint_b=True)
def _variance(self):
  p = self.p * array_ops.expand_dims(array_ops.ones_like(self.n), -1)
  outer_prod = math_ops.matmul(
      array_ops.expand_dims(self._mean_val, -1), array_ops.expand_dims(p, -2))
  return array_ops.matrix_set_diag(-outer_prod,
                                   self._mean_val - self._mean_val * p)
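What this builds is the multinomial covariance matrix: -n p_i p_j off the diagonal and n p_i (1 - p_i) on it. A NumPy paraphrase for a single batch member (illustrative; `multinomial_variance_np` is a hypothetical helper):

import numpy as np

def multinomial_variance_np(n, p):
  mean = n * p
  cov = -np.outer(mean, p)                 # -n p_i p_j everywhere
  np.fill_diagonal(cov, mean - mean * p)   # n p_i (1 - p_i) on the diagonal
  return cov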
def _forward(self, x):
  x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
  x = math_ops.matmul(self.scale, x)
  x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
  x += self.shift
  return x
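Stripped of the sample/event reshaping handled by `shaper`, the forward transform is just an affine map. A minimal NumPy sketch (illustrative only):

import numpy as np

def affine_forward_np(scale, shift, x):
  return scale @ x + shift   # y = scale x + shift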