def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""Returns the sum along the diagonals of an array.
It computes the sum along the diagonals at ``axis1`` and ``axis2``.
Args:
a (cupy.ndarray): Array to take trace.
offset (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
axis1 (int): The first axis along which the trace is taken.
axis2 (int): The second axis along which the trace is taken.
dtype: Data type specifier of the output.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
.. seealso:: :func:`numpy.trace`
"""
# TODO(okuta): check type
return a.trace(offset, axis1, axis2, dtype, out)
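# Usage sketch for the wrapper above, assuming a working CuPy install; the
# offset/axis semantics match numpy.trace exactly:
import cupy
a = cupy.arange(9, dtype=cupy.float64).reshape(3, 3)
print(cupy.trace(a))             # 0 + 4 + 8 = 12.0 (main diagonal)
print(cupy.trace(a, offset=1))   # 1 + 5 = 6.0 (first upper diagonal)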
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
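# A concrete sketch of the Householder construction above: mirroring at the
# z = 0 plane (point at the origin, normal along z) negates the z coordinate.
# ``unit_vector`` is assumed to normalize its argument, as in this module.
import numpy
R = reflection_matrix(numpy.zeros(3), numpy.array([0.0, 0.0, 1.0]))
p = numpy.array([1.0, 2.0, 3.0, 1.0])  # homogeneous point
assert numpy.allclose(numpy.dot(R, p), [1.0, 2.0, -3.0, 1.0])
# The doctest's trace check follows because the 3x3 block has eigenvalues
# (1, 1, -1), so trace(R) = 1 + 1 - 1 + 1 = 2.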
def trace(mpa, axes=(0, 1)):
"""Compute the trace of the given MPA.
If you specify axes (see partialtrace() for details), you must
ensure that the result has no physical legs anywhere.
    :param mpa: MPArray
:param axes: Axes for trace, ``(axis1, axis2)`` or ``(axes1, axes2, ...)``
with ``axesN=(axisN_1, axisN_2)`` or ``axesN=None``.
(default: ``(0, 1)``)
:returns: A single scalar of type ``mpa.dtype``
"""
out = partialtrace(mpa, axes)
out = out.to_array()
assert out.size == 1, 'trace must return a single scalar'
return out[None][0]
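# A small consistency sketch, assuming mpnum is importable as ``mp`` with its
# ``factory`` module (both appear in the test below): the MPA trace must agree
# with the trace of the dense matrix representation.
import numpy as np
import mpnum as mp
from mpnum import factory
mpo = factory.random_mpa(4, [2, 2], 3)
dense = mpo.to_array_global().reshape(2 ** 4, 2 ** 4)
np.testing.assert_allclose(mp.trace(mpo), np.trace(dense))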
def test_sandwich(nr_sites, local_dim, rank, rgen, dtype):
mps = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mps2 = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mpo = factory.random_mpa(nr_sites, [local_dim] * 2, rank,
randstate=rgen, dtype=dtype)
mpo.canonicalize()
mpo /= mp.trace(mpo)
vec = mps.to_array().ravel()
op = mpo.to_array_global().reshape([local_dim**nr_sites] * 2)
res_arr = np.vdot(vec, np.dot(op, vec))
res_mpo = mp.inner(mps, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
vec2 = mps2.to_array().ravel()
res_arr = np.vdot(vec2, np.dot(op, vec))
res_mpo = mp.inner(mps2, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps, mps2)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
def test_dipolar_tensor(self):
# initial stupid test...
###### TODO : do a reasonable test!!! ######
p = np.array([[0.,0.,0.]])
        fc = np.array([[0., 0., 1.]], dtype=complex)
k = np.array([0.,0.,0.0])
phi= np.array([0.,])
mu = np.array([0.5,0.5,0.5])
sc = np.array([10,10,10],dtype=np.int32)
latpar = np.diag([2.,2.,2.])
r = 10.
res = lfcext.DipolarTensor(p,mu,sc,latpar,r)
np.testing.assert_array_almost_equal(res, np.zeros([3,3]))
mu = np.array([0.25,0.25,0.25])
res = lfcext.DipolarTensor(p,mu,sc,latpar,r)
np.testing.assert_array_almost_equal(np.trace(res), np.zeros([3]))
np.testing.assert_array_almost_equal(res, res.copy().T)
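        # Background for the assertions above: each dipolar contribution has
        # the form (3 r r^T - |r|^2 I) / |r|^5, which is symmetric and
        # traceless (tr(3 r r^T - |r|^2 I) = 3|r|^2 - 3|r|^2 = 0), so the
        # summed tensor must have zero trace and equal its transpose.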
def get_neg_log_post(Phi, sigma_J_list, ROI_list, G, MMT, q, Sigma_E, GL,
nu, V, prior_on = False):
eps = 1E-13
p = Phi.shape[0]
n_ROI = len(sigma_J_list)
Qu = Phi.dot(Phi.T)
G_Sigma_G = np.zeros(MMT.shape)
for i in range(n_ROI):
G_Sigma_G += sigma_J_list[i]**2 * np.dot(G[:,ROI_list[i]], G[:,ROI_list[i]].T)
cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
inv_cov = np.linalg.inv(cov)
eigs = np.real(np.linalg.eigvals(cov)) + eps
log_det_cov = np.sum(np.log(eigs))
result = q*log_det_cov + np.trace(MMT.dot(inv_cov))
if prior_on:
inv_Q = np.linalg.inv(Qu)
#det_Q = np.linalg.det(Qu)
log_det_Q = np.sum(np.log(np.diag(Phi)**2))
        result = result + float(nu + p + 1) * log_det_Q + np.trace(V.dot(inv_Q))
return result
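# A toy invocation sketch with hypothetical shapes (none of these names come
# from the original project): q sensor-space samples, m sensors, p ROIs. Phi
# is kept lower-triangular so that diag(Phi)**2 gives the log-determinant of
# Qu, as the prior branch assumes.
import numpy as np
p, m, q = 2, 6, 5
Phi = np.tril(np.random.rand(p, p)) + np.eye(p)
G = np.random.randn(m, 4)
ROI_list = [np.arange(0, 2), np.arange(2, 4)]
sigma_J_list = [0.5, 0.8]
GL = np.random.randn(m, p)
M = np.random.randn(m, q)
neg_log_post = get_neg_log_post(Phi, sigma_J_list, ROI_list, G, M.dot(M.T), q,
                                np.eye(m), GL, nu=p + 2, V=np.eye(p),
                                prior_on=True)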
def calc_mean_var_loss(epochsInds, loss_train):
    # loss_train has shape (#epochs, #batches)
    num_of_epochs = loss_train.shape[0]
    # Average over the batches
    loss_train_mean = np.mean(loss_train, 1)
    # The difference of the mean loss, scaled by the sampled epoch indices
    d_mean_loss_to_dt = np.sqrt(np.abs(np.diff(loss_train_mean) / np.diff(epochsInds[:])))
    var_loss = []
    # Go over the epochs
    for epoch_index in range(num_of_epochs):
        # The loss for the specific epoch
        current_loss = loss_train[epoch_index, :]
        # The derivative between batches
        current_loss_dt = np.diff(current_loss)
        # The mean of this derivative
        average_loss = np.mean(current_loss_dt)
        current_loss_minus_mean = current_loss_dt - average_loss
        # The covariance between batches
        cov_mat = np.dot(current_loss_minus_mean[:, None], current_loss_minus_mean[None, :])
        # The trace of the covariance matrix
        trac_cov = np.trace(cov_mat)
        var_loss.append(trac_cov)
    return np.array(var_loss), d_mean_loss_to_dt
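# Note on the trace used above: for a vector v, trace(v v^T) equals sum(v**2),
# so the per-epoch variance proxy is just the summed squared deviation of the
# batch-to-batch derivative:
v = np.array([1.0, -2.0, 3.0])
assert np.isclose(np.trace(np.outer(v, v)), np.sum(v ** 2))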
def process_fidelity(self, reference_unitary):
"""
        Compute the quantum process fidelity of the estimated state with respect to a unitary
        process. For a non-sparse reference_unitary this implementation will be expensive in
        higher dimensions.
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as ``rho -> other*rho*other.dag()``, can also be a superoperator or Pauli-transfer matrix.
:return: The process fidelity, a real number between 0 and 1.
:rtype: float
"""
if isinstance(reference_unitary, qt.Qobj):
if not reference_unitary.issuper or reference_unitary.superrep != "super":
sother = qt.to_super(reference_unitary)
else:
sother = reference_unitary
tm_other = self.pauli_basis.transfer_matrix(sother)
else:
tm_other = csr_matrix(reference_unitary)
dimension = self.pauli_basis.ops[0].shape[0]
return np.trace(tm_other.T * self.r_est).real / dimension ** 2
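# The returned quantity is tr(R_ref^T R_est) / d**2 over Pauli-transfer
# matrices. A standalone sketch with hypothetical one-qubit inputs: two
# identical channels have unit process fidelity.
import numpy as np
d = 2
R_ref = np.eye(d ** 2)   # Pauli-transfer matrix of the identity channel
R_est = np.eye(d ** 2)
assert np.isclose(np.trace(R_ref.T.dot(R_est)).real / d ** 2, 1.0)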
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def one_time_from_two_time(two_time_corr):
"""
This will provide the one-time correlation data from two-time
correlation data.
Parameters
----------
two_time_corr : array
matrix of two time correlation
shape (number of labels(ROI's), number of frames, number of frames)
Returns
-------
one_time_corr : array
matrix of one time correlation
shape (number of labels(ROI's), number of frames)
"""
    one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2]))
    for i, g in enumerate(two_time_corr):
        for j in range(two_time_corr.shape[2]):
            # fill row i (the i-th ROI) from the j-th diagonal of g
            one_time_corr[i, j] = np.trace(g, offset=j) / two_time_corr.shape[2]
    return one_time_corr
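# The mechanism above relies on ``np.trace(g, offset=j)`` summing the j-th
# superdiagonal of g; a tiny check:
g = np.arange(9.0).reshape(3, 3)
assert np.trace(g, offset=1) == g[0, 1] + g[1, 2]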
def __trace_middle_dims(sys, dims, reverse=True):
"""
Get system dimensions for __trace_middle.
    Args:
        sys (int): system to trace over.
        dims (list[int]): dimensions of all subsystems.
        reverse (bool): if true, system 0 is the right-most system in the
            tensor product.
    Returns:
        Tuple (dim1, dim2, dim3)
    """
dpre = dims[:sys]
dpost = dims[sys + 1:]
if reverse:
dpre, dpost = (dpost, dpre)
dim1 = int(np.prod(dpre))
dim2 = int(dims[sys])
dim3 = int(np.prod(dpost))
return (dim1, dim2, dim3)
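# Example of the folding, assuming the helper is reachable at module scope:
# for dims = [2, 3, 4] and sys = 1, the subsystems around system 1 collapse
# to (prod(pre), dims[sys], prod(post)).
assert __trace_middle_dims(1, [2, 3, 4], reverse=False) == (2, 3, 4)
assert __trace_middle_dims(1, [2, 3, 4], reverse=True) == (4, 3, 2)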
def quadratic_loss(covariance, precision):
"""Computes ...
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
Returns
-------
Quadratic loss
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
return np.trace((np.dot(covariance, precision) - np.eye(dim))**2)
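# Sanity check: when ``precision`` is the exact inverse of ``covariance`` the
# product is the identity and the loss vanishes.
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
assert np.isclose(quadratic_loss(cov, np.linalg.inv(cov)), 0.0)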
def multivariate_prior_KL(meanA, covA, meanB, covB):
    # KL[ qA || qB ] = E_{qA} \log [qA / qB] where qA and qB are
    # K-dimensional multivariate normal distributions.
# Analytically tractable and equal to...
# 0.5 * (Tr(covB^{-1} covA) + (meanB - meanA)^T covB^{-1} (meanB - meanA)
# - K + log(det(covB)) - log (det(covA)))
K = covA.shape[0]
traceTerm = 0.5 * np.trace(np.linalg.solve(covB, covA))
delta = meanB - meanA
mahalanobisTerm = 0.5 * np.dot(delta.T, np.linalg.solve(covB, delta))
constantTerm = -0.5 * K
priorLogDeterminantTerm = 0.5*np.linalg.slogdet(covB)[1]
variationalLogDeterminantTerm = -0.5 * np.linalg.slogdet(covA)[1]
return (traceTerm +
mahalanobisTerm +
constantTerm +
priorLogDeterminantTerm +
variationalLogDeterminantTerm)
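# Sanity check: the KL divergence of a distribution from itself is zero (the
# trace term 0.5*K cancels the constant term, and the log-determinants cancel).
mean = np.zeros(2)
cov = np.array([[1.0, 0.2], [0.2, 1.5]])
assert np.isclose(multivariate_prior_KL(mean, cov, mean, cov), 0.0)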
def contract_internal(self, label1, label2, index1=0, index2=0):
"""By default will contract the first index with label1 with the
first index with label2. index1 and index2 can be specified to contract
indices that are not the first with the specified label."""
label1_indices = [i for i, x in enumerate(self.labels) if x == label1]
label2_indices = [i for i, x in enumerate(self.labels) if x == label2]
index_to_contract1 = label1_indices[index1]
index_to_contract2 = label2_indices[index2]
self.data = np.trace(self.data, axis1=index_to_contract1, axis2=
index_to_contract2)
# The following removes the contracted indices from the list of labels
self.labels = [label for j, label in enumerate(self.labels)
if j not in [index_to_contract1, index_to_contract2]]
# aliases for contract_internal
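# The contraction above is a pairwise index trace; e.g. for a rank-3 tensor T
# of shape (2, 3, 2), tracing axes 0 and 2 leaves a length-3 vector with
# entries sum_i T[i, j, i]:
T = np.arange(12.0).reshape(2, 3, 2)
assert np.trace(T, axis1=0, axis2=2).shape == (3,)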
def estimate_cov(self, samples, mean):
"""
Estimate the empirical covariance of the weight vectors, possibly
with regularization.
"""
d = mean.shape[0]
        # Accumulate the scatter matrix of deviations from the mean;
        # np.outer also handles 1-D weight vectors, for which zm.dot(zm.T)
        # would collapse to a scalar.
        Sigma = np.zeros((d, d))
        for t in range(len(samples)):
            zm = samples[t] - mean
            Sigma = Sigma + np.outer(zm, zm)
        # Normalization factor for the estimate
if self._norm_style == 'ML':
norm = 1.0/(len(samples)-1)
elif self._norm_style == 'Trace':
norm = 1.0/np.trace(Sigma)
else:
raise ValueError('Norm style {} not known'.format(self._norm_style))
Sigma = norm*Sigma
# Add diagonal loading term
self.diag_eps = 0.1*np.mean(np.abs(np.linalg.eig(Sigma)[0])) # TODO
return Sigma + self.diag_eps*self._id
def compute_gradient_totalcverr_wrt_lambda(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: D_tst_tr; 3: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
gradient_cverr_per_fold = np.zeros(ttl_num_folds)
        for jj in range(ttl_num_folds):
            uu = np.shape(matrix_results[3][jj])[0]  # number of training samples
            M_tst_tr = exp(matrix_results[2][jj] * (-0.5) * sigmasq_z ** (-1))
            M_tr_tr = exp(matrix_results[3][jj] * (-0.5) * sigmasq_z ** (-1))
            lower_ZZ = cholesky(M_tr_tr + lambda_val * eye(uu), lower=True)
            ZZ = cho_solve((lower_ZZ, True), eye(uu))
first_term = matrix_results[0][jj].dot(ZZ.dot(ZZ.dot(M_tst_tr.T)))
second_term = M_tst_tr.dot(ZZ.dot(ZZ.dot(
matrix_results[1][jj].dot(ZZ.dot(M_tst_tr.T)))))
gradient_cverr_per_fold[jj] = trace(first_term-second_term)
return 2*sum(gradient_cverr_per_fold)/float(num_sample_cv)
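        # The doubled ``ZZ.dot(ZZ. ...)`` factors above come from the
        # resolvent identity: with Z(lam) = (M_tr_tr + lam*I)^{-1},
        # dZ/dlam = -Z @ Z, so differentiating the CV error in lambda turns
        # each occurrence of Z into -Z^2 by the product rule.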
# lambda = exp(eta)
def compute_gradient_totalcverr_wrt_sqsigma(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: D_tst_tr; 3: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
gradient_cverr_per_fold = np.zeros(ttl_num_folds)
        for jj in range(ttl_num_folds):
            uu = np.shape(matrix_results[3][jj])[0]
            log_M_tr_tst = matrix_results[2][jj].T * (-0.5) * sigmasq_z ** (-1)
            M_tr_tst = exp(log_M_tr_tst)
            log_M_tr_tr = matrix_results[3][jj] * (-0.5) * sigmasq_z ** (-1)
            M_tr_tr = exp(log_M_tr_tr)
            lower_ZZ = cholesky(M_tr_tr + lambda_val * eye(uu), lower=True)
            ZZ = cho_solve((lower_ZZ, True), eye(uu))
term_1 = matrix_results[0][jj].dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(ZZ.dot(M_tr_tst))))
term_2 = -matrix_results[0][jj].dot(ZZ.dot(M_tr_tst*(-log_M_tr_tst*sigmasq_z**(-1))))
term_3 = (sigmasq_z**(-1)*(M_tr_tst.T)*(-log_M_tr_tst.T)).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot(M_tr_tst))))
term_4 = -(M_tr_tst.T).dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(ZZ.dot(matrix_results[1][jj].dot(
ZZ.dot(M_tr_tst))))))
term_5 = -(M_tr_tst.T).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(
ZZ.dot(M_tr_tst))))))
term_6 = (M_tr_tst.T).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot(M_tr_tst*sigmasq_z**(-1)*(-log_M_tr_tst)))))
gradient_cverr_per_fold[jj] = trace(2*term_1 + 2*term_2 + term_3 + term_4 + term_5 + term_6)
return sum(gradient_cverr_per_fold)/float(num_sample_cv)
def compute_totalcverr(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: K_tst_tst; 3: D_tst_tr; 4: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
cverr_per_fold = np.zeros(ttl_num_folds)
        for jj in range(ttl_num_folds):
            uu = np.shape(matrix_results[4][jj])[0]  # number of training samples
            M_tst_tr = exp(matrix_results[3][jj] * (-0.5) * sigmasq_z ** (-1))
            M_tr_tr = exp(matrix_results[4][jj] * (-0.5) * sigmasq_z ** (-1))
            lower_ZZ = cholesky(M_tr_tr + lambda_val * eye(uu), lower=True)
            ZZ = cho_solve((lower_ZZ, True), eye(uu))
first_term = matrix_results[2][jj]
second_term = - matrix_results[0][jj].dot(ZZ.dot(M_tst_tr.T))
third_term = np.transpose(second_term)
fourth_term = M_tst_tr.dot(ZZ.dot(
matrix_results[1][jj].dot(ZZ.dot(M_tst_tr.T))))
cverr_per_fold[jj] = trace(first_term + second_term + third_term + fourth_term)
return sum(cverr_per_fold)/float(num_sample_cv)
def covariance_distance_from_matrices(m1, m2, mul_factor=1):
    """
    Covariance distance between matrices m1 and m2, defined as
    d = factor * (1 - trace(m1 * m2) / (norm_fro(m1) * norm_fro(m2)))
    :param m1: matrix
    :param m2: matrix
    :param mul_factor: multiplicative factor for the formula; it equals the maximal value the distance can reach
    :return: mul_factor * (1 - (np.trace(m1.dot(m2)) / (np.linalg.norm(m1) * np.linalg.norm(m2))))
    """
    # ``np.nan in m`` is always False because nan != nan, so test with np.isnan:
    if not (np.isnan(m1).any() or np.isnan(m2).any()):
        return \
            mul_factor * (1 - (np.trace(m1.dot(m2)) / (np.linalg.norm(m1, ord='fro') * np.linalg.norm(m2, ord='fro'))))
    else:
        return np.nan
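# For a symmetric matrix m, trace(m @ m) equals ||m||_F**2 (the sum of squared
# eigenvalues), so the distance of any symmetric matrix to itself is zero:
m = np.array([[2.0, 0.5], [0.5, 1.0]])
assert np.isclose(covariance_distance_from_matrices(m, m), 0.0)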
# --- global distances: (segm, segm) |-> real
def test_sum_bit0(self):
n = 32
x = np.random.random(n)
# x = np.arange(n).astype(np.float64)
x_gpu = drv.to_device(x)
        trace(x_gpu, np.int32(0), block=(n, 1, 1), grid=(1, 1, 1),
              shared=8 * 128)
x2 = drv.from_device_like(x_gpu, x)
print(x)
print(x2)
assert np.allclose(x2[1], np.sum(x[::2]))
assert np.allclose(x2[0], np.sum(x[1::2]))
def test_sum_bit1(self):
n = 32
x = np.random.random(n)
# x = np.arange(n).astype(np.float64)
x_gpu = drv.to_device(x)
        trace(x_gpu, np.int32(1), block=(n, 1, 1), grid=(1, 1, 1),
              shared=8 * 128)
x2 = drv.from_device_like(x_gpu, x)
print(x)
print(x2)
assert np.allclose(x2[1], np.sum(x[::4]) + np.sum(x[1::4]))
assert np.allclose(x2[0], np.sum(x[2::4]) + np.sum(x[3::4]))
def test_preserve_trace_ground_state(self, dm):
dm.hadamard(2)
assert np.allclose(dm.trace(), 1)
dm.hadamard(4)
assert np.allclose(dm.trace(), 1)
dm.hadamard(0)
assert np.allclose(dm.trace(), 1)
# @pytest.mark.skip
# def test_squares_to_one(self, dm_random):
# dm = dm_random
# a0 = dm.to_array()
# dm.hadamard(4)
# dm.hadamard(4)
# # dm.hadamard(2)
# # dm.hadamard(2)
# # dm.hadamard(0)
# # dm.hadamard(0)
# a1 = dm.to_array()
# assert np.allclose(np.triu(a0), np.triu(a1))
def norm_fro_err(X, W, H, norm_X):
""" Compute the approximation error in Frobeinus norm
norm(X - W.dot(H.T)) is efficiently computed based on trace() expansion
when W and H are thin.
Parameters
----------
X : numpy.array or scipy.sparse matrix, shape (m,n)
W : numpy.array, shape (m,k)
H : numpy.array, shape (n,k)
norm_X : precomputed norm of X
Returns
-------
float
"""
sum_squared = norm_X * norm_X - 2 * np.trace(H.T.dot(X.T.dot(W))) \
+ np.trace((W.T.dot(W)).dot(H.T.dot(H)))
return math.sqrt(np.maximum(sum_squared, 0))
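# Quick numerical check of the trace expansion against the direct norm:
import numpy as np
X = np.random.rand(5, 4)
W = np.random.rand(5, 2)
H = np.random.rand(4, 2)
direct = np.linalg.norm(X - W.dot(H.T))
assert np.isclose(norm_fro_err(X, W, H, np.linalg.norm(X)), direct)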