import numpy as np
from scipy import linalg


def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    Infinitesimal model with SNP-specific heritability derived from annotations.
    Used as the initial values for the MCMC of the non-infinitesimal model.
    """
    num_betas = len(beta_hats)
    updated_betas = np.empty(num_betas)
    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        Li = 1.0 / pr_sigi[start_i: stop_i]   # per-SNP precision (inverse heritability)
        D = reference_ld_mats[i]              # LD matrix for this window
        A = n * D + np.diag(Li)
        A_inv = linalg.pinv(A)
        updated_betas[start_i: stop_i] = np.dot(n * A_inv, beta_hats[start_i: stop_i])  # adjust the beta_hats
    return updated_betas
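A minimal synthetic usage sketch (all values invented: uniform per-SNP heritabilities and identity LD matrices), showing the closed-form shrinkage the function applies:

import numpy as np

# Synthetic call to annopred_inf above; window size, heritabilities and LD
# matrices are made-up illustration values, not real GWAS inputs.
m, window = 200, 100
rng = np.random.RandomState(0)
beta_hats = rng.randn(m) * 0.05
pr_sigi = np.full(m, 1.0 / m)                           # uniform per-SNP heritability
ld_mats = [np.eye(window) for _ in range(m // window)]  # identity LD = uncorrelated SNPs
betas = annopred_inf(beta_hats, pr_sigi, n=10000,
                     reference_ld_mats=ld_mats, ld_window_size=window)
# With identity LD, each beta is shrunk by the factor n / (n + 1/sigma_i):
print(np.allclose(betas, beta_hats * 10000.0 / (10000 + m)))  # True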
def rhoA(self):
    """Computes the rho_A reliability coefficient (Dijkstra-Henseler) for each latent variable."""
    rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
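The S - np.diag(np.diag(S)) idiom used above zeroes a matrix's diagonal while keeping the off-diagonal part; a tiny standalone illustration:

import numpy as np

# The S - np.diag(np.diag(S)) idiom from rhoA: zero the diagonal of a square
# matrix while keeping every off-diagonal entry, without mutating S.
S = np.array([[2.0, 0.3, 0.1],
              [0.3, 1.5, 0.4],
              [0.1, 0.4, 1.0]])
print(S - np.diag(np.diag(S)))
# [[0.  0.3 0.1]
#  [0.3 0.  0.4]
#  [0.1 0.4 0. ]]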
def alpha(self):
    """Computes Cronbach's alpha for each latent variable block."""
    alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
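For comparison, a minimal sketch of the textbook variance-based Cronbach's alpha on made-up item scores (the method above computes a correlation-based variant with a small-sample correction):

import numpy as np

# Textbook Cronbach's alpha on toy data: alpha = p/(p-1) * (1 - sum item var / total var).
items = np.array([[3., 4., 3.],
                  [4., 5., 4.],
                  [2., 3., 2.],
                  [5., 5., 4.]])
p = items.shape[1]
item_var = items.var(axis=0, ddof=1).sum()
total_var = items.sum(axis=1).var(ddof=1)
alpha = p / (p - 1) * (1 - item_var / total_var)
print(round(alpha, 3))  # 0.975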
def _compute_process_and_covariance_matrices(self, dt):
"""Computes the transition and covariance matrix of the process model and measurement model.
Args:
dt (float): Timestep of the discrete transition.
Returns:
F (numpy.ndarray): Transition matrix.
Q (numpy.ndarray): Process covariance matrix.
R (numpy.ndarray): Measurement covariance matrix.
"""
F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)], [np.zeros((3, 3)), np.eye(3)]]))
self.process_matrix = F
q_p = self.process_covariance_position
q_v = self.process_covariance_velocity
Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
r = self.measurement_covariance
R = r * np.eye(4)
self.process_covariance = Q
self.measurement_covariance = R
return F, Q, R
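The same constant-velocity construction in isolation, with invented noise levels (np.block is a modern replacement for the np.bmat call above):

import numpy as np

# Standalone version of the constant-velocity model above, with made-up noise levels.
dt, q_p, q_v, r = 0.1, 0.5, 0.2, 0.05
F = np.block([[np.eye(3), dt * np.eye(3)],
              [np.zeros((3, 3)), np.eye(3)]])   # position integrates velocity
Q = np.diag([q_p] * 3 + [q_v] * 3) ** 2 * dt    # independent per-axis process noise
R = r * np.eye(4)                               # measurement noise (4 measurements here)
print(F.shape, Q.shape, R.shape)                # (6, 6) (6, 6) (4, 4)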
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
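A direct standalone check of the docstring formula with made-up components, mirroring the in-place diagonal update:

import numpy as np

# Check of cov = components_.T * diag(var) * components_ + sigma2 * eye,
# using invented factors; the .flat stride trick updates the diagonal in place.
n_comp, n_feat = 2, 4
W = np.random.randn(n_comp, n_feat)             # rows play the role of components_
exp_var_diff = np.array([3.0, 1.5])             # explained variance above the noise floor
sigma2 = 0.1
cov = np.dot(W.T * exp_var_diff, W)
cov.flat[::n_feat + 1] += sigma2                # add sigma2 to the diagonal in place
assert np.allclose(cov, W.T @ np.diag(exp_var_diff) @ W + sigma2 * np.eye(n_feat))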
def get_scores(self):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = self.confusion_matrix
acc = np.diag(hist).sum() / hist.sum()
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
cls_iu = dict(zip(range(self.n_classes), iu))
return {'Overall Acc: \t': acc,
'Mean Acc : \t': acc_cls,
'FreqW Acc : \t': fwavacc,
'Mean IoU : \t': mean_iu,}, cls_iu
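The same accuracy/IoU arithmetic on a tiny hand-written 2-class confusion matrix:

import numpy as np

# Rows = ground truth, columns = prediction; values are invented.
hist = np.array([[50., 10.],
                 [ 5., 35.]])
acc = np.diag(hist).sum() / hist.sum()
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
print(acc, np.nanmean(iu))   # ~0.85 ~0.735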
def scale_variance(Theta, eps):
    """Rescales a precision matrix so that its corresponding
    covariance matrix has unit variance on the diagonal.
    Parameters
    ----------
    Theta: ndarray
        Precision matrix
    eps: float
        Threshold below which entries are set to zero
    Returns
    -------
    Theta: ndarray
        Precision of the rescaled Sigma
    Sigma: ndarray
        Sigma with ones on the diagonal
    """
Sigma = np.linalg.inv(Theta)
V = np.diag(np.sqrt(np.diag(Sigma) ** -1))
Sigma = V.dot(Sigma).dot(V.T) # = VSV
Theta = np.linalg.inv(Sigma)
Theta[np.abs(Theta) <= eps] = 0.
return Theta, Sigma
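A usage sketch with a made-up 2x2 precision matrix; after rescaling, the implied covariance has a unit diagonal:

import numpy as np

# Theta below is an invented positive-definite precision matrix.
Theta = np.array([[2.0, -0.5],
                  [-0.5, 1.0]])
Theta_s, Sigma_s = scale_variance(Theta, eps=1e-10)
print(np.diag(Sigma_s))   # [1. 1.]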
def sampson_error(F, pts1, pts2):
    """
    Computes the Sampson error for F and point correspondences pts1, pts2.
    The Sampson error is the first-order approximation to the geometric
    error. Note that this is a squared error:
        (x'^T * F * x)^2
        ---------------------------------------------------------
        (F * x)_1^2 + (F * x)_2^2 + (F^T * x')_1^2 + (F^T * x')_2^2
    where (F * x)_i^2 is the square of the i-th entry of the vector F * x.
    """
x1, x2 = unproject_points(pts1).T, unproject_points(pts2).T
Fx1 = np.dot(F, x1)
Fx2 = np.dot(F, x2)
# Sampson distance as error measure
denom = Fx1[0]**2 + Fx1[1]**2 + Fx2[0]**2 + Fx2[1]**2
return ( np.diag(x1.T.dot(Fx2)) )**2 / denom
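A self-contained sketch of the Sampson error pattern (synthetic data; the homogeneous lift is done inline instead of via unproject_points): for a pure x-translation, perfect correspondences give zero error.

import numpy as np

# F is the fundamental matrix of a pure x-translation, F = [e']_x with e' = (1, 0, 0).
F = np.array([[0., 0., 0.],
              [0., 0., -1.],
              [0., 1., 0.]])
pts = np.random.rand(10, 2)
pts2 = pts + np.array([0.3, 0.0])        # translate along x: epipolar constraint holds
x1 = np.vstack([pts.T, np.ones(10)])     # homogeneous points, 3xN
x2 = np.vstack([pts2.T, np.ones(10)])
Fx1 = F.dot(x1)
Ftx2 = F.T.dot(x2)
num = np.sum(x2 * Fx1, axis=0) ** 2      # (x2^T F x1)^2 per correspondence
denom = Fx1[0]**2 + Fx1[1]**2 + Ftx2[0]**2 + Ftx2[1]**2
print(np.allclose(num / denom, 0.0))     # True: points move along their epipolar lines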
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
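The scaling pattern shared by these initwithsize variants, in isolation (condition number and dimension invented; the rotations are omitted): a diagonal transform whose scales span 1 to sqrt(condition).

import numpy as np

# The conditioning trick used in the initwithsize variants: geometrically
# spaced diagonal scales; the real code multiplies rotation matrices on both sides.
condition, dim = 1e6, 5
scales = (condition ** .5) ** np.linspace(0, 1, dim)
linearTF = np.diag(scales)
print(scales[0], scales[-1])   # 1.0 1000.0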
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = self.condition ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim),
diag(((self.condition / 10.)**.5) ** linspace(0, 1, dim)))
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim) # CAVE?
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
K = np.arange(0, 12)
self.aK = np.reshape(0.5 ** K, (1, 12))
self.bK = np.reshape(3. ** K, (1, 12))
self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5)) # optimal value
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = compute_xopt(self.rseed, dim)
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
self.arrxopt = resize(self.xopt, curshape)
def initwithsize(self, curshape, dim):
# DIM-dependent initialization
if self.dim != dim:
if self.zerox:
self.xopt = zeros(dim)
else:
self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
self.rotation = compute_rotation(self.rseed + 1e6, dim)
self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
# decouple scaling from function definition
self.linearTF = dot(self.linearTF, self.rotation)
# DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
if self.lastshape != curshape:
self.dim = dim
self.lastshape = curshape
# self.arrxopt = resize(self.xopt, curshape)
self.arrscales = resize(2. * sign(self.xopt), curshape) # makes up for xopt
# transformations.py (project: Neural-Networks-for-Inverse-Kinematics, author: paramrajpura)
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def distribution_parameters(self, parameter_name):
    if parameter_name == 'trend':
        E = dot(dot(self.derivative_matrix.T,
                    inv(diag(self.parameters.list['omega'].current_value))),
                self.derivative_matrix)
        mean = dot(inv(eye(self.size) + E), self.data)
        cov = self.parameters.list['sigma2'].current_value * inv(eye(self.size) + E)
        return {'mean': mean, 'cov': cov}
    elif parameter_name == 'sigma2':
        E = dot(dot(self.derivative_matrix.T,
                    inv(diag(self.parameters.list['omega'].current_value))),
                self.derivative_matrix)
        trend = self.parameters.list['trend'].current_value
        residual = self.data - dot(eye(self.size), trend)
        pos = self.size
        loc = 0
        scale = 0.5 * dot(residual.T, residual) + 0.5 * dot(dot(trend.T, E), trend)
    elif parameter_name == 'lambda2':
        trend = self.parameters.list['trend'].current_value
        pos = self.size - self.total_variation_order - 1 + self.alpha
        loc = 0.5 * norm(dot(self.derivative_matrix, trend), ord=1) \
            / self.parameters.list['sigma2'].current_value + self.rho
        scale = 1
    elif parameter_name == 'omega':
        lambda2 = self.parameters.list['lambda2'].current_value
        sigma2 = self.parameters.list['sigma2'].current_value
        pos = [sqrt((lambda2 ** 2) * sigma2 / dj ** 2)
               for dj in dot(self.derivative_matrix,
                             self.parameters.list['trend'].current_value)]
        loc = 0
        scale = lambda2 ** 2
    return {'pos': pos, 'loc': loc, 'scale': scale}
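A sketch of what derivative_matrix plausibly holds in this trend-filtering sampler (my assumption, not confirmed by the snippet): the k-th order discrete difference operator, built from np.eye.

import numpy as np

# Assumed shape of derivative_matrix: the k-th order difference operator
# used in trend filtering, obtained by differencing the identity repeatedly.
def difference_matrix(size, order=2):
    D = np.eye(size)
    for _ in range(order):
        D = D[1:, :] - D[:-1, :]   # first-order difference, applied repeatedly
    return D

D = difference_matrix(6, order=2)
print(D.shape)   # (4, 6)
print(D[0])      # [ 1. -2.  1.  0.  0.  0.]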
def test_psi(adjcube):
"""Tests retrieval of the wave functions and eigenvalues.
"""
from pydft.bases.fourier import psi, O, H
cell = adjcube
V = QHO(cell)
W = W4(cell)
Ns = W.shape[1]
Psi, epsilon = psi(V, W, cell, forceR=False)
#Make sure that the eigenvalues are real.
    assert abs(np.sum(np.imag(epsilon))) < 1e-13
checkI = np.dot(Psi.conjugate().T, O(Psi, cell))
assert abs(np.sum(np.diag(checkI))-Ns) < 1e-13 # Should be the identity
assert np.abs(np.sum(checkI)-Ns) < 1e-13
checkD = np.dot(Psi.conjugate().T, H(V, Psi, cell))
diagsum = np.sum(np.diag(checkD))
assert np.abs(np.sum(checkD)-diagsum) < 1e-12 # Should be diagonal
# Should match the diagonal elements of previous matrix
assert np.allclose(np.diag(checkD), epsilon)
def computePolarVecs(self, karg=False):
    N = len(self.times)
    L = np.reshape(self.L, (3, N))
    if karg is False:
        A = self.computeRotMatrix()
    elif np.size(karg) == 3:
        A = self.computeRotMatrix(karg)
    elif np.size(karg) == 9:
        A = karg
    q = np.zeros((6, N))
    for pp in range(0, N):
        Lpp = np.diag(L[:, pp])                           # principal values on the diagonal
        p = np.dot(A, np.dot(Lpp, A.T))                   # rotate into the world frame
        q[:, pp] = np.r_[p[:, 0], p[[1, 2], 1], p[2, 2]]  # pack the 6 unique entries
    return q
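The symmetric-matrix packing used above, in isolation: p = A diag(L) A^T has six unique entries, stored as [p00 p10 p20 p11 p21 p22].

import numpy as np

# Packing demo with a random orthogonal A and invented principal values L.
A = np.linalg.qr(np.random.randn(3, 3))[0]      # a random orthogonal matrix
p = A @ np.diag([1.0, 2.0, 3.0]) @ A.T          # symmetric 3x3
q = np.r_[p[:, 0], p[[1, 2], 1], p[2, 2]]       # its 6 unique entries
print(q.shape)                                  # (6,)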
def toa_normalize(x0, y0):
    """Normalizes TOA positions: translates the first anchor to the origin
    and rotates so the remaining anchor block is upper triangular."""
    xdim = x0.shape[0]
    m = x0.shape[1]
    n = y0.shape[1]
    t = -x0[:, 0:1]                       # keep 2-D so the tiling broadcasts correctly
    x = x0 + np.tile(t, (1, m))
    y = y0 + np.tile(t, (1, n))
    qr_a = x[:, 1:(1 + xdim)]             # columns 2..xdim+1 in 1-based notation
    q, r = scipy.linalg.qr(qr_a)
    x = np.dot(q.conj().T, x)             # matrix products, not elementwise '*'
    y = np.dot(q.conj().T, y)
    M = np.diag(np.sign(np.diag(r)))      # fix the sign ambiguity of the QR factors
    x1 = np.dot(M, x)
    y1 = np.dot(M, y)
    return x1, y1
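A sanity sketch for the toa_normalize above (made-up 3-D anchor and receiver positions): the first anchor lands at the origin.

import numpy as np
import scipy.linalg

# Invented 3-D positions: 5 anchors (x0) and 4 receivers (y0).
x0 = np.random.randn(3, 5)
y0 = np.random.randn(3, 4)
x1, y1 = toa_normalize(x0, y0)
print(np.allclose(x1[:, 0], 0))   # True: the first anchor is translated to the origin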
# matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
# Params:
# J - column indices
nnz_i = len(J)
residual_i = y_i - mu0 - c[J]
prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
post_Phi_i = prior_Phi + \
np.dot(v_T.T,
np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T) # Weighted sum of v_j * v_j.T
post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
C, lower = scipy.linalg.cho_factor(post_Phi_i)
post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
# Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)),
lower=lower)
ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
r_i = ru_i[0]
u_i = ru_i[1:]
return r_i, u_i
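The sampling trick used above, in isolation: a draw from N(0, Phi^{-1}) via a triangular solve against the Cholesky factor of the precision Phi, verified empirically on an invented 2x2 precision.

import numpy as np
import scipy.linalg

# Solve C z = eps, where C is the (upper) Cholesky factor of the precision Phi;
# z then has covariance (C^T C)^{-1} = Phi^{-1}.
Phi = np.array([[4.0, 1.0],
                [1.0, 3.0]])
C, lower = scipy.linalg.cho_factor(Phi)
draws = np.array([scipy.linalg.solve_triangular(C, np.random.randn(2), lower=lower)
                  for _ in range(20000)])
print(np.allclose(np.cov(draws.T), np.linalg.inv(Phi), atol=0.05))   # True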
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):
prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
nnz_j = len(I)
residual_j = y_j - mu0 - r[I]
u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
post_Phi_j = prior_Phi + \
np.dot(u_T.T,
np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T) # Weighted sum of u_i * u_i.T
post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))
C, lower = scipy.linalg.cho_factor(post_Phi_j)
post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)
# Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)),
lower=lower)
cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
c_j = cv_j[0]
v_j = cv_j[1:]
return c_j, v_j
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
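A usage sketch for the masked-aware diag above (assuming the wrapper and numpy.ma are in scope): masked entries stay masked on the constructed diagonal.

import numpy.ma as ma

# The mask of v propagates into the mask of the diagonal matrix.
v = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
D = diag(v)          # the masked-aware wrapper defined above
print(D.mask[1, 1])  # True: the masked element stays masked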
def predict_log_likelihood(self, paths, latents):
if self.recurrent:
observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=2) # latents must match first 2dim: (batch,time)
else:
observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=1)
latents = np.concatenate(latents, axis=0)
if self.noisify_traj_coef:
noise = np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
cov=np.diag(np.mean(np.abs(obs_actions),
axis=0) * self.noisify_traj_coef),
size=np.shape(obs_actions)[0])
obs_actions += noise
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
return self._regressor.predict_log_likelihood(obs_actions, latents) # see difference with fit above...
def lowb_mutual(self, paths, times=(0, None)):
if self.recurrent:
observations = np.array([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
actions = np.array([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=2)
latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])
else:
observations = np.concatenate([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
actions = np.concatenate([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=1)
latents = np.concatenate([p['agent_infos']["latents"][times[0]:times[1]] for p in paths])
if self.noisify_traj_coef:
obs_actions += np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions,axis=0)),
cov=np.diag(np.mean(np.abs(obs_actions),
axis=0) * self.noisify_traj_coef),
size=np.shape(obs_actions)[0])
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info) # sum of entropies latents in
return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))
def gaussianWeight(kernelSize, even=False):
    if even:
        weight = np.ones([kernelSize, kernelSize])
        weight = weight.reshape((1, kernelSize**2))
        weight = np.array(weight)[0]
        weight = np.diag(weight)
        return weight
    SIGMA = 1        # the standard deviation of the normal curve
    CORRELATION = 0  # see wiki for multivariate normal distributions
    weight = np.zeros([kernelSize, kernelSize])
    cpt = kernelSize % 2 + kernelSize // 2  # gets the center point
    for i in range(len(weight)):
        for j in range(len(weight)):
            ptx = i + 1
            pty = j + 1
            weight[i, j] = 1 / (2*np.pi*SIGMA**2) / (1-CORRELATION**2)**.5 \
                * np.exp(-1/(2*(1-CORRELATION**2)) * ((ptx-cpt)**2+(pty-cpt)**2) / SIGMA**2)
    # weight[i,j] = 1/SIGMA/(2*np.pi)**.5*np.exp(-(pt-cpt)**2/(2*SIGMA**2))
    weight = weight.reshape((1, kernelSize**2))
    weight = np.array(weight)[0]  # convert to a 1D array
    weight = np.diag(weight)      # convert to an n**2 x n**2 diagonal matrix
    return weight
    # return np.diag(weight)
def get_sigma(self):
"""Returns the co-variance matrix of the spline
Return:
(np.ndarray):
Co-variance matrix for the EOS shape is (nxn) where n is the dof
of the model
"""
sigma = self.get_option('spline_sigma')
return np.diag((sigma * self.prior.get_dof())**2)
# alpha = 3/self.shape()\
# * np.log(self.get_option('spline_max')
# /self.get_option('spline_min'))
# beta = np.log(1 + self.get_option('spline_sigma'))
#return self.gram
def test_model_pq(self):
"""Test the model PQ matrix generation
"""
new_model = self.bayes.update(models={
'simp': self.model.update_dof([4, 2])})
P, q = new_model._get_model_pq()
epsilon = np.array([2, 1])
sigma = inv(np.diag(np.ones(2)))
P_true = sigma
q_true = -np.dot(epsilon, sigma)
npt.assert_array_almost_equal(P, P_true, decimal=8)
npt.assert_array_almost_equal(q, q_true, decimal=8)