def initialize_estimate(self, estimate_id, initial_state):
"""Initialize a state estimate with identity covariance.
The initial estimate is saved in the `self.estimates` dictionary.
The timestamp in the `self.estimate_times` is updated.
Args:
estimate_id (int): ID of the tracked target.
initial_state (int): Initial state of the estimate.
Returns:
X (numpy.ndarray): Solution of equation.
"""
x = initial_state
P = self.initial_position_covariance * np.eye(6)
P[3:6, 3:6] = 0
estimate = UWBTracker.StateEstimate(x, P)
self.estimates[estimate_id] = estimate
self.estimate_times[estimate_id] = rospy.get_time()
self.ikf_prev_outlier_flags[estimate_id] = False
self.ikf_outlier_counts[estimate_id] = 0
def _compute_process_and_covariance_matrices(self, dt):
"""Computes the transition and covariance matrix of the process model and measurement model.
Args:
dt (float): Timestep of the discrete transition.
Returns:
F (numpy.ndarray): Transition matrix.
Q (numpy.ndarray): Process covariance matrix.
R (numpy.ndarray): Measurement covariance matrix.
"""
F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)], [np.zeros((3, 3)), np.eye(3)]]))
self.process_matrix = F
q_p = self.process_covariance_position
q_v = self.process_covariance_velocity
Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
r = self.measurement_covariance
R = r * np.eye(4)
self.process_covariance = Q
self.measurement_covariance = R
return F, Q, R
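# A minimal standalone sketch (not part of the tracker) of what the constant-
# velocity transition matrix F built above does: positions advance by dt times
# the velocities, and velocities are carried over unchanged.
import numpy as np

dt = 0.1
F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)],
                      [np.zeros((3, 3)), np.eye(3)]]))
x = np.array([0., 0., 0., 1., 2., 3.])  # [position, velocity]
print(F.dot(x))  # -> [0.1 0.2 0.3 1.  2.  3. ]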
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
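# A quick usage check, assuming this is scikit-learn's PCA.get_covariance:
# with all components retained, the generative covariance should reproduce
# the empirical covariance of the training data.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
pca = PCA(n_components=3).fit(X)
emp_cov = np.cov(X, rowvar=False)  # ddof=1, matching explained_variance_
print(np.allclose(pca.get_covariance(), emp_cov))  # True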
def infExact_scipy_post(self, K, covars, y, sig2e, fixedEffects):
n = y.shape[0]
#mean vector
m = covars.dot(fixedEffects)
if (K.shape[1] < K.shape[0]): K_true = K.dot(K.T)
else: K_true = K
if sig2e<1e-6:
L = la.cholesky(K_true + sig2e*np.eye(n), overwrite_a=True, check_finite=False) #Cholesky factor of covariance with noise
sl = 1
pL = -self.solveChol(L, np.eye(n)) #L = -inv(K+inv(sW^2))
else:
L = la.cholesky(K_true/sig2e + np.eye(n), overwrite_a=True, check_finite=False) #Cholesky factor of B
sl = sig2e
pL = L #L = chol(eye(n)+sW*sW'.*K)
alpha = self.solveChol(L, y-m, overwrite_b=False) / sl
post = dict([])
post['alpha'] = alpha #return the posterior parameters
post['sW'] = np.ones(n) / np.sqrt(sig2e) #sqrt of noise precision vector
post['L'] = pL
return post
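# A numpy sanity check of the identity behind the sig2e >= 1e-6 branch above
# (solveChol is assumed to be a standard Cholesky solve; scipy's cho_solve
# stands in for it here, and the mean m is taken as zero): the returned alpha
# solves (K + sig2e*I) alpha = y.
import numpy as np
import scipy.linalg as la

rng = np.random.RandomState(0)
n = 5
A = rng.randn(n, n)
K = A.dot(A.T) + n * np.eye(n)  # an SPD covariance matrix
y = rng.randn(n)
sig2e = 0.5
L = la.cholesky(K / sig2e + np.eye(n))       # upper-triangular factor of B
alpha = la.cho_solve((L, False), y) / sig2e  # divide by sl = sig2e
print(np.allclose((K + sig2e * np.eye(n)).dot(alpha), y))  # True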
def BorthogonalityTest(B, U):
"""
    Test the Frobenius norm of U^T B U - I_k
"""
BU = np.zeros(U.shape)
Bu = Vector()
u = Vector()
B.init_vector(Bu,0)
B.init_vector(u,1)
nvec = U.shape[1]
for i in range(0,nvec):
u.set_local(U[:,i])
B.mult(u,Bu)
BU[:,i] = Bu.get_local()
UtBU = np.dot(U.T, BU)
err = UtBU - np.eye(nvec, dtype=UtBU.dtype)
print("|| UtBU - I ||_F = ", np.linalg.norm(err, 'fro') )
def KeyGen(**kwargs):
    '''
    Key generation, following Appendix B of the BLISS paper.
    m_bar = m + n
    Output:
    A: public key, an n x m_bar numpy array
    S: secret key, an m_bar x n numpy array
    '''
q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
Aq_bar = util.crypt_secure_matrix(-(q-1)/2, (q-1)/2, n, m)
S_bar = util.crypt_secure_matrix(-(2)**alpha, (2)**alpha, m, n) # alpha is small enough, we need not reduce (modq)
S = np.vstack((S_bar, np.eye(n, dtype = int))) # dimension is m_bar x n, Elements are in Z mod(2q)
A = np.hstack((2*Aq_bar, q * np.eye(n, dtype = int) - 2*np.matmul(Aq_bar,S_bar))) # dimension is n x m_bar , Elements are in Z mod(2q)
#return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
return util.matrix_to_Zq(A, 2*q), S
def test():
# Classical SIS parameters
n, m, alpha, q = 128, 872, 1, 114356107
kappa = 20
#Discrete Gaussian Parameters
sd = 300
eta = 1.2
A, S = KeyGen(q = q,n = n,m = m,alpha = alpha)
    #print(np.array(np.matmul(A, S) - q*np.eye(n), dtype=float)/(2*q))  # to check A.S == q*I (mod 2q)
    z, c = Sign(msg = "Hello Bob",A = A,S = S,m = m,n = n,sd = sd,q = q,M = 3.0,kappa = kappa)
    print(z)
    print(c)
    print(Verify(msg = "Hello Bob", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hello Robert", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hello Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hola Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
def __init__(self, X, Y, R=None, t=None, s=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
if X.shape[1] != Y.shape[1]:
        raise ValueError('Both point clouds must have the same number of dimensions!')
self.X = X
self.Y = Y
self.TY = Y
(self.N, self.D) = self.X.shape
(self.M, _) = self.Y.shape
self.R = np.eye(self.D) if R is None else R
self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
self.s = 1 if s is None else s
self.sigma2 = sigma2
self.iteration = 0
self.maxIterations = maxIterations
self.tolerance = tolerance
self.w = w
self.q = 0
self.err = 0
def __init__(self, X, Y, B=None, t=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
if X.shape[1] != Y.shape[1]:
        raise ValueError('Both point clouds must have the same number of dimensions!')
self.X = X
self.Y = Y
self.TY = Y
(self.N, self.D) = self.X.shape
(self.M, _) = self.Y.shape
self.B = np.eye(self.D) if B is None else B
self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
self.sigma2 = sigma2
self.iteration = 0
self.maxIterations = maxIterations
self.tolerance = tolerance
self.w = w
self.q = 0
self.err = 0
def get_loss(pred, label, end_points, reg_weight=0.001):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
# Enforce the transformation as orthogonal matrix
transform = end_points['transform'] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.summary.scalar('mat loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
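# The orthogonality regularizer above, restated as a minimal numpy sketch:
# it penalizes ||T T^t - I||_F^2 / 2 so the learned KxK feature transform
# stays close to orthogonal.
import numpy as np

def orthogonality_penalty(T):  # T: batch of KxK transforms, shape (B, K, K)
    K = T.shape[-1]
    diff = np.matmul(T, np.transpose(T, (0, 2, 1))) - np.eye(K)
    return 0.5 * np.sum(diff ** 2)  # same reduction as tf.nn.l2_loss

T = np.stack([np.eye(3), 2 * np.eye(3)])
print(orthogonality_penalty(T))  # -> 13.5 (identity contributes 0, the scaled copy 13.5)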
def get_loss(pred, label, end_points, reg_weight=0.001):
""" pred: BxNxC,
label: BxN, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.scalar_summary('classify loss', classify_loss)
# Enforce the transformation as orthogonal matrix
transform = end_points['transform'] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.scalar_summary('mat_loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
label_loss = tf.reduce_mean(per_instance_label_loss)
# size of seg_pred is batch_size x point_num x part_cat_num
# size of seg is batch_size x point_num
per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
seg_loss = tf.reduce_mean(per_instance_seg_loss)
per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
# Enforce the transformation as orthogonal matrix
transform = end_points['transform'] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1])) - tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3
return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
def refit_model(self):
"""Learns a new surrogate model using the data observed so far.
"""
# only fit the model if there is data for it.
if len(self.known_models) > 0:
self._build_feature_maps(self.known_models, self.ngram_maxlen, self.thres)
X = sp.vstack([ self._compute_features(mdl)
for mdl in self.known_models], "csr")
y = np.array(self.known_scores, dtype='float64')
#A = np.dot(X.T, X) + lamb * np.eye(X.shape[1])
#b = np.dot(X.T, y)
self.surr_model = lm.Ridge(self.lamb_ridge)
self.surr_model.fit(X, y)
        # NOTE: if the search space has holes, this breaks; it needs a try/except guard.
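# The commented-out normal equations above, spelled out: ridge regression
# solves (X^T X + lamb * I) w = X^T y, which is what lm.Ridge computes (with
# fit_intercept=False the two agree exactly). A small sketch:
import numpy as np
from sklearn import linear_model as lm

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = rng.randn(50)
lamb = 0.1
w_closed = np.linalg.solve(X.T.dot(X) + lamb * np.eye(4), X.T.dot(y))
ridge = lm.Ridge(alpha=lamb, fit_intercept=False).fit(X, y)
print(np.allclose(w_closed, ridge.coef_))  # True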
def covariance_matrix(self):
if self._debug:
# return None
ka = self.k_active
if ka > 0:
C = np.eye(self.N) + np.dot(self.V[:ka].T * self.S[:ka],
self.V[:ka])
C = (C * self.D).T * self.D
else:
C = np.diag(self.D**2)
C *= self.sigma**2
else:
# Fake Covariance Matrix for Speed
C = np.ones(1)
self.B = np.ones(1)
return C
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
num_inputs=df_dim*4
theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
x = input
x=tf.reshape(x, [batchsize,num_inputs])
activation = tf.matmul(x, W)
activation = tf.reshape(activation,[-1,num_kernels,dim_per_kernel])
abs_dif = tf.mul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
1-tf.expand_dims(tf.constant(np.eye(batchsize),dtype=np.float32),1))
f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
print(f.get_shape())
print(input.get_shape())
return tf.concat(1,[x, f])
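# What the 1 - eye(batchsize) factor above is for, in a tiny numpy sketch:
# it masks out each sample's distance to itself, so the minibatch features
# only measure closeness to *other* samples in the batch.
import numpy as np

batchsize = 4
dist = np.abs(np.random.randn(batchsize, batchsize))  # stand-in pairwise distances
masked = dist * (1 - np.eye(batchsize))
print(masked.diagonal())  # all zeros: self-distances are excluded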
def _build_policy(env, predictor, epsilon):
eye = np.eye(env.num_states)
q_values = predictor.predict(
{str(i): eye[i]
for i in range(env.num_states)}
)
policy_vector = [
env.ACTIONS[np.argmax([q_values[action][i] for action in env.ACTIONS])]
for i in range(env.num_states)
]
def policy(state) -> str:
if np.random.random() < epsilon:
return np.random.choice(env.ACTIONS)
else:
return policy_vector[state]
return policy
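# The eye[i] lookup above is one-hot encoding: row i of the identity matrix
# is the indicator vector for state i.
import numpy as np

eye = np.eye(4)
print(eye[2])  # [0. 0. 1. 0.]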
def export_data(prediction_dir, nii_image_dir, tfrecords_dir, export_dir, transformation=None):
for file_path in os.listdir(prediction_dir):
name, prediction, probability = read_prediction_file(os.path.join(prediction_dir, file_path))
if transformation:
image, ground_truth = get_original_image(os.path.join(tfrecords_dir, name + '.tfrecord'), False)
prediction = transformation.transform_image(prediction, probability, image)
# build cv_predictions .nii image
img = nib.Nifti1Image(prediction, np.eye(4))
img.set_data_dtype(dtype=np.uint8)
path = os.path.join(nii_image_dir, name)
adc_name = next(l for l in os.listdir(path) if 'MR_ADC' in l)
export_image = nib.load(os.path.join(nii_image_dir, name, adc_name, adc_name + '.nii'))
i = export_image.get_data()
i[:] = img.get_data()
# set name to specification and export
_id = next(l for l in os.listdir(path) if 'MR_MTT' in l).split('.')[-1]
export_path = os.path.join(export_dir, 'SMIR.' + name + '.' + _id + '.nii')
nib.save(export_image, os.path.join(export_path))
def eye(sites, ldim):
"""Returns a MPA representing the identity matrix
:param sites: Number of sites
:param ldim: Int-like local dimension or iterable of local dimensions
:returns: Representation of the identity matrix as MPA
>>> I = eye(4, 2)
>>> I.ranks, I.shape
((1, 1, 1), ((2, 2), (2, 2), (2, 2), (2, 2)))
>>> I = eye(3, (3, 4, 5))
>>> I.shape
((3, 3), (4, 4), (5, 5))
"""
    if isinstance(ldim, collections.abc.Iterable):
ldim = tuple(ldim)
assert len(ldim) == sites
else:
ldim = it.repeat(ldim, sites)
return mp.MPArray.from_kron(map(np.eye, ldim))
def _embed_ltens_identity(mpa, embed_tensor=None):
"""Embed with identity matrices by default.
:param embed_tensor: If the MPAs do not have two physical legs or
have non-square physical dimensions, you must provide an
embedding tensor. The default is to use the square identity
matrix, assuming that the size of the two physical legs is the
same at each site.
:returns: `embed_tensor` with one size-one virtual leg added at each
end.
"""
if embed_tensor is None:
pdims = mpa.shape[0]
assert len(pdims) == 2 and pdims[0] == pdims[1], (
"For ndims != 2 or non-square shape, you must supply a tensor"
"for embedding")
embed_tensor = np.eye(pdims[0])
embed_ltens = embed_tensor[None, ..., None]
return embed_ltens
def test_povm_normalization_ic(dim):
for name, constructor in ALL_POVMS.items():
# Check that the POVM is normalized: elements must sum to the identity
current_povm = constructor(dim)
element_sum = sum(iter(current_povm))
assert_array_almost_equal(element_sum, np.eye(dim))
# Check that the attribute that says whether the POVM is IC is correct.
linear_inversion_recons = np.dot(current_povm.linear_inversion_map,
current_povm.probability_map)
if current_povm.informationally_complete:
assert_array_almost_equal(
linear_inversion_recons, np.eye(dim**2),
err_msg='POVM {} is not informationally complete'.format(name))
else:
assert np.abs(linear_inversion_recons - np.eye(dim**2)).max() > 0.1, \
'POVM {} is informationally complete'.format(name)
def test_povm_ic_mpa(nr_sites, local_dim, rank, rgen):
# Check that the tensor product of the PauliGen POVM is IC.
paulis = povm.pauli_povm(local_dim)
inv_map = mp_from_array_repeat(paulis.linear_inversion_map, nr_sites)
probab_map = mp_from_array_repeat(paulis.probability_map, nr_sites)
reconstruction_map = mp.dot(inv_map, probab_map)
eye = factory.eye(nr_sites, local_dim**2)
assert mp.norm(reconstruction_map - eye) < 1e-5
# Check linear inversion for a particular example MPA.
# Linear inversion works for arbitrary matrices, not only for states,
# so we test it for an arbitrary MPA.
# Normalize, otherwise the absolute error check below will not work.
mpa = factory.random_mpa(nr_sites, local_dim**2, rank,
dtype=np.complex_, randstate=rgen, normalized=True)
probabs = mp.dot(probab_map, mpa)
recons = mp.dot(inv_map, probabs)
assert mp.norm(recons - mpa) < 1e-6
def test_randomized_svd(rows, cols, rank, dtype, transpose, n_iter, target_gen,
rgen):
    rank = min(rows, cols) - 2 if rank == 'fullrank' else rank
A = target_gen(rows, cols, rank=rank, randstate=rgen, dtype=dtype)
U_ref, s_ref, V_ref = utils.truncated_svd(A, k=rank)
U, s, V = em.randomized_svd(A, rank, transpose=transpose, randstate=rgen,
n_iter=n_iter)
error_U = np.abs(U.conj().T.dot(U_ref)) - np.eye(rank)
assert_allclose(np.linalg.norm(error_U), 0, atol=1e-3)
error_V = np.abs(V.dot(V_ref.conj().T)) - np.eye(rank)
assert_allclose(np.linalg.norm(error_V), 0, atol=1e-3)
assert_allclose(s.ravel() - s_ref, 0, atol=1e-3)
# Check that singular values are returned in descending order
assert_array_equal(s, np.sort(s)[::-1])
def hessian(self, x, d=None):
"""
    Computes the Hessian matrix of the pair potential.
"""
d = calc_distances(x) if d is None else d
if d.ndim == 1: d = squareform(d)
H = np.zeros((3*len(x), 3*len(x)))
n = self.n
for i in range(len(x)):
for j in range(len(x)):
if j == i: continue
dx = x[i]-x[j]
r = d[i,j]
h = n / r**(0.5*n+2) * ((n+2) * np.multiply.outer(dx,dx) - np.eye(3) * r)
H[3*i:3*(i+1), 3*j:3*(j+1)] = -h
H[3*i:3*(i+1), 3*i:3*(i+1)] += h
return H
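# A finite-difference sanity check of the Hessian above, as a standalone
# sketch. It assumes the underlying energy is E = sum_{i<j} |x_i - x_j|**(-n)
# with r holding *squared* pair distances, which is consistent with the block
# formula h = n / r**(n/2 + 2) * ((n + 2) dx dx^T - r I).
import numpy as np

def energy(x, n):
    E = 0.0
    for i in range(len(x)):
        for j in range(i + 1, len(x)):
            E += np.sum((x[i] - x[j]) ** 2) ** (-0.5 * n)
    return E

def pair_hessian(x, n):  # same arithmetic as the method above, self.n -> n
    H = np.zeros((3 * len(x), 3 * len(x)))
    for i in range(len(x)):
        for j in range(len(x)):
            if j == i: continue
            dx = x[i] - x[j]
            r = np.sum(dx ** 2)
            h = n / r**(0.5*n+2) * ((n+2) * np.multiply.outer(dx, dx) - np.eye(3) * r)
            H[3*i:3*(i+1), 3*j:3*(j+1)] = -h
            H[3*i:3*(i+1), 3*i:3*(i+1)] += h
    return H

rng = np.random.RandomState(0)
x, n, eps = rng.randn(3, 3), 6, 1e-4
f = lambda v: energy(v.reshape(3, 3), n)
v0 = x.ravel()
H_fd = np.zeros((9, 9))
for k in range(9):
    for l in range(9):
        d = np.zeros(9); d[k] = eps
        e = np.zeros(9); e[l] = eps
        H_fd[k, l] = (f(v0+d+e) - f(v0+d-e) - f(v0-d+e) + f(v0-d-e)) / (4 * eps**2)
print(np.allclose(H_fd, pair_hessian(x, n), rtol=1e-3, atol=1e-5))  # True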
def apply_gate(self,gate,on_qubit_name):
on_qubit=self.qubits.get_quantum_register_containing(on_qubit_name)
if len(on_qubit.get_noop()) > 0:
print "NOTE this qubit has been measured previously, there should be no more gates allowed but we are reverting that measurement for consistency with IBM's language"
on_qubit.set_state(on_qubit.get_noop())
on_qubit.set_noop([])
if not on_qubit.is_entangled():
if on_qubit.get_num_qubits()!=1:
raise Exception("This qubit is not marked as entangled but it has an entangled state")
on_qubit.set_state(gate*on_qubit.get_state())
else:
if not on_qubit.get_num_qubits()>1:
raise Exception("This qubit is marked as entangled but it does not have an entangled state")
n_entangled=len(on_qubit.get_entangled())
apply_gate_to_qubit_idx=[qb.name for qb in on_qubit.get_entangled()].index(on_qubit_name)
if apply_gate_to_qubit_idx==0:
entangled_gate=gate
else:
entangled_gate=Gate.eye
for i in range(1,n_entangled):
if apply_gate_to_qubit_idx==i:
entangled_gate=np.kron(entangled_gate,gate)
else:
entangled_gate=np.kron(entangled_gate,Gate.eye)
on_qubit.set_state(entangled_gate*on_qubit.get_state())
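# The np.kron chain above lifts a single-qubit gate to the whole register by
# placing it at the target slot and identities elsewhere (Gate.eye is assumed
# to be the 2x2 identity). A minimal sketch for X on qubit 1 of two qubits:
import numpy as np

I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
full_gate = np.kron(I, X)       # identity on qubit 0, X on qubit 1
state = np.array([1, 0, 0, 0])  # |00>
print(full_gate.dot(state))     # -> [0 1 0 0], i.e. |01>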
def _matrix_inverse(self, matrix):
"""
Computes inverse of a matrix.
"""
matrix = np.array(matrix)
n_features = matrix.shape[0]
rank = np.linalg.matrix_rank(matrix)
if rank == n_features:
return np.linalg.inv(matrix)
else:
# Matrix is not full rank, so use Hadi's technique to compute inverse
# Reference: Ali S. Hadi (1992) "Identifying Multiple Outliers in Multivariate Data" eg. 2.3, 2.4
eigenValues, eigenVectors = np.linalg.eig(matrix)
eigenValues = np.abs(eigenValues) # to deal with -0 values
idx = eigenValues.argsort()[::-1]
eigenValues = eigenValues[idx]
eigenVectors = eigenVectors[:, idx]
s = eigenValues[eigenValues != 0].min()
w = [1 / max(e, s) for e in eigenValues]
W = w * np.eye(n_features)
return eigenVectors.dot(W).dot(eigenVectors.T)
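# A small sketch contrasting Hadi's clamped-eigenvalue inverse above with the
# Moore-Penrose pseudo-inverse on a rank-deficient matrix: zero eigenvalues
# are floored at the smallest nonzero one instead of being discarded.
import numpy as np

M = np.diag([2., 1., 0.])  # rank 2
eigenValues, eigenVectors = np.linalg.eig(M)
s = eigenValues[eigenValues != 0].min()  # smallest nonzero eigenvalue -> 1.0
W = np.array([1 / max(e, s) for e in eigenValues]) * np.eye(3)
print(np.diag(eigenVectors.dot(W).dot(eigenVectors.T)))  # [0.5 1.  1. ]
print(np.diag(np.linalg.pinv(M)))                        # [0.5 1.  0. ]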
def swap_affine(axes):
""" Build a correction matrix, from the given orientation of axes to RAS.
Parameters
----------
    axes: str (mandatory)
the given orientation of the axes.
Returns
-------
rotation: array (4, 4)
the correction matrix.
"""
rotation = numpy.eye(4)
rotation[:3, 0] = CORRECTION_MATRIX_COLUMNS[axes[0]]
rotation[:3, 1] = CORRECTION_MATRIX_COLUMNS[axes[1]]
rotation[:3, 2] = CORRECTION_MATRIX_COLUMNS[axes[2]]
return rotation
def distribution_parameters(self, parameter_name):
if parameter_name=='trend':
E = dot(dot(self.derivative_matrix.T,inv(diag(self.parameters.list['omega'].current_value))),self.derivative_matrix)
mean = dot(inv(eye(self.size)+E),self.data)
cov = (self.parameters.list['sigma2'].current_value)*inv(eye(self.size)+E)
return {'mean' : mean, 'cov' : cov}
elif parameter_name=='sigma2':
E = dot(dot(self.derivative_matrix.T,inv(diag(self.parameters.list['omega'].current_value))),self.derivative_matrix)
pos = self.size
loc = 0
scale = 0.5*dot((self.data-dot(eye(self.size),self.parameters.list['trend'].current_value)).T,(self.data-dot(eye(self.size),self.parameters.list['trend'].current_value)))+0.5*dot(dot(self.parameters.list['trend'].current_value.T,E),self.parameters.list['trend'].current_value)
elif parameter_name=='lambda2':
pos = self.size-self.total_variation_order-1+self.alpha
loc = 0.5*(norm(dot(self.derivative_matrix,self.parameters.list['trend'].current_value),ord=1))/self.parameters.list['sigma2'].current_value+self.rho
scale = 1
    elif parameter_name == 'omega':
pos = [sqrt(((self.parameters.list['lambda2'].current_value**2)*self.parameters.list['sigma2'].current_value)/(dj**2)) for dj in dot(self.derivative_matrix,self.parameters.list['trend'].current_value)]
loc = 0
scale = self.parameters.list['lambda2'].current_value**2
return {'pos' : pos, 'loc' : loc, 'scale' : scale}
def estimate_time_constant(y, p=2, sn=None, lags=5, fudge_factor=1.):
"""
Estimate AR model parameters through the autocovariance function
Parameters
----------
y : array, shape (T,)
One dimensional array containing the fluorescence intensities with
one entry per time-bin.
p : positive integer
order of AR system
sn : float
sn standard deviation, estimated if not provided.
lags : positive integer
        number of additional lags where the autocovariance is computed
fudge_factor : float (0< fudge_factor <= 1)
shrinkage factor to reduce bias
Returns
-------
g : estimated coefficients of the AR process
"""
if sn is None:
sn = GetSn(y)
lags += p
xc = axcov(y, lags)
xc = xc[:, np.newaxis]
A = scipy.linalg.toeplitz(xc[lags + np.arange(lags)],
xc[lags + np.arange(p)]) - sn**2 * np.eye(lags, p)
g = np.linalg.lstsq(A, xc[lags + 1:])[0]
gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
gr = (gr + gr.conjugate()) / 2.
gr[gr > 1] = 0.95 + np.random.normal(0, 0.01, np.sum(gr > 1))
gr[gr < 0] = 0.15 + np.random.normal(0, 0.01, np.sum(gr < 0))
g = np.poly(fudge_factor * gr)
g = -g[1:]
return g.flatten()
def gaussian_function(y, dimension, mu, cov, log=False, standard=False):
    """Multivariate Gaussian density. y: sample (row vector); mu: mean (row
    vector); cov: covariance matrix; log: return the log-density instead;
    standard: standardize the sample before evaluating."""
    x = y - mu
    if standard:
        x = np.dot(x, np.linalg.inv(cov) ** 0.5)
        cov_ = np.eye(dimension)
    else:
        cov_ = cov
    np.seterr(all='ignore')  # suppress numpy floating-point warnings
if log:
func = - (dimension / 2) * np.log(2 * math.pi) - 0.5 * np.log(np.linalg.det(cov_))
exp = -0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T)
return func + exp
else:
sigma = (2 * math.pi) ** (dimension / 2) * np.linalg.det(cov_) ** 0.5
func = 1. / sigma
exp = np.exp(-0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T))
return func * exp
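# A quick cross-check of the density above against scipy (hypothetical values;
# y and mu are 1-D row vectors, and numpy/math are assumed imported for the
# function above):
import math
import numpy as np
from scipy.stats import multivariate_normal

y = np.array([0.5, -0.2])
mu = np.array([0.0, 0.0])
cov = np.array([[1.0, 0.3], [0.3, 2.0]])
print(gaussian_function(y, 2, mu, cov))              # same value as...
print(multivariate_normal(mean=mu, cov=cov).pdf(y))  # ...scipy's pdf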
def batch_works(k):
if k == n_processes - 1:
paths = all_paths[k * int(len(all_paths) / n_processes) : ]
else:
paths = all_paths[k * int(len(all_paths) / n_processes) : (k + 1) * int(len(all_paths) / n_processes)]
for path in paths:
probs = np.load(os.path.join(input_path, path))
pred = np.argmax(probs, axis=3)
fg_prob = 1 - probs[..., 0]
pred = clean_contour(fg_prob, pred)
seg = np.zeros(pred.shape, dtype=np.int16)
seg[pred == 1] = 1
seg[pred == 2] = 2
seg[pred == 3] = 4
img = nib.Nifti1Image(seg, np.eye(4))
nib.save(img, os.path.join(output_path, path.replace('_probs.npy', '.nii.gz')))