def compute_nearest_neighbors(submatrix, balltree, k, row_start):
    """Compute the k nearest neighbors for a submatrix.

    Args:
        submatrix (np.ndarray): data submatrix
        balltree: nearest-neighbor index (from sklearn)
        k: number of nearest neighbors to compute
        row_start: row offset into the larger matrix

    Returns:
        The (i, j, x) triplets of a COO sparse adjacency matrix of
        nearest-neighbor relations.
    """
nn_dist, nn_idx = balltree.query(submatrix, k=k+1)
    # Remove self-matches (each point's nearest neighbor is itself)
nn_idx = nn_idx[:,1:]
nn_dist = nn_dist[:,1:]
# Construct a COO sparse matrix of edges and distances
i = np.repeat(row_start + np.arange(nn_idx.shape[0]), k)
j = nn_idx.ravel().astype(int)
return (i, j, nn_dist.ravel())
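
# Usage sketch for the function above (assumes scikit-learn and SciPy are
# available; the chunked assembly into one sparse matrix is illustrative, not
# part of the original snippet).
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.neighbors import BallTree

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
tree = BallTree(X)

k = 5
i1, j1, d1 = compute_nearest_neighbors(X[:50], tree, k, row_start=0)
i2, j2, d2 = compute_nearest_neighbors(X[50:], tree, k, row_start=50)
# Stack the per-chunk (i, j, x) triplets into one sparse adjacency matrix.
adj = coo_matrix((np.concatenate([d1, d2]),
                  (np.concatenate([i1, i2]), np.concatenate([j1, j2]))),
                 shape=(100, 100))
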
def standard_case(self):
"""Create standard testcase from Thetas defined in this Testcase. The following
metrics can be calculated by hand and should match the computations:
precisions: [1, 1, 0, 2/3, 1]
recalls: [1, 1, 0, 1, 0.5]
f1s: [1, 1, 0, 0.8, 2/3]
tps: 1 + 1 + 0 + 2 + 1 = 5
fps: 0 + 0 + 1 + 1 + 0 = 2
fns: 0 + 0 + 2 + 0 + 1 = 3
tns: 2 + 2 + 0 + 0 + 1 = 5
"""
Theta_true = np.vstack([
np.repeat(self.Theta_true1[nx, :, :], 2, axis=0),
np.repeat(self.Theta_true2[nx, :, :], 3, axis=0)
])
Theta_pred = np.vstack([
np.repeat(self.Theta_pred1[nx, :, :], 3, axis=0),
self.Theta_pred2[nx, :, :],
self.Theta_pred3[nx, :, :]
])
return Theta_true, Theta_pred
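
# Toy check of the stacking pattern above; `nx` is assumed to alias np.newaxis.
import numpy as np

nx = np.newaxis
A = np.array([[1, 2], [3, 4]])
stacked = np.repeat(A[nx, :, :], 3, axis=0)  # three copies of A along a new axis
assert stacked.shape == (3, 2, 2)
assert (stacked[0] == A).all()
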
def test_repeat(self):
""" Test if `repeat` works the same as np.repeat."""
with tf.Session().as_default():
# try different tensor types
for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
# and all their axes
for axis in range(len(init_value.shape)):
for repeats in [1, 2, 3, 11]:
tensor = tf.constant(init_value, dtype=tfdtype)
repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
expected_value = np.repeat(init_value, repeats=repeats, axis=axis)
self.assertTrue(np.all(repeated_value == expected_value))
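
# One common way to implement an np.repeat-style op in TF1 is expand/tile/
# reshape; a minimal sketch assuming static shapes (not necessarily how the
# `repeat` under test is implemented).
import numpy as np
import tensorflow as tf

def repeat_sketch(tensor, repeats, axis):
    expanded = tf.expand_dims(tensor, axis + 1)      # add a unit axis
    multiples = [1] * (tensor.shape.ndims + 1)
    multiples[axis + 1] = repeats                    # tile along the new axis
    tiled = tf.tile(expanded, multiples)
    new_shape = tensor.shape.as_list()
    new_shape[axis] *= repeats                       # merge it back into `axis`
    return tf.reshape(tiled, new_shape)

with tf.Session().as_default():
    out = repeat_sketch(tf.constant([0, 1, 2]), repeats=2, axis=0).eval()
    assert (out == np.repeat([0, 1, 2], 2)).all()    # [0, 0, 1, 1, 2, 2]
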
def check_string_input(self, input_name, input_value):
    # Normalize a string-valued setting to a length-P array.
    if isinstance(input_value, np.ndarray):
        if input_value.size == self.P:
            setattr(self, input_name, input_value)
        elif input_value.size == 1:
            setattr(self, input_name, np.repeat(input_value, self.P))
        else:
            raise ValueError("length of %s is %d; should be %d" % (input_name, input_value.size, self.P))
    elif isinstance(input_value, str):
        # broadcast a single string value to all P entries
        setattr(self, input_name, np.repeat(input_value, self.P))
    elif isinstance(input_value, list):
        if len(input_value) == self.P:
            setattr(self, input_name, np.array([str(x) for x in input_value]))
        elif len(input_value) == 1:
            setattr(self, input_name, np.repeat(input_value, self.P))
        else:
            raise ValueError("length of %s is %d; should be %d" % (input_name, len(input_value), self.P))
    else:
        raise ValueError("user provided %s with an unsupported type" % input_name)
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
    filesize = getsize(self.sessiondir + sep + filename)  # in bytes
    if filesize > 16384:
        # each 1044-byte record is read as 261 little-endian uint32 words
        data = np.memmap(self.sessiondir + sep + filename,
                         dtype='<u4',
                         shape=(int((filesize - 16384) / 4 / 261), 261),
                         mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + (data[:,1] *2**32)
header_u4 = data[:, 2:5]
return timestamps, header_u4
else:
return None
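
# The two little-endian u4 words combine as low + high * 2**32; a quick sanity
# check of that reconstruction on a synthetic timestamp.
import numpy as np

ts_words = np.array([[0x89ABCDEF, 0x01234567]], dtype=np.uint64)  # [low, high]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2, dtype=np.uint64),
                  len(ts_words), axis=0)
timestamps = np.sum(ts_words * multi, axis=1)
assert timestamps[0] == 0x0123456789ABCDEF
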
def __mmap_ncs_packet_timestamps(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
    data packet timestamps
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
    filesize = getsize(self.sessiondir + sep + filename)  # in bytes
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + data[:,1]*2**32
return timestamps
else:
return None
def _makeflat(self, start=None, end=None, groups=False):
    eeg = list()
    for sub in self.data[start:end]:
        if len(sub) % self.chunk_len == 0:
            eeg.append(sub.reshape([-1, self.chunk_len, 3]))
        else:
            print('ERROR: Please choose a chunk length that is a factor of {}. '
                  'Current len = {}'.format(self.samples_per_epoch, len(sub)))
            return [0, 0]
    hypno = list()
    group = list()
    # integer number of chunks per epoch, so each label can be repeated per chunk
    hypno_repeat = self.samples_per_epoch // self.chunk_len
    idx = 0
    for sub in self.hypno[start:end]:
        hypno.append(np.repeat(sub, hypno_repeat))
        group.append(np.repeat(idx, len(hypno[-1])))
        idx += 1
    if groups:
        return np.vstack(eeg), np.hstack(hypno), np.hstack(group)
    else:
        return np.vstack(eeg), np.hstack(hypno)
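
# Sketch of the label up-sampling above: with 3000 samples per epoch and chunks
# of 1000 samples, each hypnogram label is repeated 3 times so labels stay
# aligned with chunks (values are illustrative).
import numpy as np

samples_per_epoch, chunk_len = 3000, 1000
hypno_epoch = np.array([0, 2, 3])  # one sleep-stage label per epoch
hypno_chunks = np.repeat(hypno_epoch, samples_per_epoch // chunk_len)
assert hypno_chunks.tolist() == [0, 0, 0, 2, 2, 2, 3, 3, 3]
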
def _get_intercept_stats(self, add_slopes=True):
# start with mean and variance of Y on the link scale
mod = sm.GLM(endog=self.model.y.data,
exog=np.repeat(1, len(self.model.y.data)),
family=self.model.family.smfamily(),
missing='drop' if self.model.dropna else 'none').fit()
mu = mod.params
# multiply SE by sqrt(N) to turn it into (approx.) SD(Y) on link scale
sd = (mod.cov_params()[0] * len(mod.mu))**.5
# modify mu and sd based on means and SDs of slope priors.
if len(self.model.fixed_terms) > 1 and add_slopes:
means = np.array([x['mu'] for x in self.priors.values()])
sds = np.array([x['sd'] for x in self.priors.values()])
# add to intercept prior
index = list(self.priors.keys())
mu -= np.dot(means, self.stats['mean_x'][index])
sd = (sd**2 + np.dot(sds**2, self.stats['mean_x'][index]**2))**.5
return mu, sd
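
# Numeric sketch of the intercept-prior adjustment above (all values made up):
# slope means shift the intercept mean via the predictor means, and slope prior
# variances propagate through the squared predictor means.
import numpy as np

mu_y, sd_y = 10.0, 4.0
slope_means = np.array([2.0, -1.0])
slope_sds = np.array([0.5, 0.5])
mean_x = np.array([3.0, 1.0])

mu_intercept = mu_y - slope_means @ mean_x                  # 10 - 5 = 5
sd_intercept = np.sqrt(sd_y**2 + slope_sds**2 @ mean_x**2)  # sqrt(16 + 2.5)
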
def test_slda():
l = language(10000)
n_iter = 2000
KL_thresh = 0.001
nu2 = l['K']
sigma2 = 1
np.random.seed(l['seed'])
eta = np.random.normal(scale=nu2, size=l['K'])
y = [np.dot(eta, l['thetas'][i]) for i in range(l['D'])] + \
np.random.normal(scale=sigma2, size=l['D'])
_beta = np.repeat(0.01, l['V'])
_mu = 0
slda = SLDA(l['K'], l['alpha'], _beta, _mu, nu2, sigma2, n_iter,
seed=l['seed'], n_report_iter=l['n_report_iters'])
slda.fit(l['doc_term_matrix'], y)
assert_probablity_distribution(slda.phi)
check_KL_divergence(l['topics'], slda.phi, KL_thresh)
def test_blslda():
l = language(10000)
n_iter = 1500
KL_thresh = 0.03
mu = 0.
nu2 = 1.
np.random.seed(l['seed'])
eta = np.random.normal(loc=mu, scale=nu2, size=l['K'])
zeta = np.array([np.dot(eta, l['thetas'][i]) for i in range(l['D'])])
y = (zeta >= 0).astype(int)
_beta = np.repeat(0.01, l['V'])
_b = 7.25
blslda = BLSLDA(l['K'], l['alpha'], _beta, mu, nu2, _b, n_iter,
seed=l['seed'],
n_report_iter=l['n_report_iters'])
blslda.fit(l['doc_term_matrix'], y)
assert_probablity_distribution(blslda.phi)
check_KL_divergence(l['topics'], blslda.phi, KL_thresh)
def update_photo(data=None, widget=None):
    global Z
    if data is None:
        # By default, update with the current value of Z
        data = np.repeat(np.repeat(np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0])), 4, 1), 4, 2)
    else:
        data = np.repeat(np.repeat(np.uint8(data), 4, 1), 4, 2)
    if widget is None:
        widget = output
    # Reshape image to canvas
    mshape = (4 * 64, 4 * 64, 1)
    im = Image.fromarray(np.concatenate([np.reshape(data[0], mshape),
                                         np.reshape(data[1], mshape),
                                         np.reshape(data[2], mshape)], axis=2),
                         mode='RGB')
    # Keep a reference to the photo on the widget so the garbage collector doesn't reclaim it
    widget.photo = ImageTk.PhotoImage(image=im)
    widget.create_image(0, 0, image=widget.photo, anchor=NW)
    widget.tag_raise(pixel_rect)
# Function to update the latent canvas.
def update_canvas(widget=None):
    global r, Z, res, rects, painted_rects
    if widget is None:
        widget = w
    # Update display values
    r = np.repeat(np.repeat(Z, r.shape[0] // Z.shape[0], 0), r.shape[1] // Z.shape[1], 1)
    # If we're letting freeform painting happen, delete the painted rectangles
    for p in painted_rects:
        w.delete(p)
    painted_rects = []
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            w.itemconfig(int(rects[i, j]), fill=rb(255 * Z[i, j]), outline=rb(255 * Z[i, j]))
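
# The nested np.repeat above is nearest-neighbour upscaling; a minimal example
# blowing a 2x2 image up by 4x in both dimensions.
import numpy as np

img = np.array([[0, 1], [2, 3]], dtype=np.uint8)
big = np.repeat(np.repeat(img, 4, axis=0), 4, axis=1)
assert big.shape == (8, 8)
assert (big[:4, :4] == 0).all()  # each pixel becomes a 4x4 block
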
def test_find_multiple_noisy(self):
""" Test finding multiple particles (noisy) """
self.atol = 5
radius = np.random.random() * 15 + 15
generated_image = self.generate_image(radius, 10, noise=0.2)
actual_number = len(generated_image.coords)
fits = find_disks(generated_image.image, (radius / 2.0,
radius * 2.0),
maximum=actual_number)
_, coords = sort_positions(generated_image.coords,
np.array([fits['y'].values,
fits['x'].values]).T)
if len(fits) == 0: # Nothing found
actual = np.repeat([[np.nan, np.nan, np.nan]], actual_number,
axis=0)
else:
actual = fits[['r', 'y', 'x']].values.astype(np.float64)
expected = np.array([np.full(actual_number, radius, np.float64),
coords[:, 0], coords[:, 1]]).T
return np.sqrt(((actual - expected)**2).mean(0)), [0] * 3
def make_quantile_df(data, draw_quantiles):
"""
Return a dataframe with info needed to draw quantile segments
"""
dens = data['density'].cumsum() / data['density'].sum()
ecdf = interp1d(dens, data['y'], assume_sorted=True)
ys = ecdf(draw_quantiles)
# Get the violin bounds for the requested quantiles
violin_xminvs = interp1d(data['y'], data['xminv'])(ys)
violin_xmaxvs = interp1d(data['y'], data['xmaxv'])(ys)
data = pd.DataFrame({
'x': interleave(violin_xminvs, violin_xmaxvs),
'y': np.repeat(ys, 2),
'group': np.repeat(np.arange(1, len(ys)+1), 2)})
return data
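
# The interleave/np.repeat pairing above turns per-quantile violin bounds into
# flat segment coordinates; a toy version with a stand-in for the project's
# `interleave` helper (assumed to alternate elements of its arguments).
import numpy as np

def interleave_sketch(a, b):
    return np.column_stack([a, b]).ravel()

xminv, xmaxv = np.array([0.2, 0.3]), np.array([0.8, 0.7])
ys = np.array([1.0, 2.0])
x = interleave_sketch(xminv, xmaxv)  # [0.2, 0.8, 0.3, 0.7]
y = np.repeat(ys, 2)                 # [1.0, 1.0, 2.0, 2.0]: one segment per quantile
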
def draw_group(data, panel_params, coord, ax, **params):
n = len(data)
data = data.sort_values('x', kind='mergesort')
# create stepped path -- interleave x with
# itself and y with itself
    xs = np.repeat(np.arange(n), 2)[:-1]
    ys = np.repeat(np.arange(n), 2)[1:]
# horizontal first
if params['direction'] == 'hv':
xs, ys = ys, xs
df = pd.DataFrame({'x': data['x'].values[xs],
'y': data['y'].values[ys]})
copy_missing_columns(df, data)
geom_path.draw_group(df, panel_params, coord, ax, **params)
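
# Quick check of the stepped-path index trick above: interleaving the indices
# and trimming one end gives vertical-then-horizontal steps through the points.
import numpy as np

n = 3
xs = np.repeat(np.arange(n), 2)[:-1]  # [0, 0, 1, 1, 2]
ys = np.repeat(np.arange(n), 2)[1:]   # [0, 1, 1, 2, 2]
x = np.array([1.0, 2.0, 4.0])[xs]
y = np.array([5.0, 6.0, 8.0])[ys]     # path: (1,5) (1,6) (2,6) (2,8) (4,8)
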
# b3_data_iter.py, from kaggle-dstl-satellite-imagery-feature-detection (author: u1234x1234)
def sample_crop(self, n):
    kx = np.array([len(x) for x in self.maps_with_class])
    # zero out classes that appear on no map, then draw per-class counts
    class_hist = np.random.multinomial(n, self.class_probs * (kx != 0))
    class_ids = np.repeat(np.arange(class_hist.shape[0]), class_hist)
    X = []
    for class_id in class_ids:
        # retry up to 20 times to land on a training image (index < 25)
        for i in range(20):
            random_image_idx = np.random.choice(self.maps_with_class[class_id])
            if random_image_idx < 25:
                break
        x = self.kde_samplers[random_image_idx][class_id].sample()[0]
        x /= self.mask_size
        x = np.clip(x, 0., 1.)
        X.append(x)
    return X
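
# The multinomial/np.repeat pair above expands per-class draw counts into a
# flat list of class ids; a small self-contained illustration.
import numpy as np

rng = np.random.RandomState(0)
class_probs = np.array([0.5, 0.3, 0.2])
counts = rng.multinomial(10, class_probs)    # per-class counts summing to 10
class_ids = np.repeat(np.arange(3), counts)  # ten ids, grouped by class
assert len(class_ids) == 10
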
def test_FaceInnerProductAnisotropicDeriv(self):
def fun(x):
        # fake anisotropy (testing the anisotropic implementation with an
        # isotropic vector); first-order behavior expected for fully anisotropic
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([eye, zero, eye])])
MfSig = self.mesh.getFaceInnerProduct(x)
MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0)
        return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
print('Testing FaceInnerProduct Anisotropic')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD, plotIt=False))
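
# The np.repeat pattern shared by these derivative tests, in isolation: a
# length-nC isotropic vector is broadcast into an (nC, 3) "fake anisotropic"
# array, one copy per axis.
import numpy as np

nC = 4
x = np.arange(nC, dtype=float)
x_aniso = np.repeat(np.atleast_2d(x), 3, axis=0).T
assert x_aniso.shape == (nC, 3)
assert (x_aniso[:, 0] == x).all()
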
def test_FaceInnerProductAnisotropicDerivInvProp(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([eye, zero, eye])])
MfSig = self.mesh.getFaceInnerProduct(x, invProp=True)
MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0,
invProp=True)
return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
print('Testing FaceInnerProduct Anisotropic InvProp')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def test_FaceInnerProductAnisotropicDerivInvMat(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([eye, zero, eye])])
MfSig = self.mesh.getFaceInnerProduct(x, invMat=True)
MfSigDeriv = self.mesh.getFaceInnerProductDeriv(x0, invMat=True)
return MfSig*self.face_vec, MfSigDeriv(self.face_vec) * P.T
print('Testing FaceInnerProduct Anisotropic InvMat')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def test_EdgeInnerProductAnisotropicDeriv(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([zero, eye, zero])])
MeSig = self.mesh.getEdgeInnerProduct(x.reshape(self.mesh.nC, 3))
MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0)
return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
print('Testing EdgeInnerProduct Anisotropic')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvProp(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([zero, eye, zero])])
MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True)
MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invProp=True)
return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
print('Testing EdgeInnerProduct Anisotropic InvProp')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvMat(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([zero, eye, zero])])
MeSig = self.mesh.getEdgeInnerProduct(x, invMat=True)
MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0, invMat=True)
return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
print('Testing EdgeInnerProduct Anisotropic InvMat')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def test_EdgeInnerProductAnisotropicDerivInvPropInvMat(self):
def fun(x):
x = np.repeat(np.atleast_2d(x), 3, axis=0).T
x0 = np.repeat(self.x0, 3, axis=0).T
zero = sp.csr_matrix((self.mesh.nC, self.mesh.nC))
eye = sp.eye(self.mesh.nC)
P = sp.vstack([sp.hstack([zero, eye, zero])])
MeSig = self.mesh.getEdgeInnerProduct(x, invProp=True, invMat=True)
MeSigDeriv = self.mesh.getEdgeInnerProductDeriv(x0,
invProp=True,
invMat=True)
return MeSig*self.edge_vec, MeSigDeriv(self.edge_vec) * P.T
print('Testing EdgeInnerProduct Anisotropic InvProp InvMat')
return self.assertTrue(Tests.checkDerivative(fun, self.x0, num=7,
tolerance=TOLD,
plotIt=False))
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
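
# Sanity check of the index construction above on a tiny case: a single-channel
# 3x3 input with a 2x2 field, stride 1, no padding, yields a 2x2 output grid,
# and (k, i, j) gather each receptive field into one column.
import numpy as np

k, i, j = get_im2col_indices((1, 1, 3, 3), 2, 2, padding=0, stride=1)
x = np.arange(9).reshape(1, 1, 3, 3)
cols = x[:, k, i, j]  # shape (1, 4, 4): field_size x num_patches
assert cols.shape == (1, 4, 4)
assert cols[0, :, 0].tolist() == [0, 1, 3, 4]  # top-left 2x2 patch
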
def test_two_keys_two_vars(self):
a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(50, 60), np.arange(10, 20))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(65, 75), np.arange(0, 10))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
(10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
(10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
(10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
(10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
dtype=[('k', int), ('a', int), ('b1', int),
('b2', int), ('c1', int), ('c2', int)])
test = join_by(
['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k, i, j)
def precompute_marginals(self):
sys.stderr.write('Precomputing marginals...\n')
self._pdfs = [None] * self._num_instances
# precomputing all possible marginals
    for i in range(self._num_instances):
mean = self._corrected_means[i]
cov = self._corrected_covs[i]
self._pdfs[i] = [None] * (2 ** mean.shape[0])
for marginal_pattern in itertools.product([False, True], repeat=mean.shape[0]):
marginal_length = marginal_pattern.count(True)
if marginal_length == 0:
continue
m = np.array(marginal_pattern)
marginal_mean = mean[m]
mm = m[:, np.newaxis]
marginal_cov = cov[np.dot(mm, mm.transpose())].reshape((marginal_length, marginal_length))
self._pdfs[i][hash_bool_array(m)] = multivariate_normal(mean=marginal_mean, cov=marginal_cov)
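
# The boolean-mask marginalisation above, in isolation: selecting a subset of a
# multivariate normal's mean and covariance yields the marginal distribution.
import numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.0, 1.0, 2.0])
cov = np.diag([1.0, 2.0, 3.0])
m = np.array([True, False, True])
mm = m[:, np.newaxis]
marginal_cov = cov[np.dot(mm, mm.T)].reshape(2, 2)  # rows/cols where m is True
marginal = multivariate_normal(mean=mean[m], cov=marginal_cov)
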