def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    if not len(bboxes):
        return vis

    if not colored:
        cols = np.tile([240, 240, 240], [len(bboxes), 1])
    else:
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])

    texts = [None] * len(bboxes) if texts is None else texts
    for col, b, t in zip(cols, bboxes, texts):
        if ellipse:
            # cv2.ellipse expects integer center and axis lengths, so use floor division.
            cv2.ellipse(vis, ((b[0] + b[2]) // 2, (b[1] + b[3]) // 2),
                        ((b[2] - b[0]) // 2, (b[3] - b[1]) // 2), 0, 0, 360,
                        color=tuple(col), thickness=1)
        else:
            cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), tuple(col), 2)
        if t:
            annotate_bbox(vis, b, title=t)
    return vis
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
    """
    color_type: {age, unique}
    """
    N = 20
    # inds = self.confident_tracks(min_length=min_track_length)
    # if not len(inds):
    #     return
    # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
    # lengths = self.tm_.lengths[inds]

    ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

    if color_type == 'unique':
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
    elif color_type == 'age':
        cols = colormap(lengths)
    else:
        raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

    if not colored:
        cols = np.tile([0, 240, 0], [len(self.tm_.tracks), 1])

    # zip/.values() instead of Python 2's izip/.itervalues(); track renamed to avoid shadowing pts above.
    for col, track in zip(cols.astype(np.int64), self.tm_.tracks.values()):
        cv2.polylines(out, [np.vstack(track.items).astype(np.int32)[-max_track_length:]], False,
                      tuple(col), thickness=1)
        tl, br = np.int32(track.latest_item) - 2, np.int32(track.latest_item) + 2
        cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def __init__(self, y_coo, num_factor, bias_scale, factor_scale, weight=None):
    if weight is None:
        weight = np.ones(y_coo.data.size)

    self.y_coo = y_coo
    self.y_csr = scipy.sparse.csr_matrix(y_coo)
    self.y_csc = scipy.sparse.csc_matrix(y_coo)
    self.num_factor = num_factor
    self.prior_param = {
        'col_bias_scale': bias_scale,
        'row_bias_scale': bias_scale,
        'factor_scale': np.tile(factor_scale, self.num_factor),
        'weight': weight,
        'obs_df': float('inf'),
        'param_df': float('inf'),
    }
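A side note on the np.tile call in prior_param above: with a scalar first argument it simply expands one hyperparameter into a value per factor. A minimal sketch (numbers are made up):

import numpy as np

factor_scale, num_factor = 0.5, 4
# A scalar tiled num_factor times becomes a 1-D array of repeated scales.
assert np.tile(factor_scale, num_factor).tolist() == [0.5, 0.5, 0.5, 0.5]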
def EStep(self):
    P = np.zeros((self.M, self.N))
    for i in range(0, self.M):
        diff = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    # Uniform-distribution (outlier) component of the CPD mixture.
    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps
    den += c  # c was computed but never used above; per CPD it belongs in the denominator

    self.P = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1 = np.sum(self.P, axis=1)
    self.Np = np.sum(self.P1)
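An aside on the np.tile in the loop above: it is equivalent to plain NumPy broadcasting, which is worth knowing when reading (or simplifying) this kind of code. A minimal sketch with made-up shapes standing in for self.N, self.D, and one row of self.TY:

import numpy as np

N, D = 5, 3
X = np.random.rand(N, D)    # data points
ty_row = np.random.rand(D)  # one row of TY, i.e. TY[i, :]

# Original form: replicate the row N times, then subtract.
diff_tile = X - np.tile(ty_row, (N, 1))
# Broadcasting form: NumPy stretches the (D,) row across N rows implicitly.
diff_bcast = X - ty_row

assert np.allclose(diff_tile, diff_bcast)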
def im_detect(sess, net, im):
    blobs, im_scales = _get_blobs(im)
    assert len(im_scales) == 1, "Only single-image batch implemented"

    im_blob = blobs['data']
    blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)

    _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])

    boxes = rois[:, 1:5] / im_scales[0]
    scores = np.reshape(scores, [scores.shape[0], -1])
    bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = bbox_pred
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = _clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))

    return scores, pred_boxes
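The tile in the else-branch repeats each RoI's four coordinates once per class, so downstream code can index boxes per class even without regression. A shape check with hypothetical sizes (4 RoIs, 21 classes):

import numpy as np

num_rois, num_classes = 4, 21
boxes = np.random.rand(num_rois, 4)  # one (x1, y1, x2, y2) box per RoI

# Repeat the 4 coordinates once per class along axis 1.
pred_boxes = np.tile(boxes, (1, num_classes))
assert pred_boxes.shape == (num_rois, 4 * num_classes)
# Columns [4*k : 4*k+4] hold the (identical) box for class k.
assert np.allclose(pred_boxes[:, 4:8], boxes)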
def get_color_arr(c, n, flip_rb=False):
    """
    Convert color spec c into an (N x 3) color array.
    """
    if isinstance(c, str):  # single color name
        carr = np.tile(np.array(colorConverter.to_rgb(c)), [n, 1])
    elif isinstance(c, float):  # scalar mapped through a colormap
        carr = np.tile(np.array(color_func(c)), [n, 1])
    else:
        carr = reshape_arr(c)

    if flip_rb:
        b, r = carr[:, 0], carr[:, 2]
        carr[:, 0], carr[:, 2] = r.copy(), b.copy()

    # Return floating point with values in [0, 1].
    return carr.astype(np.float32) / 255.0 if carr.dtype == np.uint8 else carr.astype(np.float32)
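A sketch of the single-color branch above, assuming matplotlib's colorConverter is available (the color and count are arbitrary):

import numpy as np
from matplotlib.colors import colorConverter

# A single named color expanded to an (N, 3) array, one row per item.
n = 4
carr = np.tile(np.array(colorConverter.to_rgb('red')), [n, 1])
assert carr.shape == (n, 3) and np.allclose(carr, [1.0, 0.0, 0.0])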
def getMedianDistanceBetweenSamples(self, sampleSet=None):
    """
    Jaakkola's heuristic method for setting the width parameter of the Gaussian
    radial basis function kernel is to pick a quantile (usually the median) of
    the distribution of Euclidean distances between points having different
    labels.

    Reference:
    Jaakkola, M. Diekhans, and D. Haussler. Using the Fisher kernel method to detect
    remote protein homologies. In T. Lengauer, R. Schneider, P. Bork, D. Brutlag, J.
    Glasgow, H.-W. Mewes, and R. Zimmer, editors, Proceedings of the Seventh
    International Conference on Intelligent Systems for Molecular Biology.
    """
    numrows = sampleSet.shape[0]
    samples = sampleSet

    G = numpy.sum(samples * samples, 1)
    Q = numpy.tile(G[:, None], (1, numrows))
    R = numpy.tile(G, (numrows, 1))
    distances = Q + R - 2 * numpy.dot(samples, samples.T)
    distances = distances - numpy.tril(distances)
    distances = distances.reshape(numrows ** 2, 1, order="F").copy()
    return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))
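A sanity check of the tile-based pairwise-distance trick above against scipy's pdist, on random data (sizes are arbitrary):

import numpy as np
from scipy.spatial.distance import pdist

n = 50
samples = np.random.rand(n, 4)
G = np.sum(samples * samples, axis=1)
Q = np.tile(G[:, None], (1, n))
R = np.tile(G, (n, 1))
sq_dists = Q + R - 2 * np.dot(samples, samples.T)   # ||x_i - x_j||**2

# The strict upper triangle holds each pair exactly once, matching pdist's output.
upper = sq_dists[np.triu_indices(n, k=1)]
assert np.allclose(np.sort(np.sqrt(np.maximum(upper, 0))), np.sort(pdist(samples)))

sigma = np.sqrt(0.5 * np.median(upper[upper > 0]))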
def load_solar_data():
    with open('solar label.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
        labels = np.array(rows, dtype=int)
    print(labels.shape)

    with open('solar.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
        rows = np.array(rows, dtype=float)
        rows = rows[:104832, :]
    print(rows.shape)

    trX = np.reshape(rows.T, (-1, 576))
    print(trX.shape)
    m = rows.max()
    print("maximum value of solar power", m)

    trY = np.tile(labels, (32, 1))
    trX = trX / m
    return trX, trY
def to_rgb(img):
    """
    Converts the given array into an RGB image. If the number of channels is not
    3, the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0, 255).

    :param img: the array to convert [nx, ny, channels]
    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)

    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
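What the channel tiling does for a single-channel input, shown in isolation (shapes only, values arbitrary):

import numpy as np

gray = np.random.rand(4, 4)   # (nx, ny), no channel axis
img = np.atleast_3d(gray)     # (4, 4, 1)
rgb = np.tile(img, 3)         # a scalar rep tiles the last axis -> (4, 4, 3)
assert rgb.shape == (4, 4, 3)
assert np.allclose(rgb[..., 0], rgb[..., 1])  # all three channels identical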
def test_run_model(self, input_dataset, ds_model_interface):
    out_ds = ds_model_interface.run_model()

    expected = input_dataset.copy()
    del expected.attrs[SimlabAccessor._snapshot_vars_key]
    del expected.clock.attrs[SimlabAccessor._snapshot_vars_key]
    del expected.out.attrs[SimlabAccessor._snapshot_vars_key]
    expected['grid__x'] = ('x', np.arange(10), {'description': ''})
    expected['quantity__quantity'] = (
        ('clock', 'x'),
        np.arange(0, 10, 2)[:, None] * np.arange(10) * 1.,
        {'description': 'a quantity'}
    )
    expected['some_process__some_effect'] = (
        ('out', 'x'), np.tile(np.arange(2, 12), 3).reshape(3, 10),
        {'description': ''}
    )
    expected['other_process__other_effect'] = (
        ('out', 'x'), np.tile(np.arange(-2, 8), 3).reshape(3, 10),
        {'description': ''}
    )

    xr.testing.assert_identical(out_ds, expected)
def get_image(filepath, image_target, image_size):
    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target) / 2)
    w_drop = int((w_origin - image_target) / 2)

    if img.ndim == 2:
        # Grayscale: add a channel axis and tile it to 3 channels.
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1, 1, 3))

    img_crop = img[h_drop:h_drop + image_target, w_drop:w_drop + image_target, :]
    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize) / 127.5 - 1.
def fixed_label_diversity(model, config, step=''):
    sample_dir = make_sample_dir(model)
    str_step = str(step) or guess_model_step(model)

    N = 64       # per image
    n_combo = 5  # n label combinations

    # 0,1 label combinations
    fixed_labels = model.attr.sample(n_combo)[model.cc.node_names]

    size = infer_grid_image_shape(N)

    for j, fx_label in enumerate(fixed_labels.values):
        fx_label = np.reshape(fx_label, [1, -1])
        fx_label = np.tile(fx_label, [N, 1])
        do_dict = {model.cc.labels: fx_label}
        images, feed_dict = sample(model, do_dict=do_dict)

        fx_file = os.path.join(sample_dir, str_step + 'fxlab' + str(j) + '.pdf')
        save_figure_images(model.model_type, images['G'], fx_file, size=size)

    # which image is what label
    fixed_labels = fixed_labels.reset_index(drop=True)
    fixed_labels.to_csv(os.path.join(sample_dir, str_step + 'fxlab' + '.csv'))
def _linear_phase(self, n_shift):
    """
    Private: select the center of FOV
    """
    om = self.st['om']
    M = self.st['M']
    final_shifts = tuple(
        numpy.array(n_shift) +
        numpy.array(self.st['Nd']) / 2)
    phase = numpy.exp(
        1.0j *
        numpy.sum(
            om * numpy.tile(
                final_shifts,
                (M, 1)),
            1))
    # add up all the linear phases in all axes
    self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
def linear_phase(self, n_shift):
    '''
    Select the center of FOV
    '''
    om = self.st['om']
    M = self.st['M']
    final_shifts = tuple(
        numpy.array(n_shift) +
        numpy.array(self.st['Nd']) / 2)
    phase = numpy.exp(
        1.0j *
        numpy.sum(
            om * numpy.tile(
                final_shifts,
                (M, 1)),
            1))
    # add up all the linear phases in all axes
    self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
    # multiply the diagonal, linear phase before the gridding matrix
def _linear_phase(self, n_shift):
    """
    Private: select the center of FOV
    """
    om = self.st['om']
    M = self.st['M']
    final_shifts = tuple(
        numpy.array(n_shift) +
        numpy.array(self.Nd) / 2)
    phase = numpy.exp(
        1.0j *
        numpy.sum(
            om * numpy.tile(
                final_shifts,
                (M, 1)),
            1))
    # add up all the linear phases in all axes
    self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
    return 0  # shifted sparse matrix
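All three variants above build the same phase ramp, and the tile-multiply-sum is just a matrix-vector product with the shift vector. A minimal check with made-up sizes:

import numpy as np

M, dim = 6, 2                        # hypothetical sample count / image dims
om = np.random.rand(M, dim)          # k-space coordinates
final_shifts = np.array([4.0, 4.0])  # stand-in for n_shift + Nd/2

# Original: replicate the shift row M times, multiply, sum over axes.
phase_tile = np.exp(1.0j * np.sum(om * np.tile(final_shifts, (M, 1)), 1))
# Equivalent: a plain matrix-vector product with the shift vector.
phase_dot = np.exp(1.0j * om.dot(final_shifts))
assert np.allclose(phase_tile, phase_dot)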
def makedists(pdata, binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds = pdata.shape
    nlat = pds[1]
    nlon = pds[0]
    nd = pds[2]
    bins = np.append(0, binl)
    n = np.empty((nlon, nlat, len(binl)))
    binno = np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn, thisbin = np.histogram(pdata[ilon, ilat, :], bins)
            n[ilon, ilat, :] = thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon, ilat, :] = np.digitize(pdata[ilon, ilat, :], bins)

    #### Calculate the number of days with non-missing data, for normalization
    ndmat = np.tile(np.expand_dims(np.nansum(n, axis=2), axis=2), (1, 1, len(bins) - 1))
    thisppdfmap = n / ndmat

    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap = np.empty(thisppdfmap.shape)
    for ibin in range(len(bins) - 1):
        testpamtmap[:, :, ibin] = (pdata * (ibin == binno)).sum(axis=2)
    thispamtmap = testpamtmap / ndmat

    return thisppdfmap, thispamtmap
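An aside on the ndmat construction above: tiling the expand_dims result is equivalent to summing with keepdims=True and letting broadcasting handle the rest. A small check on random counts (shape is arbitrary):

import numpy as np

n = np.random.rand(3, 4, 7)  # stand-in (nlon, nlat, nbins) histogram counts

ndmat = np.tile(np.expand_dims(np.nansum(n, axis=2), axis=2), (1, 1, n.shape[2]))
pdf_tile = n / ndmat
# keepdims=True leaves a length-1 bin axis that broadcasts over all bins.
pdf_bcast = n / np.nansum(n, axis=2, keepdims=True)
assert np.allclose(pdf_tile, pdf_bcast)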
def test_point_cloud_transformation(self, num_points=10):
    R_a_b = RigidTransform.random_rotation()
    t_a_b = RigidTransform.random_translation()
    T_a_b = RigidTransform(R_a_b, t_a_b, 'a', 'b')

    x_a = np.random.rand(3, num_points)
    pc_a = PointCloud(x_a, 'a')

    # multiply with numpy arrays
    x_b = R_a_b.dot(x_a) + np.tile(t_a_b.reshape(3, 1), [1, num_points])

    # use multiplication operator
    pc_b = T_a_b * pc_a

    self.assertTrue(np.sum(np.abs(pc_b.data - x_b)) < 1e-5,
                    msg='Point cloud transformation incorrect:\n Expected:\n {}\n Got:\n {}'.format(x_b, pc_b.data))

    # check frames
    self.assertEqual(pc_b.frame, 'b', msg='Transformed point cloud has incorrect frame')
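The tiled translation in x_b above can also be written with broadcasting; a minimal check (identity rotation and random translation as stand-ins):

import numpy as np

num_points = 10
R = np.eye(3)              # stand-in rotation
t = np.random.rand(3)      # stand-in translation
x = np.random.rand(3, num_points)

x_tile = R.dot(x) + np.tile(t.reshape(3, 1), [1, num_points])
x_bcast = R.dot(x) + t[:, None]  # a (3, 1) column broadcasts across the points
assert np.allclose(x_tile, x_bcast)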
def initializeAllParameters(self):
    self.phi = (1.0 / self.K) * np.ones((self.n_tasks, self.K))
    self.theta = np.tile(self.mu, (self.K, 1))
    self.gamma = [self.sigma for i in range(self.K)]
    self.xi = [[0] * len(self.task_dict[i]['Y']) for i in range(self.n_tasks)]
    self.computeXi()
    self.tau1 = self.tau10
    self.tau2 = self.tau20
    self.computeSmallPhis()
    self.computeTaus()
    self.s = np.zeros((self.n_tasks, self.K))
    self.computeTaskVectors()

    self.xi_convergence_list = []
    self.phi_convergence_list = []
    self.s_convergence_list = []
    self.gamma_convergence_list = []
    self.theta_convergence_list = []

    if self.debug:
        print("initial phi", self.phi)
        print("initial small phi1", self.small_phi1)
        print("initial small phi2", self.small_phi2)
        print("initial tau1", self.tau1, "tau2", self.tau2)
def do_checkpoint(prefix):
    """Callback to checkpoint the model to prefix every epoch.

    Parameters
    ----------
    prefix : str
        The file prefix to checkpoint to

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    def _callback(iter_no, sym, arg, aux):
        """The checkpoint function."""
        # if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
        #     print "save model with mean/std"
        #     num_classes = len(arg['bbox_pred_bias'].asnumpy()) / 4
        #     means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (1, num_classes))
        #     stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (1, num_classes))
        #     arg['bbox_pred_weight'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
        #     arg['bbox_pred_bias'] = arg['bbox_pred_bias'] * mx.nd.array(np.squeeze(stds)) + \
        #                             mx.nd.array(np.squeeze(means))
        save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback
def test_f_init_dims(self):
    """
    Best I can tell, f_init is only ever given one sentence, but it appears to be
    written to process multiple sentences.
    """
    self.logger.info("========================================================================================")
    self.logger.info("Starting the f_init_dims test to determine that x_f_init acts as expected.")
    self.logger.info("========================================================================================")

    x0_state0, x0_ctx0 = self.remote_interface.x_f_init(x0)  # (1, 1024) (31, 1, 2048)

    # If tile input, state/context should be tiled too
    xx0_state0, xx0_ctx0 = self.remote_interface.x_f_init(xx0)  # (2, 1024) (31, 2, 2048)
    self.assertTrue(np.allclose(np.tile(x0_state0, [2, 1]), xx0_state0))
    self.assertTrue(np.allclose(np.tile(x0_ctx0, [1, 2, 1]), xx0_ctx0))

    # Different inputs should create different state
    x1_state0, x1_ctx0 = self.remote_interface.x_f_init(x1)
    self.assertFalse(np.allclose(x0_state0, x1_state0))

    # Different inputs (of same length) should create different state and context
    x1_2_state0, x1_2_ctx0 = self.remote_interface.x_f_init(x1 * 2)
    self.assertFalse(np.allclose(x1_state0, x1_2_state0))
    self.assertFalse(np.allclose(x1_ctx0, x1_2_ctx0))
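What the expected tiling looks like on toy arrays, mimicking the (batch, hidden) state and (src_len, batch, dim) context shapes noted in the comments (sizes here are made up):

import numpy as np

state = np.random.rand(1, 8)    # (batch=1, hidden)
ctx = np.random.rand(5, 1, 16)  # (src_len, batch=1, dim)

# Duplicate the single sentence along the batch axis only.
state2 = np.tile(state, [2, 1])  # -> (2, 8)
ctx2 = np.tile(ctx, [1, 2, 1])   # -> (5, 2, 16)
assert state2.shape == (2, 8) and ctx2.shape == (5, 2, 16)
assert np.allclose(state2[0], state2[1])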
def toa_normalize(x0, y0):
    xdim = x0.shape[0]
    m = x0.shape[1]
    n = y0.shape[1]  # was x0.shape[1]; y0 may have a different number of columns

    # Translate so the first point sits at the origin (MATLAB's x0(:,1) is column 0 here).
    t = -x0[:, 0:1]
    x = x0 + np.tile(t, (1, m))
    y = y0 + np.tile(t, (1, n))

    # Rotate with the Q factor of the remaining points (MATLAB columns 2:(1+xdim)).
    qr_a = x[:, 1:(1 + xdim)]
    q, r = scipy.linalg.qr(qr_a)
    x = np.dot(q.conj().T, x)
    y = np.dot(q.conj().T, y)

    M = np.diag(np.sign(np.diag(qr_a)))
    x1 = np.dot(M, x)
    y1 = np.dot(M, y)
    return x1, y1
matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
    # Params:
    #   J - column indices

    nnz_i = len(J)
    residual_i = y_i - mu0 - c[J]

    prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
    v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
    post_Phi_i = prior_Phi + \
        np.dot(v_T.T,
               np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T

    post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_i)
    post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)

    # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
    ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(
        C, np.random.randn(len(post_mean_i)), lower=lower)
    ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
    r_i = ru_i[0]
    u_i = ru_i[1:]

    return r_i, u_i
matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):
    prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
    nnz_j = len(I)
    residual_j = y_j - mu0 - r[I]

    u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
    post_Phi_j = prior_Phi + \
        np.dot(u_T.T,
               np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T

    post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_j)
    post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)

    # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
    cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(
        C, np.random.randn(len(post_mean_j)), lower=lower)
    cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
    c_j = cv_j[0]
    v_j = cv_j[1:]

    return c_j, v_j
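An aside on the np.tile inside post_Phi_j above: it weights each row of u_T by its precision before the Gram product, which broadcasting does without the explicit tile. A small check with arbitrary sizes:

import numpy as np

nnz, k = 7, 3              # hypothetical sizes (observations, 1 + num_factor)
u_T = np.random.rand(nnz, k)
phi = np.random.rand(nnz)  # per-observation precisions

# Original: replicate phi across columns to weight each row of u_T.
weighted_tile = np.dot(u_T.T, np.tile(phi[:, np.newaxis], (1, k)) * u_T)
# Broadcasting gives the same weighted sum of outer products u_i * u_i.T.
weighted_bcast = np.dot(u_T.T, phi[:, np.newaxis] * u_T)
assert np.allclose(weighted_tile, weighted_bcast)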
def frame_blocking(signal, framerate, sampletime=0.025, overlap=0.5):
    """
    Frame blocking: split the signal into frames of N samples each (N is
    typically 256 or 512, covering roughly 20~30 ms), with adjacent frames
    overlapping so that no information is lost at the frame boundaries.

    :param framerate: sampling rate of the signal
    :param signal: the input speech signal
    :param sampletime: duration of one frame, in seconds
    :param overlap: step between consecutive frames, as a fraction of a frame
    :return: array of frames, one frame per row
    """
    samplenum = len(signal)                   # total number of samples
    framesize = int(framerate * sampletime)   # samples per frame
    step = int(framesize * overlap)           # hop size between frames
    framenum = 1 + math.ceil((samplenum - framesize) / step)  # total number of frames

    # Zero-pad the signal so the last frame is complete.
    padnum = (framenum - 1) * step + framesize
    zeros = np.zeros((padnum - samplenum,))
    padsignal = np.concatenate((signal, zeros))

    # Build an index matrix: row k holds the sample indices of frame k.
    indices = np.tile(np.arange(0, framesize), (framenum, 1)) + \
        np.tile(np.arange(0, framenum * step, step), (framesize, 1)).T
    indices = np.array(indices, dtype=np.int32)
    frames = padsignal[indices]
    return frames
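A usage sketch for frame_blocking, assuming numpy (as np) and math are imported as the function requires (the signal and rate are made up):

import numpy as np

# Hypothetical 1-second tone at 16 kHz; 25 ms frames with a 50% hop as above.
framerate = 16000
signal = np.sin(2 * np.pi * 440 * np.arange(framerate) / framerate)
frames = frame_blocking(signal, framerate, sampletime=0.025, overlap=0.5)
# 400-sample frames, 200-sample step: 1 + ceil((16000 - 400) / 200) = 79 frames.
assert frames.shape == (79, 400)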
def add_missing_facets(data, layout, vars, facet_vals):
    # When in a dataframe some layer does not have all
    # the facet variables, add the missing facet variables
    # and create new data where the points (duplicates) are
    # present in all the facets
    missing_facets = set(vars) - set(facet_vals)
    if missing_facets:
        to_add = layout.loc[:, missing_facets].drop_duplicates()
        to_add.reset_index(drop=True, inplace=True)

        # a point for each facet, [0, 1, ..., n-1, 0, 1, ..., n-1, ...]
        data_rep = np.tile(np.arange(len(data)), len(to_add))
        # a facet for each point, [0, 0, 0, 1, 1, 1, ..., n-1, n-1, n-1]
        facet_rep = np.repeat(np.arange(len(to_add)), len(data))

        data = data.iloc[data_rep, :].reset_index(drop=True)
        facet_vals = facet_vals.iloc[data_rep, :].reset_index(drop=True)
        to_add = to_add.iloc[facet_rep, :].reset_index(drop=True)
        facet_vals = pd.concat([facet_vals, to_add],
                               axis=1, ignore_index=False)

    return data, facet_vals
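The two index arrays built above differ exactly as the comments describe; a tiny illustration:

import numpy as np

n_points, n_facets = 3, 2
# np.tile cycles the whole range: point indices repeated once per facet.
assert np.tile(np.arange(n_points), n_facets).tolist() == [0, 1, 2, 0, 1, 2]
# np.repeat stretches each element: one facet index per copied point.
assert np.repeat(np.arange(n_facets), n_points).tolist() == [0, 0, 0, 1, 1, 1]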
def __init__(self, nrBasis=11, sigma=0.05, num_samples=100):
    self.x = np.linspace(0, 1, num_samples)
    self.nrSamples = len(self.x)
    self.nrBasis = nrBasis
    self.sigma = sigma
    self.sigmaSignal = float('inf')  # noise of signal (float)
    self.C = np.arange(0, nrBasis) / (nrBasis - 1.0)

    # Gaussian basis functions at every sample point, normalized per column.
    # list(map(...)) keeps this working under Python 3, where map is lazy.
    self.Phi = np.exp(-.5 * (np.array(list(map(lambda x: x - self.C,
                                               np.tile(self.x, (self.nrBasis, 1)).T))).T ** 2
                             / (self.sigma ** 2)))
    self.Phi /= sum(self.Phi)

    self.viapoints = []
    self.W = np.array([])
    self.nrTraj = 0
    self.meanW = None
    self.sigmaW = None
    self.Y = np.empty((0, self.nrSamples), float)
def generate_trajectory(self, randomness=1e-10):
    """
    Outputs a trajectory.

    :param randomness: float between 0. (output will be the mean of gaussians) and 1. (fully randomized inside the variance)
    :return: a 1-D vector of the generated points
    """
    newMu = self.meanW
    newSigma = self.sigmaW

    for viapoint in self.viapoints:
        # basis functions at the observed time point (list(map(...)) for Python 3)
        PhiT = np.exp(-.5 * (np.array(list(map(lambda x: x - self.C,
                                               np.tile(viapoint['t'], (11, 1)).T))).T ** 2
                             / (self.sigma ** 2)))
        PhiT = PhiT / sum(PhiT)

        # Conditioning
        aux = viapoint['sigmay'] + np.dot(np.dot(PhiT.T, newSigma), PhiT)
        newMu = newMu + np.dot(np.dot(newSigma, PhiT) * 1 / aux,
                               (viapoint['obsy'] - np.dot(PhiT.T, newMu)))  # new weight mean conditioned on observations
        newSigma = newSigma - np.dot(np.dot(newSigma, PhiT) * 1 / aux, np.dot(PhiT.T, newSigma))

    sampW = np.random.multivariate_normal(newMu, randomness * newSigma, 1).T
    return np.dot(self.Phi.T, sampW)