def predict_forward_mc(self, T, x_control, no_samples):
    """Propagate the model forward T steps by Monte Carlo sampling.

    Args:
        T (int): Number of steps to predict forward.
        x_control (ndarray, optional): Control inputs, one row per time
            step; used only when the model has control dimensions.
        no_samples (int): Number of Monte Carlo samples to draw.

    Returns:
        tuple: Arrays (x, my, vy) of shapes (T, no_samples, Din) and
            (T, no_samples, Dout): latent samples, predictive means,
            and predictive variances.
    """
    x = np.zeros((T, no_samples, self.Din))
    my = np.zeros((T, no_samples, self.Dout))
    vy = np.zeros((T, no_samples, self.Dout))
    post_m, post_v = self.get_posterior_x()
    mtm1 = post_m[[-1], :]
    vtm1 = post_v[[-1], :]
    eps = np.random.randn(no_samples, self.Din)
    x_samples = eps * np.sqrt(vtm1) + mtm1
    for t in range(T):
        if self.Dcon_dyn > 0:
            xc_samples = np.hstack((x_samples, np.tile(x_control[[t], :], [no_samples, 1])))
        else:
            xc_samples = x_samples
        mt, vt = self.dyn_layer.forward_prop_thru_post(xc_samples)
        eps = np.random.randn(no_samples, self.Din)
        x_samples = eps * np.sqrt(vt) + mt
        if self.Dcon_emi > 0:
            xc_samples = np.hstack((x_samples, np.tile(x_control[[t], :], [no_samples, 1])))
        else:
            xc_samples = x_samples
        if self.gp_emi:
            mft, vft = self.emi_layer.forward_prop_thru_post(xc_samples)
            myt, vyt_n = self.lik_layer.output_probabilistic(mft, vft)
        else:
            myt, _, vyt_n = self.emi_layer.output_probabilistic(xc_samples, np.zeros_like(x_samples))
            # vyt_n is a full covariance per sample; keep only its diagonal.
            vyt_n = np.diagonal(vyt_n, axis1=1, axis2=2)
        x[t, :, :] = x_samples
        my[t, :, :], vy[t, :, :] = myt, vyt_n
    return x, my, vy
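A hypothetical usage sketch for the method above. The fitted instance `model` and its `Dout` attribute are assumptions for illustration; only the call signature comes from the snippet:

# Assumed: `model` is a fitted instance of the class defining predict_forward_mc.
import numpy as np

T, no_samples = 20, 100
x_control = np.zeros((T, 1))        # hypothetical control inputs, one row per step
x, my, vy = model.predict_forward_mc(T, x_control, no_samples)
forecast = my.mean(axis=1)          # point forecast, shape (T, Dout)
spread = np.sqrt(vy.mean(axis=1))   # rough predictive spread per step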
Python examples of numpy diagonal()
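Before the project examples below, a minimal self-contained refresher on what np.diagonal does (standard NumPy behavior, nothing project-specific):

import numpy as np

a = np.arange(12).reshape(3, 4)
print(np.diagonal(a))            # [0 5 10] -- main diagonal of a 2-D array
print(np.diagonal(a, offset=1))  # [1 6 11] -- first superdiagonal

# For stacks of matrices, axis1/axis2 select which two axes form the matrix;
# the diagonals are returned along a new last axis.
b = np.arange(24).reshape(2, 3, 4)
print(np.diagonal(b, axis1=1, axis2=2).shape)  # (2, 3)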
def update_clustered_homogeneous_block_sizes(self, x, weight=1.0, block_size=None, include_self_loops=True):
    print("update_clustered_homogeneous_block_sizes")
    if block_size is None:
        er = "Error: block_size not specified"
        raise Exception(er)
    if isinstance(block_size, numpy.ndarray):
        er = "Error: inhomogeneous block sizes not supported by this function"
        raise Exception(er)
    # Assuming block_size is an integer:
    num_samples, dim = x.shape
    if num_samples % block_size > 0:
        err = "Inconsistency error: num_samples (%d) is not a multiple of block_size (%d)" % \
              (num_samples, block_size)
        raise Exception(err)
    num_blocks = num_samples // block_size  # integer division; a float would break range() below
    # NOTE: dtype handling is missing throughout this function.
    sum_x = x.sum(axis=0)
    sum_prod_x = mdp.utils.mult(x.T, x)
    self.AddSamples(sum_prod_x, sum_x, num_samples, weight)
    self.last_block = None
    # Correlation matrix: compute the mean signal of each block.
    media = numpy.zeros((num_blocks, dim))
    for i in range(num_blocks):
        media[i] = x[i * block_size:(i + 1) * block_size].sum(axis=0) * (1.0 / block_size)
    sum_prod_meds = mdp.utils.mult(media.T, media)
    num_diffs = num_blocks * block_size
    print("num_diffs in block:", num_diffs, " num_samples:", num_samples)
    if include_self_loops:
        sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size * sum_prod_meds) / block_size
    else:
        sum_prod_diffs = 2.0 * block_size * (sum_prod_x - block_size * sum_prod_meds) / (block_size - 1)
    self.AddDiffs(sum_prod_diffs, num_diffs, weight)
    print("(Diag(complete)/num_diffs.avg)**0.5 =", ((numpy.diagonal(sum_prod_diffs) / num_diffs).mean()) ** 0.5)
def diagonal(self):
    return numpy.diagonal(self.__mat)
def __animate_1d(self, mat, **kwargs):
    delta_list = numpy.diagonal(mat)
    for (idx, val) in enumerate(delta_list):
        # Use truthiness rather than `is True`: numpy.diagonal yields
        # numpy.bool_ scalars, which are never identical to the Python
        # singletons True/False.
        if val:
            self.activate_1d(idx, **kwargs)
        else:
            self.deactivate_1d(idx, **kwargs)
    self.commit(**kwargs)
def _predict(self, steps, exog, alpha):
    assert 0 < alpha < 1
    y = (exog if exog is not None else self._endog)[-self.results.k_ar:]
    forecast = self.results.forecast(y, steps)
    # FIXME: The following is adapted from statsmodels's
    # VAR.forecast_interval() as the original doesn't work.
    q = norm.ppf(1 - alpha / 2)
    sigma = np.sqrt(np.abs(np.diagonal(self.results.mse(steps), axis1=2)))
    err = q * sigma
    return np.asarray([forecast, forecast - err, forecast + err])
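A self-contained illustration of the interval construction used above, with a synthetic (steps, k, k) covariance stack standing in for results.mse(steps):

import numpy as np
from scipy.stats import norm

alpha = 0.05
forecast = np.array([[1.0, 2.0], [1.1, 2.2]])        # (steps, k) point forecasts
mse = np.stack([np.diag([0.04, 0.09])] * 2)          # (steps, k, k) synthetic covariances
sigma = np.sqrt(np.diagonal(mse, axis1=1, axis2=2))  # per-step standard errors
err = norm.ppf(1 - alpha / 2) * sigma
lower, upper = forecast - err, forecast + err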
def test_diagonal(self):
    a = [[0, 1, 2, 3],
         [4, 5, 6, 7],
         [8, 9, 10, 11]]
    out = np.diagonal(a)
    tgt = [0, 5, 10]
    assert_equal(out, tgt)
def test_diagonal_view_notwriteable(self):
    # this test is only for 1.9, the diagonal view will be
    # writeable in 1.10.
    a = np.eye(3).diagonal()
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
    a = np.diagonal(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
    a = np.diag(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
    # Regression test for a bug that crept in at one point
    a = np.zeros((100, 100))
    assert_(sys.getrefcount(a) < 50)
    for i in range(100):
        a.diagonal()
    assert_(sys.getrefcount(a) < 50)
def __str__(self):
    return "x: {}, var: {}".format(self._x, np.diagonal(self._P))
def fisher_vector(samples, means, covs, w):
    s0, s1, s2 = likelihood_statistics(samples, means, covs, w)
    T = len(samples)
    # Keep only the diagonal of each covariance matrix.
    covs = np.float32([np.diagonal(covs[k]) for k in range(0, covs.shape[0])])
    a = fisher_vector_weights(s0, s1, s2, means, covs, w, T)
    b = fisher_vector_means(s0, s1, s2, means, covs, w, T)
    c = fisher_vector_sigma(s0, s1, s2, means, covs, w, T)
    fv = np.concatenate([np.concatenate(a), np.concatenate(b), np.concatenate(c)])
    fv = normalize(fv)
    return fv
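The per-matrix list comprehension above has a vectorized equivalent; a quick check with synthetic data:

import numpy as np

covs = np.random.rand(5, 3, 3)  # 5 covariance matrices of size 3x3
loop = np.float32([np.diagonal(covs[k]) for k in range(covs.shape[0])])
vectorized = np.float32(np.diagonal(covs, axis1=1, axis2=2))
assert np.array_equal(loop, vectorized)  # both have shape (5, 3)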
def _update_iteration_data(self, itr, algorithm, costs, pol_sample_lists):
    """
    Update iteration data information: iteration, average cost, and for
    each condition the mean cost over samples, step size, linear Gaussian
    controller entropies, and initial/final KL divergences for BADMM.
    """
    avg_cost = np.mean(costs)
    if pol_sample_lists is not None:
        test_idx = algorithm._hyperparams['test_conditions']
        # pol_sample_lists is a list of singletons
        samples = [sl[0] for sl in pol_sample_lists]
        pol_costs = [np.sum(algorithm.cost[idx].eval(s)[0])
                     for s, idx in zip(samples, test_idx)]
        itr_data = '%3d | %8.2f %12.2f' % (itr, avg_cost, np.mean(pol_costs))
    else:
        itr_data = '%3d | %8.2f' % (itr, avg_cost)
    for m in range(algorithm.M):
        cost = costs[m]
        step = np.mean(algorithm.prev[m].step_mult * algorithm.base_kl_step)
        entropy = 2 * np.sum(np.log(np.diagonal(algorithm.prev[m].traj_distr.chol_pol_covar,
                                                axis1=1, axis2=2)))
        itr_data += ' | %8.2f %8.2f %8.2f' % (cost, step, entropy)
        if isinstance(algorithm, AlgorithmBADMM):
            kl_div_i = algorithm.cur[m].pol_info.init_kl.mean()
            kl_div_f = algorithm.cur[m].pol_info.prev_kl.mean()
            itr_data += ' %8.2f %8.2f %8.2f' % (pol_costs[m], kl_div_i, kl_div_f)
        elif isinstance(algorithm, AlgorithmMDGPS):
            # TODO: Handle the test/train distinction more cleanly.
            if test_idx == algorithm._hyperparams['train_conditions']:
                itr_data += ' %8.2f' % (pol_costs[m])
            else:
                itr_data += ' %8s' % ("N/A")
    self.append_output_text(itr_data)
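The entropy term above relies on the identity log det(Sigma) = 2 * sum(log(diag(L))) for a Cholesky factor L; a standalone check with a synthetic stack of factors:

import numpy as np

# Build a batch of SPD matrices and their Cholesky factors.
A = np.random.randn(4, 3, 3)
sigma = A @ A.transpose(0, 2, 1) + 3 * np.eye(3)   # (4, 3, 3), symmetric positive definite
L = np.linalg.cholesky(sigma)
logdet_chol = 2 * np.sum(np.log(np.diagonal(L, axis1=1, axis2=2)), axis=1)
_, logdet_ref = np.linalg.slogdet(sigma)
assert np.allclose(logdet_chol, logdet_ref)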
def f1_score(confusion):
    tps = np.diagonal(confusion)
    supports = confusion.sum(axis=1)
    # Suppress divide-by-zero warnings for empty classes; the resulting
    # inf/nan entries are zeroed out below.
    with np.errstate(divide='ignore', invalid='ignore'):
        precisions = np.true_divide(tps, confusion.sum(axis=0))
        recalls = np.true_divide(tps, supports)
        f1s = 2 * np.true_divide(precisions * recalls, precisions + recalls)
        f1s[f1s == np.inf] = 0
        f1s = np.nan_to_num(f1s)
    f1 = np.average(f1s, weights=supports)
    return f1
# TODO: remove duplicated code, same as intent model utils
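A quick sanity check of the f1_score function above on a small confusion matrix (rows as true class, columns as predicted, as the axis usage implies):

import numpy as np

confusion = np.array([[5, 1, 0],
                      [2, 3, 0],
                      [0, 0, 4]])
tps = np.diagonal(confusion)   # [5 3 4] -- per-class true positives
print(f1_score(confusion))     # support-weighted mean of per-class F1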
def gradient_ascent(a, b, sigma, l, alpha, K_y):
    """
    Tune the hyperparameters sigma and l of an RBF kernel.
    :param a: input vector a
    :param b: input vector b
    :param sigma: output variance; determines the average distance of the function from its mean
    :param l: lengthscale; determines the length of the 'wiggles' in the function
    :param alpha: equal to K_inv * y
    :param K_y: K_inv
    :return: updated sigma and l
    """
    step_size = 0.01
    sqdist = ((a[:, :, None] - b[:, :, None].T) ** 2).sum(1)
    # The output variance of the RBF kernel is held fixed so the result
    # can be visualized in one dimension.
    '''
    # tune hyperparameter sigma
    sigma_grad = 2 * sigma * np.exp(-.5 * sqdist / (l ** 2))
    sigma_matrix = np.dot(np.dot(alpha, alpha.T) - K_y, sigma_grad)
    tr_sigma = np.diagonal(sigma_matrix).sum()
    sigma_var = .5 * tr_sigma
    '''
    # tune hyperparameter l
    l_grad = sigma ** 2 * np.exp(-.5 * sqdist / (l ** 2)) * (sqdist / l ** 3)
    l_matrix = np.dot(np.dot(alpha, alpha.T) - K_y, l_grad)
    tr_l = np.diagonal(l_matrix).sum()
    l_var = .5 * tr_l
    # Gradient ascent step; enabling the commented sigma update above would
    # maximize the log marginal likelihood in both hyperparameters simultaneously.
    '''
    sigma = sigma + step_size * sigma_var
    '''
    l = l + step_size * l_var
    return sigma, l
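The trace term above (np.diagonal(...).sum()) is just np.trace; a small equivalence check, plus the cheaper einsum form that avoids materializing the full product (synthetic matrices):

import numpy as np

A = np.random.randn(4, 4)
B = np.random.randn(4, 4)
t1 = np.diagonal(np.dot(A, B)).sum()
t2 = np.trace(np.dot(A, B))
t3 = np.einsum('ij,ji->', A, B)   # trace(A @ B) without forming A @ B
assert np.allclose([t1, t2], t3)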
test_numeric.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def test_diagonal():
    b1 = np.matrix([[1, 2], [3, 4]])
    diag_b1 = np.matrix([[1, 4]])
    array_b1 = np.array([1, 4])
    assert_equal(b1.diagonal(), diag_b1)
    assert_equal(np.diagonal(b1), array_b1)
    assert_equal(np.diag(b1), array_b1)
def _compute_output_errors(traj, x, P, output_stamps,
                           gyro_model, accel_model):
    T = _errors_transform_matrix(traj.loc[output_stamps])
    y = util.mv_prod(T, x[:, :N_BASE_STATES])
    Py = util.mm_prod(T, P[:, :N_BASE_STATES, :N_BASE_STATES])
    Py = util.mm_prod(Py, T, bt=True)
    sd_y = np.diagonal(Py, axis1=1, axis2=2) ** 0.5
    err = pd.DataFrame(index=output_stamps)
    err['lat'] = y[:, DRN]
    err['lon'] = y[:, DRE]
    err['VE'] = y[:, DVE]
    err['VN'] = y[:, DVN]
    err['h'] = np.rad2deg(y[:, DH])
    err['p'] = np.rad2deg(y[:, DP])
    err['r'] = np.rad2deg(y[:, DR])
    sd = pd.DataFrame(index=output_stamps)
    sd['lat'] = sd_y[:, DRN]
    sd['lon'] = sd_y[:, DRE]
    sd['VE'] = sd_y[:, DVE]
    sd['VN'] = sd_y[:, DVN]
    sd['h'] = np.rad2deg(sd_y[:, DH])
    sd['p'] = np.rad2deg(sd_y[:, DP])
    sd['r'] = np.rad2deg(sd_y[:, DR])
    gyro_err = pd.DataFrame(index=output_stamps)
    gyro_sd = pd.DataFrame(index=output_stamps)
    n = N_BASE_STATES
    for i, name in enumerate(gyro_model.states):
        gyro_err[name] = x[:, n + i]
        gyro_sd[name] = P[:, n + i, n + i] ** 0.5
    accel_err = pd.DataFrame(index=output_stamps)
    accel_sd = pd.DataFrame(index=output_stamps)
    ng = gyro_model.n_states
    for i, name in enumerate(accel_model.states):
        accel_err[name] = x[:, n + ng + i]
        accel_sd[name] = P[:, n + ng + i, n + ng + i] ** 0.5
    return err, sd, gyro_err, gyro_sd, accel_err, accel_sd
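The sd_y line above extracts per-timestep standard deviations from a stack of covariance matrices; a minimal standalone version of that pattern, with synthetic covariances:

import numpy as np

P = np.stack([np.diag([0.01, 0.04, 0.09])] * 5)   # (5, 3, 3) covariance stack
sd = np.diagonal(P, axis1=1, axis2=2) ** 0.5      # (5, 3) per-step standard deviations
assert np.allclose(sd, [0.1, 0.2, 0.3])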