# Source: draw.py (project: uai2017_learning_to_acquire_information, author: evanthebouncy)
import numpy as np
import matplotlib.pyplot as plt

FIG = plt.figure()

def draw(m, name, extra=None):
    FIG.clf()
    matrix = m
    orig_shape = np.shape(matrix)
    # drop the trailing channel dimension from orig_shape
    new_shape = orig_shape[:-1]
    matrix = np.reshape(matrix, new_shape)
    ax = FIG.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
    # plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar()
    if extra is not None:
        greens, reds = extra
        grn_x, grn_y = greens
        red_x, red_y = reds
        plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
        plt.scatter(x=red_x, y=red_y, c='r', s=40)
    # examples:
    # put a blue dot at (10, 20):
    # plt.scatter([10], [20])
    # put red dots, size 40, at 2 locations:
    # plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
    plt.savefig(name)
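A minimal usage sketch for draw() above, assuming numpy is imported as np and a module-level figure FIG exists, as above; the matrix values, marker coordinates, and output path are made up for illustration.

m = np.random.rand(8, 8, 1)                    # single-channel 8x8 matrix
greens = ([1, 2], [3, 4])                      # (x coords, y coords) of green markers
reds = ([5], [6])                              # one red marker
draw(m, '/tmp/draw_example.png', extra=(greens, reds))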
import numpy as np
import matplotlib.pylab as pl
from matplotlib import gridspec

def plot1D_mat(a, b, M, title=''):
    """Plot matrix M with the source and target 1D distributions.

    Creates a subplot with the source distribution a on the left and the
    target distribution b on the top. The matrix M is shown in between.

    Parameters
    ----------
    a : np.array, shape (na,)
        Source distribution
    b : np.array, shape (nb,)
        Target distribution
    M : np.array, shape (na, nb)
        Matrix to plot
    title : str, optional
        Figure title
    """
    na, nb = M.shape
    gs = gridspec.GridSpec(3, 3)
    xa = np.arange(na)
    xb = np.arange(nb)
    ax1 = pl.subplot(gs[0, 1:])
    pl.plot(xb, b, 'r', label='Target distribution')
    pl.yticks(())
    pl.title(title)
    ax2 = pl.subplot(gs[1:, 0])
    pl.plot(a, xa, 'b', label='Source distribution')
    pl.gca().invert_xaxis()
    pl.gca().invert_yaxis()
    pl.xticks(())
    pl.subplot(gs[1:, 1:], sharex=ax1, sharey=ax2)
    pl.imshow(M, interpolation='nearest')
    pl.axis('off')
    pl.xlim((0, nb))
    pl.tight_layout()
    pl.subplots_adjust(wspace=0., hspace=0.2)
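A hedged usage sketch for plot1D_mat, assuming the imports above; the two Gaussian-shaped histograms and the squared-distance cost matrix are made up for illustration.

na, nb = 100, 100
xa, xb = np.arange(na, dtype=float), np.arange(nb, dtype=float)
a = np.exp(-0.5 * ((xa - 30.0) / 8.0) ** 2)
a /= a.sum()
b = np.exp(-0.5 * ((xb - 60.0) / 12.0) ** 2)
b /= b.sum()
M = (xa.reshape(-1, 1) - xb.reshape(1, -1)) ** 2   # squared-distance cost matrix
plot1D_mat(a, b, M, title='Cost matrix and marginals')
pl.show()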
def plot2D_samples_mat(xs, xt, G, thr=1e-8, **kwargs):
    """Plot matrix G in 2D with lines using alpha values.

    Plot lines between source and target 2D samples with a color
    proportional to the value of the matrix G between samples.

    Parameters
    ----------
    xs : ndarray, shape (ns, 2)
        Source samples positions
    xt : ndarray, shape (nt, 2)
        Target samples positions
    G : ndarray, shape (ns, nt)
        OT matrix
    thr : float, optional
        Threshold above which the line is drawn
    **kwargs : dict
        Parameters given to the plot functions (default color is black if
        nothing given)
    """
    if ('color' not in kwargs) and ('c' not in kwargs):
        kwargs['color'] = 'k'
    mx = G.max()
    for i in range(xs.shape[0]):
        for j in range(xt.shape[0]):
            if G[i, j] / mx > thr:
                pl.plot([xs[i, 0], xt[j, 0]], [xs[i, 1], xt[j, 1]],
                        alpha=G[i, j] / mx, **kwargs)
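A hedged usage sketch for plot2D_samples_mat; the two point clouds and the coupling matrix G below are random stand-ins, not a real transport plan.

ns, nt = 20, 20
xs = np.random.randn(ns, 2)
xt = np.random.randn(nt, 2) + np.array([4.0, 0.0])
G = np.random.rand(ns, nt)                 # stand-in coupling weights
pl.figure()
plot2D_samples_mat(xs, xt, G, thr=0.8, color='g')
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend()
pl.show()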
import numpy

def fst_delay_snd(fst, snd, samp_rate, max_delay):
    # Verify argument shape.
    s1, s2 = fst.shape, snd.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'fst_delay_snd' function")
    half_len = int(s1[0] / 2)
    a = numpy.array(fst, dtype=numpy.double)
    b = numpy.array(snd, dtype=numpy.double)
    corr = numpy.correlate(a, b, 'same')
    max_pos = numpy.argmax(corr)
    # max_delay is currently unused.
    # plot(s1[0], samp_rate, a, b, corr)
    return corr, (max_pos - half_len) / samp_rate
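A hedged usage sketch for fst_delay_snd: estimate the lag between a noise signal and a shifted copy. The 40-sample shift is made up, and the sign convention of the returned delay should be verified on known data.

samp_rate = 8000
rng = numpy.random.RandomState(0)
sig = rng.randn(4096)
shifted = numpy.roll(sig, 40)              # shifted by 40 samples (5 ms)
corr, delay_s = fst_delay_snd(sig, shifted, samp_rate, max_delay=0.01)
print("estimated delay: {0:.4f} s".format(delay_s))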
import matplotlib.pylab as pl

def plot(l, samp, w1, w2, cor):
    time_range = numpy.arange(0, l) * (1.0 / samp)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, w1)
    pl.subplot(212)
    pl.plot(time_range, w2, c="r")
    pl.xlabel("time")
    pl.figure(2)
    pl.plot(time_range, cor)
    pl.show()
def main():
    sampling, maxvalue, wave_data = record.record()
    # Pick out two channels for our study.
    w1, w2 = wave_data[1:3]
    nframes = w1.shape[0]
    # Cut one channel at the tail and the other at the head, to guarantee
    # equal length and that the first delays the second.
    cut_time_len = 0.2  # seconds
    cut_len = int(cut_time_len * sampling)
    wp1 = w1[:-cut_len]
    wp2 = w2[cut_len:]
    # Get their normalised (amplitude) versions and calculate the
    # correlation-based delay.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)
    # Plot the channels (still showing the original signals).
    time_range = numpy.arange(0, nframes - cut_len) * (1.0 / sampling)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()
    # Print the delay.
    print("Channel 1 delays channel 2 by {0}".format(delay_time))
def main():
    sampling, maxvalue, wave_data = record.record()
    # Pick out two channels for our study.
    w1, w2 = wave_data[0:2]
    nframes = w1.shape[0]
    # Pad one channel at the head and the other at the tail,
    # to guarantee equal length.
    pad_time_len = 0.01  # seconds
    pad_len = int(pad_time_len * sampling)
    pad_arr = numpy.zeros(pad_len)
    wp1 = numpy.concatenate((pad_arr, w1))
    wp2 = numpy.concatenate((w2, pad_arr))
    # Get their normalised (amplitude) versions and calculate the
    # correlation-based delay.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)
    # Plot the channels (still showing the original signals).
    time_range = numpy.arange(0, nframes + pad_len) * (1.0 / sampling)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()
    # Print the delay.
    print("Channel 1 delays channel 2 by {0}".format(delay_time))
def lms(x1: numpy.ndarray, x2: numpy.ndarray, N: int):
    # Verify argument shape.
    s1, s2 = x1.shape, x2.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'lms' function")
    l = s1[0]
    # Coefficient matrix (2N+1 filter taps).
    W = numpy.mat(numpy.zeros([1, 2 * N + 1]))
    # Coefficient (time) matrix
    Wt = numpy.mat(numpy.zeros([l, 2 * N + 1]))
    # Feedback (time) matrix
    y = numpy.mat(numpy.zeros([l, 1]))
    # Error (time) matrix
    e = numpy.mat(numpy.zeros([l, 1]))
    # Traverse channel data; `mu` is a module-level LMS step size.
    for i in range(N, l - N):
        x1_vec = numpy.asmatrix(x1[i - N:i + N + 1])
        y[i] = x1_vec * numpy.transpose(W)
        e[i] = x2[i] - y[i]
        W += mu * e[i] * x1_vec
        Wt[i] = W
    # Find the coefficient vector containing the largest single tap weight.
    Wt_maxs = numpy.max(Wt, axis=1)
    row_idx = numpy.argmax(Wt_maxs)
    max_W = Wt[row_idx]
    delay_count = numpy.argmax(max_W) - N
    # Expects a plotting helper with signature plot(l, x1, x2, y, e).
    plot(l, x1, x2, y, e)
    return delay_count
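A hedged usage sketch for lms(): the function reads a module-level step size mu and calls a plotting helper with signature plot(l, x1, x2, y, e); both are supplied here (the helper as a no-op stub) so the sketch stays self-contained, and the 3-sample lag is made up.

mu = 0.005                                          # assumed LMS step size

def plot(l, x1, x2, y, e):                          # stub for the project's plotting helper
    pass

rng = numpy.random.RandomState(0)
x1 = rng.randn(2000)
x2 = numpy.concatenate((numpy.zeros(3), x1[:-3]))   # x2 lags x1 by 3 samples
print("tap offset reported by lms:", lms(x1, x2, N=8))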
def init():
    global fig1, ln_o, ln_x
    ln_o, = plt.plot([], [], 'ro')
    ln_x, = plt.plot([], [], 'bx')
    plt.xlim(-disp_bound, disp_bound)
    plt.ylim(-disp_bound, disp_bound)
    plt.xlabel('x')
    plt.ylabel('y')
    return ln_o,
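A hedged sketch of how init() above might be wired into matplotlib's FuncAnimation; disp_bound is the module-level bound the function reads, and update() is a made-up frame callback for illustration.

from matplotlib.animation import FuncAnimation

disp_bound = 10.0
fig1 = plt.figure()

def update(frame):
    t = 0.1 * frame
    ln_o.set_data([5.0 * np.cos(t)], [5.0 * np.sin(t)])
    ln_x.set_data([8.0 * np.cos(-t)], [8.0 * np.sin(-t)])
    return ln_o, ln_x

ani = FuncAnimation(fig1, update, init_func=init, frames=200,
                    interval=30, blit=False)
plt.show()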
def plot_channel(audio, sampling):
    channels, nframes = audio.shape[0], audio.shape[1]
    time_range = numpy.arange(0, nframes) * (1.0 / sampling)
    for i in range(1, channels + 1):
        pl.figure(i)
        pl.plot(time_range, audio[i - 1])
        pl.xlabel("time{0}".format(i))
    pl.show()
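A hedged usage sketch for plot_channel with two synthetic channels; the 8 kHz rate and sine frequencies are made up.

sampling = 8000
t = numpy.arange(0, 2048) / float(sampling)
audio = numpy.vstack((numpy.sin(2 * numpy.pi * 440 * t),
                      numpy.sin(2 * numpy.pi * 880 * t)))
plot_channel(audio, sampling)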
# Source: time_alignment_plotting_tools.py (project: hand_eye_calibration, author: ethz-asl)
def plot_angular_velocities(title,
                            angular_velocities,
                            angular_velocities_filtered,
                            block=True):
    fig = plt.figure()
    title_position = 1.05
    fig.suptitle(title, fontsize='24')
    a1 = plt.subplot(1, 2, 1)
    a1.set_title(
        "Angular Velocities Before Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities[:, 0], c='r')
    plt.plot(angular_velocities[:, 1], c='g')
    plt.plot(angular_velocities[:, 2], c='b')
    a2 = plt.subplot(1, 2, 2)
    a2.set_title(
        "Angular Velocities After Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities_filtered[:, 0], c='r')
    plt.plot(angular_velocities_filtered[:, 1], c='g')
    plt.plot(angular_velocities_filtered[:, 2], c='b')
    plt.subplots_adjust(left=0.025, right=0.975, top=0.8, bottom=0.05)
    if plt.get_backend() == 'TkAgg':
        mng = plt.get_current_fig_manager()
        max_size = mng.window.maxsize()
        max_size = (max_size[0], max_size[1] * 0.45)
        mng.resize(*max_size)
    plt.show(block=block)
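A hedged usage sketch for plot_angular_velocities, assuming matplotlib.pyplot is imported as plt as above; the synthetic signals and the moving-average "filter" below are stand-ins for real IMU data and filtering.

raw = np.cumsum(np.random.randn(500, 3) * 0.01, axis=0)
kernel = np.ones(15) / 15.0
filtered = np.column_stack(
    [np.convolve(raw[:, k], kernel, mode='same') for k in range(3)])
plot_angular_velocities("Angular velocities", raw, filtered, block=True)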
def plot(self):
    from matplotlib.pylab import show, plot, stem
    # Placeholder: plotting for this class is not implemented yet.
    pass
def run_regression_1D_collapsed():
    np.random.seed(42)
    print("create dataset ...")
    Xtrain, ytrain, Xtest, ytest = create_dataset()
    alphas = [0.001, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 1]
    for alpha in alphas:
        M = 20
        model = vfe.SGPR_collapsed(Xtrain, ytrain, M)
        model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000, disp=False)
        my, vy = model.predict_y(Xtest, alpha)
        my = np.reshape(my, ytest.shape)
        vy = np.reshape(vy, ytest.shape)
        rmse = np.sqrt(np.mean((my - ytest)**2))
        ll = np.mean(-0.5 * np.log(2 * np.pi * vy) - 0.5 * (ytest - my)**2 / vy)
        nlml, _ = model.objective_function(model.get_hypers(), alpha)
        print('alpha=%.3f, train ml=%3f, test rmse=%.3f, ll=%.3f' % (alpha, nlml, rmse, ll))
        # plot(model, Xtrain, ytrain)
        # plt.show()
    # should produce something like this
    # alpha=0.001, train ml=-64.573021, test rmse=0.169, ll=0.348
    # alpha=0.100, train ml=-64.616618, test rmse=0.169, ll=0.348
    # alpha=0.200, train ml=-64.626655, test rmse=0.169, ll=0.348
    # alpha=0.300, train ml=-64.644053, test rmse=0.169, ll=0.348
    # alpha=0.500, train ml=-64.756588, test rmse=0.169, ll=0.348
    # alpha=0.700, train ml=-68.755871, test rmse=0.169, ll=0.350
    # alpha=0.800, train ml=-72.153441, test rmse=0.167, ll=0.349
    # alpha=1.000, train ml=-71.305002, test rmse=0.169, ll=0.303
def run_regression_1D_stoc():
    np.random.seed(42)
    print("create dataset ...")
    N = 200
    X = np.random.rand(N, 1)
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layers[0].zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print("create model and optimize ...")
    M = 20
    hidden_size = [2]
    model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
    model.optimise(method='adam', alpha=1.0,
                   maxiter=50000, mb_size=M, adam_lr=0.001)
    plot(model)
    # save before show so the figure is not empty when written to disk
    plt.savefig('/tmp/aep_dgpr_1D_stoc.pdf')
    plt.show()
def run_cluster_MC():
    import GPy
    # create dataset
    print("creating dataset...")
    N = 100
    k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
    k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
    k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
    X = np.random.normal(0, 1, (N, 5))
    A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
    B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
    C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
    Y = np.vstack((A, B, C))
    labels = np.hstack((np.zeros(A.shape[0]), np.ones(
        B.shape[0]), np.ones(C.shape[0]) * 2))
    # inference
    np.random.seed(42)
    print("inference ...")
    M = 30
    D = 5
    alpha = 0.5
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000, prop_mode=config.PROP_MC)
    ls = np.exp(lvm.sgp_layer.ls)
    print(ls)
    inds = np.argsort(ls)
    plt.figure()
    mx, vx = lvm.get_posterior_x()
    plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
    zu = lvm.sgp_layer.zu
    plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
    # plt.show()
    plt.savefig('/tmp/gplvm_cluster_MC.pdf')
def run_pinwheel():
    def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                      rs=np.random.RandomState(0)):
        """Based on code by Ryan P. Adams."""
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
        features = rs.randn(num_classes * num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1
        labels = np.repeat(np.arange(num_classes), num_per_class)
        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack([np.cos(angles), -np.sin(angles),
                              np.sin(angles), np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))
        return np.einsum('ti,tij->tj', features, rotations)

    # create dataset
    print("creating dataset...")
    Y = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=3,
                      num_per_class=50, rate=0.4)
    # inference
    print("inference ...")
    M = 20
    D = 2
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B')
    mx, vx = lvm.get_posterior_x()
    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.plot(Y[:, 0], Y[:, 1], 'bx')
    ax = fig.add_subplot(122)
    ax.errorbar(mx[:, 0], mx[:, 1], xerr=np.sqrt(
        vx[:, 0]), yerr=np.sqrt(vx[:, 1]), fmt='xk')
    plt.show()
def run_semicircle():
    # create dataset
    print("creating dataset...")
    N = 20
    cos_val = [0.97, 0.95, 0.94, 0.89, 0.8,
               0.88, 0.92, 0.96, 0.7, 0.65,
               0.3, 0.25, 0.1, -0.25, -0.3,
               -0.6, -0.67, -0.75, -0.97, -0.98]
    cos_val = np.array(cos_val).reshape((N, 1))
    # cos_val = 2*np.random.rand(N, 1) - 1
    angles = np.arccos(cos_val)
    sin_val = np.sin(angles)
    Y = np.hstack((sin_val, cos_val))
    Y += 0.05 * np.random.randn(Y.shape[0], Y.shape[1])
    # inference
    print("inference ...")
    M = 10
    D = 2
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B', maxiter=2000)
    # lvm.optimise(method='adam', maxiter=2000)
    plt.figure()
    plt.plot(Y[:, 0], Y[:, 1], 'sb')
    mx, vx = lvm.get_posterior_x()
    for i in range(mx.shape[0]):
        mxi = mx[i, :]
        vxi = vx[i, :]
        mxi1 = mxi + np.sqrt(vxi)
        mxi2 = mxi - np.sqrt(vxi)
        mxis = np.vstack([mxi.reshape((1, D)),
                          mxi1.reshape((1, D)),
                          mxi2.reshape((1, D))])
        myis, vyis = lvm.predict_f(mxis)
        plt.errorbar(myis[:, 0], myis[:, 1],
                     xerr=np.sqrt(vyis[:, 0]), yerr=np.sqrt(vyis[:, 1]), fmt='.k')
    plt.show()
def run_frey():
    # import dataset
    data = pods.datasets.brendan_faces()
    # Y = data['Y'][:50, :]
    Y = data['Y']
    Yn = Y - np.mean(Y, axis=0)
    Yn /= np.std(Y, axis=0)
    Y = Yn
    # inference
    print("inference ...")
    M = 30
    D = 20
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B', maxiter=10)
    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1])
    plt.plot(zu[:, 0], zu[:, 1], 'ko')
    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 20
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            canvas[(nx - i - 1) * sx:(nx - i) * sx, j *
                   sy:(j + 1) * sy] = x_mean.reshape(sx, sy)
    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def run_step_1D_collapsed():
    np.random.seed(42)
    print("create dataset ...")
    N = 200
    X = np.random.rand(N, 1) * 3 - 1.5
    Y = step(X)  # step() is defined elsewhere in the original test file
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        xx = np.linspace(-3, 3, 100)[:, None]
        mean, var = m.predict_f(xx, alpha)
        zu = m.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var),
            mean[:, 0] + 2 * np.sqrt(var),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        # no_samples = 20
        # f_samples = m.sample_f(xx, no_samples)
        # for i in range(no_samples):
        #     plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
        plt.xlim(-3, 3)

    # inference
    print("create model and optimize ...")
    M = 20
    alpha = 0.01
    model = vfe.SGPR_collapsed(X, Y, M)
    model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000)
    plot(model)
    plt.show()