import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec


def plot_x_y_yhat(x, y, y_hat, xsz, ysz, binz=False):
    """Plot x, y and y_hat side by side."""
    plt.close("all")
    f = plt.figure(figsize=(15, 10.8), dpi=300)
    gs = gridspec.GridSpec(1, 3)
    if binz:
        y_hat = (y_hat > 0.5) * 1.
    ims = [x, y, y_hat]
    tils = [
        "x:" + str(xsz) + "x" + str(xsz),
        "y:" + str(ysz) + "x" + str(ysz),
        "yhat:" + str(ysz) + "x" + str(ysz)]
    for n, ti in zip([0, 1, 2], tils):
        f.add_subplot(gs[n])
        # The original branched on n == 0 but drew the same thing in both
        # branches, so a single imshow call is equivalent.
        plt.imshow(ims[n], cmap=cm.Greys_r)
        plt.title(ti)
    return f
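# A minimal usage sketch (an assumption, not from the original source): random
# arrays stand in for a real input image, target mask and predicted mask.
import numpy as np

x = np.random.rand(32, 32)       # input image
y = np.random.rand(16, 16)       # target mask
y_hat = np.random.rand(16, 16)   # predicted mask (probabilities)
fig = plot_x_y_yhat(x, y, y_hat, xsz=32, ysz=16, binz=True)
fig.savefig("x_y_yhat.png", bbox_inches="tight")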
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc


def plot_roc(y_test, y_pred, label=''):
    """Compute the ROC curve and ROC area and plot them."""
    fpr, tpr, _ = roc_curve(y_test, y_pred)
    roc_auc = auc(fpr, tpr)
    # Plot of a ROC curve for a specific class
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic' + label)
    plt.legend(loc="lower right")
    plt.show()
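# Hypothetical usage sketch with synthetic labels and noisy scores, assuming
# the sklearn/matplotlib imports above.
import numpy as np

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=200)
y_score = y_true + rng.normal(scale=0.8, size=200)  # scores correlated with labels
plot_roc(y_true, y_score, label=' (toy data)')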
import os

import numpy as np
import pylab


def plot_feat_importance(feature_names, clf, name):
    """Bar-plot the linear model coefficients, sorted by value, and save the chart."""
    # CHART_DIR is expected to be defined by the surrounding module.
    pylab.figure(num=None, figsize=(6, 5))
    coef_ = clf.coef_
    important = np.argsort(np.absolute(coef_.ravel()))
    f_imp = feature_names[important]
    coef = coef_.ravel()[important]
    inds = np.argsort(coef)
    f_imp = f_imp[inds]
    coef = coef[inds]
    xpos = np.array(list(range(len(coef))))
    pylab.bar(xpos, coef, width=1)
    pylab.title('Feature importance for %s' % (name))
    ax = pylab.gca()
    ax.set_xticks(np.arange(len(coef)))
    labels = ax.set_xticklabels(f_imp)
    for label in labels:
        label.set_rotation(90)
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(
        CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
import pylab


def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                       ha='right', va='bottom')
    pylab.show()
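# Hypothetical usage sketch: random 2-D embeddings for a handful of words
# (in practice these would come from a trained embedding model).
import numpy as np

words = ['cat', 'dog', 'car', 'truck', 'apple']
embeddings_2d = np.random.RandomState(1).randn(len(words), 2)
plot(embeddings_2d, words)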
def CO_ratio(self, ifig, ixaxis):
    """
    Plot the surface C/O ratio in figure ifig against the x-axis quantity ixaxis.

    Parameters
    ----------
    ifig : integer
        Figure number in which to plot.
    ixaxis : string
        Quantity to put on the x-axis, either 'time' or 'model'.
    """
    # pl is matplotlib.pyplot; old_div comes from past.utils (Python 2/3 division).
    def C_O(model):
        surface_c12 = model.get('surface_c12')
        surface_o16 = model.get('surface_o16')
        # number ratio C/O = (X_C12 / 12) / (X_O16 / 16) = 4*X_C12 / (3*X_O16)
        CORatio = old_div((surface_c12 * 4.), (surface_o16 * 3.))
        return CORatio

    if ixaxis == 'time':
        xax = self.get('star_age')
    elif ixaxis == 'model':
        xax = self.get('model_number')
    else:
        raise IOError("ixaxis not recognised")

    pl.figure(ifig)
    pl.plot(xax, C_O(self))
def t_lumi(self, num_frame, xax):
    """
    Luminosity evolution as a function of time or model.

    Parameters
    ----------
    num_frame : integer
        Number of the frame to plot into.
    xax : string
        Either 'model' or 'time', to indicate what is to be used on the x-axis.
    """
    pyl.figure(num_frame)
    if xax == 'time':
        xaxisarray = self.get('star_age')
    elif xax == 'model':
        xaxisarray = self.get('model_number')
    else:
        print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
        return  # bail out rather than plotting with an undefined x-axis

    logLH = self.get('log_LH')
    logLHe = self.get('log_LHe')
    pyl.plot(xaxisarray, logLH, label='L(H)')
    pyl.plot(xaxisarray, logLHe, label='L(He)')
    pyl.ylabel('log L')
    pyl.legend(loc=2)
    if xax == 'time':
        pyl.xlabel('t / yrs')
    elif xax == 'model':
        pyl.xlabel('model number')
def t_surf_parameter(self, num_frame, xax):
    """
    Surface parameter evolution as a function of time or model.

    Parameters
    ----------
    num_frame : integer
        Number of the frame to plot into.
    xax : string
        Either 'model' or 'time', to indicate what is to be used on the x-axis.
    """
    pyl.figure(num_frame)
    if xax == 'time':
        xaxisarray = self.get('star_age')
    elif xax == 'model':
        xaxisarray = self.get('model_number')
    else:
        print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
        return  # bail out rather than plotting with an undefined x-axis

    logL = self.get('log_L')
    logTeff = self.get('log_Teff')
    pyl.plot(xaxisarray, logL, '-k', label='log L')
    pyl.plot(xaxisarray, logTeff, '--k', label='log Teff')  # dashed so the two curves are distinguishable
    pyl.ylabel('log L, log Teff')
    pyl.legend(loc=2)
    if xax == 'time':
        pyl.xlabel('t / yrs')
    elif xax == 'model':
        pyl.xlabel('model number')
def example_plot1():
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    x = np.linspace(1., 8., 30)
    ax.set_title('Title!')
    ax.plot(x, x ** 1.5, color='k', ls='solid', label='line 1')
    ax.plot(x, 20 / x, color='0.50', ls='dashed', label='line 2')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Temperature (K)')
    ax.legend(loc='upper left')
    fig.tight_layout()
    return [fig], ['example_1']
# Should make an OO example where __init__ sets up data, then methods plot it different ways. Should be able to just pass methods along...
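# A minimal object-oriented sketch along the lines of the comment above
# (a hypothetical class, not part of the original source): __init__ sets up
# the data once, and each method plots it a different way.
import numpy as np
import matplotlib.pyplot as plt


class CoolingCurve:
    def __init__(self):
        self.t = np.linspace(1., 8., 30)
        self.temperature = 20. / self.t

    def plot_linear(self):
        fig, ax = plt.subplots()
        ax.plot(self.t, self.temperature, color='k')
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Temperature (K)')
        return fig

    def plot_loglog(self):
        fig, ax = plt.subplots()
        ax.loglog(self.t, self.temperature, color='0.50', ls='dashed')
        ax.set_xlabel('Time (s)')
        ax.set_ylabel('Temperature (K)')
        return fig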
def rasta_plp_extractor(x, sr, plp_order=0, do_rasta=True):
    # Helper functions (log_power_spectrum_extractor, freq2bark, get_fft_bark_mat,
    # rasta_filt, postaud, do_lpc) are defined elsewhere in the same module.
    spec = log_power_spectrum_extractor(x, int(sr * 0.02), int(sr * 0.01), 'hamming', False)
    bark_filters = int(np.ceil(freq2bark(sr // 2)))
    wts = get_fft_bark_mat(sr, int(sr * 0.02), bark_filters)
    '''
    # Debug plotting of the Bark filterbank:
    plt.figure()
    plt.subplot(211)
    plt.imshow(wts)
    plt.subplot(212)
    plt.hold(True)
    for i in range(18):
        plt.plot(wts[i, :])
    plt.show()
    '''
    bark_spec = np.matmul(wts, spec)
    if do_rasta:
        bark_spec = np.where(bark_spec == 0.0, np.finfo(float).eps, bark_spec)
        log_bark_spec = np.log(bark_spec)
        rasta_log_bark_spec = rasta_filt(log_bark_spec)
        bark_spec = np.exp(rasta_log_bark_spec)
    post_spec = postaud(bark_spec, sr / 2.)
    if plp_order > 0:
        lpcas = do_lpc(post_spec, plp_order)
        # lpcas = do_lpc(spec, plp_order)  # just for test
    else:
        lpcas = post_spec
    return lpcas
import numpy
import pylab as pl


def plot(l, samp, w1, w2, cor):
    """Plot the two waveforms and their correlation against time."""
    time_range = numpy.arange(0, l) * (1.0 / samp)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, w1)
    pl.subplot(212)
    pl.plot(time_range, w2, c="r")
    pl.xlabel("time")
    pl.figure(2)
    pl.plot(time_range, cor)
    pl.show()
def main():
    # record and delay are project-local modules (audio capture and delay estimation).
    sampling, maxvalue, wave_data = record.record()
    # Pick out two channels for our study.
    w1, w2 = wave_data[1:3]
    nframes = w1.shape[0]
    # Cut one channel at the tail and the other at the head, so that both
    # have the same length and the first delays the second.
    cut_time_len = 0.2  # seconds
    cut_len = int(cut_time_len * sampling)
    wp1 = w1[:-cut_len]
    wp2 = w2[cut_len:]
    # Get their normalised (amplitude) versions and estimate the delay.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)
    # Plot the channels (still the original signals).
    time_range = numpy.arange(0, nframes - cut_len) * (1.0 / sampling)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()
    # Print the delay
    print("Channel 1 delays channel 2 by {0}".format(delay_time))
def main():
    # record and delay are project-local modules (audio capture and delay estimation).
    sampling, maxvalue, wave_data = record.record()
    # Pick out two channels for our study.
    w1, w2 = wave_data[0:2]
    nframes = w1.shape[0]
    # Pad one channel at the head and the other at the tail,
    # so that both have the same length.
    pad_time_len = 0.01  # seconds
    pad_len = int(pad_time_len * sampling)
    pad_arr = numpy.zeros(pad_len)
    wp1 = numpy.concatenate((pad_arr, w1))
    wp2 = numpy.concatenate((w2, pad_arr))
    # Get their normalised (amplitude) versions and estimate the delay.
    a = numpy.array(wp1, dtype=numpy.double) / maxvalue
    b = numpy.array(wp2, dtype=numpy.double) / maxvalue
    delay_time = delay.fst_delay_snd(a, b, sampling)
    # Plot the channels (still the original signals).
    time_range = numpy.arange(0, nframes + pad_len) * (1.0 / sampling)
    pl.figure(1)
    pl.subplot(211)
    pl.plot(time_range, wp1)
    pl.subplot(212)
    pl.plot(time_range, wp2, c="r")
    pl.xlabel("time")
    pl.show()
    # Print the delay
    print("Channel 1 delays channel 2 by {0}".format(delay_time))
def plot_channel(audio, sampling):
    channels, nframes = audio.shape[0], audio.shape[1]
    time_range = numpy.arange(0, nframes) * (1.0 / sampling)
    for i in range(1, channels + 1):
        pl.figure(i)
        pl.plot(time_range, audio[i - 1])
        pl.xlabel("time{0}".format(i))
    pl.show()
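# Hypothetical usage sketch: two synthetic channels (1 kHz and 2 kHz tones),
# assuming numpy and pylab (as pl) are imported as in the snippets above.
import numpy

sampling = 8000
t = numpy.arange(0, 8000) / float(sampling)
audio = numpy.vstack([numpy.sin(2 * numpy.pi * 1000 * t),
                      numpy.sin(2 * numpy.pi * 2000 * t)])
plot_channel(audio, sampling)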
import matplotlib.pyplot as plt


def plot_angular_velocities(title,
                            angular_velocities,
                            angular_velocities_filtered,
                            block=True):
    fig = plt.figure()
    title_position = 1.05
    fig.suptitle(title, fontsize=24)
    a1 = plt.subplot(1, 2, 1)
    a1.set_title(
        "Angular Velocities Before Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities[:, 0], c='r')
    plt.plot(angular_velocities[:, 1], c='g')
    plt.plot(angular_velocities[:, 2], c='b')
    a2 = plt.subplot(1, 2, 2)
    a2.set_title(
        "Angular Velocities After Filtering \nvx [red], vy [green], vz [blue]",
        y=title_position)
    plt.plot(angular_velocities_filtered[:, 0], c='r')
    plt.plot(angular_velocities_filtered[:, 1], c='g')
    plt.plot(angular_velocities_filtered[:, 2], c='b')
    plt.subplots_adjust(left=0.025, right=0.975, top=0.8, bottom=0.05)
    if plt.get_backend() == 'TkAgg':
        # Maximize the window (Tk backend only), then shrink its height.
        mng = plt.get_current_fig_manager()
        max_size = mng.window.maxsize()
        max_size = (max_size[0], max_size[1] * 0.45)
        mng.resize(*max_size)
    plt.show(block=block)
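# Hypothetical usage sketch: a random-walk angular-velocity signal and a
# moving-average smoothed copy standing in for real filtered data.
import numpy as np

raw = np.cumsum(np.random.RandomState(0).randn(500, 3), axis=0) * 0.01
kernel = np.ones(15) / 15.0
filtered = np.column_stack([np.convolve(raw[:, i], kernel, mode='same')
                            for i in range(3)])
plot_angular_velocities("Angular velocities (toy data)", raw, filtered, block=False)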
def plot_mondrian_kernel_vs_mondrian_forest(lifetime_max, res):
    """ Plots training and test set error of the Mondrian kernel and the Mondrian forest,
    based on the same set of M Mondrian samples. Takes as input a dictionary res,
    returned by the evaluate_all_lifetimes procedure in mondrian_kernel.py.
    """
    times = res['times']
    forest_train = res['forest_train']
    forest_test = res['forest_test']
    kernel_train = res['kernel_train']
    kernel_test = res['kernel_test']

    # set up test error plot
    fig = plt.figure(figsize=(7, 4))
    ax = fig.add_subplot(111)
    remove_chartjunk(ax)
    ax.set_xlabel(r'lifetime $\lambda$')
    ax.set_ylabel(r'relative error [\%]')
    ax.yaxis.grid(b=True, which='major', linestyle='dotted', lw=0.5, color='black', alpha=0.3)
    ax.set_xscale('log')
    ax.set_xlim((1e-8, lifetime_max))
    ax.set_ylim((0, 25))
    rasterized = False
    ax.plot(times, forest_test, drawstyle="steps-post", ls='-', lw=2, color=tableau20(6), label='"M. forest" (test)', rasterized=rasterized)
    ax.plot(times, forest_train, drawstyle="steps-post", ls='-', color=tableau20(7), label='"M. forest" (train)', rasterized=rasterized)
    ax.plot(times, kernel_test, drawstyle="steps-post", ls='-', lw=2, color=tableau20(4), label='M. kernel (test)', rasterized=rasterized)
    ax.plot(times, kernel_train, drawstyle="steps-post", ls='-', color=tableau20(5), label='M. kernel (train)', rasterized=rasterized)
    ax.legend(bbox_to_anchor=[1.15, 1.05], frameon=False)
def plot_kernel_vs_forest_weights(y, res):
    """ Plots the weights learned by the Mondrian kernel and the Mondrian forest,
    based on the same set of M Mondrian samples. Takes as input a dictionary res,
    returned by the evaluate_all_lifetimes procedure in mondrian_kernel.py.
    """
    w_forest = res['w_forest']
    w_kernel = res['w_kernel']

    # plot weights against each other
    fig1 = plt.figure(figsize=(8, 4))
    ax1 = fig1.add_subplot(121)
    ax1.set_xlabel('weights learned by "Mondrian forest"')
    ax1.set_ylabel('weights learned by Mondrian kernel')
    ax1.scatter(w_forest, w_kernel, marker='.', color=tableau20(16))
    xl = ax1.get_xlim()
    yl = ax1.get_ylim()
    lims = [
        np.min([xl, yl]),  # min of both axes
        np.max([xl, yl]),  # max of both axes
    ]
    ax1.plot(lims, lims, '--', color='black', alpha=0.75, zorder=0)
    ax1.set_xlim(xl)
    # ax1.set_ylim(yl)
    ax1.set_ylim((-60, 60))

    # plot histogram of weight values (and training targets)
    ax2 = fig1.add_subplot(122)
    ax2.set_xlabel('values')
    ax2.set_ylabel('value frequency')
    bins = np.linspace(-100, 20, 50)
    # `normed` was replaced by `density` in newer matplotlib releases
    ax2.hist(w_forest, bins=bins, histtype='stepfilled', density=True, color=tableau20(6), alpha=0.5,
             label=r'M. forest weights $\mathbf{w}$')
    ax2.hist(w_kernel, bins=bins, histtype='stepfilled', density=True, color=tableau20(4), alpha=0.5,
             label=r'M. kernel weights $\mathbf{w}$')
    ax2.hist(y - np.mean(y), bins=bins, histtype='stepfilled', density=True, color=tableau20(8), alpha=0.5,
             label=r'training targets $\mathbf{y}$')
    ax2.set_ylim((0.0, 0.16))
    ax2.legend(frameon=False, loc='upper left')
    fig1.tight_layout()
def plot_latent(model, y, plot_title=''):
    # make prediction on some test inputs
    N_test = 300
    C = model.get_hypers()['C_emission'][0, 0]
    x_test = np.linspace(-10, 8, N_test) / C
    x_test = np.reshape(x_test, [N_test, 1])
    if isinstance(model, (aep.SGPSSM, vfe.SGPSSM)):
        zu = model.dyn_layer.zu
    else:
        zu = model.sgp_layer.zu
    mu, vu = model.predict_f(zu)
    # mu, Su = model.dyn_layer.mu, model.dyn_layer.Su
    mf, vf = model.predict_f(x_test)
    my, vy = model.predict_y(x_test)
    # plot function
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.plot(x_test[:, 0], kink_true(x_test[:, 0]), '-', color='k')
    ax.plot(C * x_test[:, 0], my[:, 0], '-', color='r', label='y')
    ax.fill_between(
        C * x_test[:, 0],
        my[:, 0] + 2 * np.sqrt(vy[:, 0]),
        my[:, 0] - 2 * np.sqrt(vy[:, 0]),
        alpha=0.2, edgecolor='r', facecolor='r')
    ax.plot(
        y[0:model.N - 1],
        y[1:model.N],
        'r+', alpha=0.5)
    mx, vx = model.get_posterior_x()
    ax.set_xlabel(r'$x_{t-1}$')
    ax.set_ylabel(r'$x_{t}$')
    plt.title(plot_title)
    plt.savefig('/tmp/lincos_' + plot_title + '.png')


# generate a dataset from the lincos function above
def plot_latent(model, y, plot_title=''):
    # make prediction on some test inputs
    N_test = 200
    C = model.get_hypers()['C_emission'][0, 0]
    x_test = np.linspace(-4, 6, N_test) / C
    x_test = np.reshape(x_test, [N_test, 1])
    zu = model.dyn_layer.zu
    mu, vu = model.predict_f(zu)
    # mu, Su = model.dyn_layer.mu, model.dyn_layer.Su
    mf, vf = model.predict_f(x_test)
    my, vy = model.predict_y(x_test)
    # plot function
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.plot(x_test[:, 0], kink_true(x_test[:, 0]), '-', color='k')
    ax.plot(C * x_test[:, 0], my[:, 0], '-', color='r', label='y')
    ax.fill_between(
        C * x_test[:, 0],
        my[:, 0] + 2 * np.sqrt(vy[:, 0]),
        my[:, 0] - 2 * np.sqrt(vy[:, 0]),
        alpha=0.2, edgecolor='r', facecolor='r')
    # ax.plot(zu, mu, 'ob')
    # ax.errorbar(zu, mu, yerr=3*np.sqrt(vu), fmt='ob')
    # ax.plot(x_test[:, 0], mf[:, 0], '-', color='b')
    # ax.fill_between(
    #     x_test[:, 0],
    #     mf[:, 0] + 2*np.sqrt(vf[:, 0]),
    #     mf[:, 0] - 2*np.sqrt(vf[:, 0]),
    #     alpha=0.2, edgecolor='b', facecolor='b')
    ax.plot(
        y[0:model.N - 1],
        y[1:model.N],
        'r+', alpha=0.5)
    mx, vx = model.get_posterior_x()
    ax.set_xlabel(r'$x_{t-1}$')
    ax.set_ylabel(r'$x_{t}$')
    ax.set_xlim([-4, 6])
    # ax.set_ylim([-7, 7])
    plt.title(plot_title)
    # plt.savefig('/tmp/kink_' + plot_title + '.pdf')
    plt.savefig('/tmp/kink_' + plot_title + '.png')
def plot_prediction_MC(model, y_train, y_test, plot_title=''):
    T = y_test.shape[0]
    x_samples, my, vy = model.predict_forward(T, prop_mode=PROP_MC)
    T_train = y_train.shape[0]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(np.arange(T_train), y_train[:, 0], 'k+-')
    ttest = np.arange(T_train, T_train + T)
    ttest = np.reshape(ttest, [T, 1])
    loglik, ranks = compute_log_lik(np.exp(2 * model.sn), y_test, my[:, :, 0].T)
    red = 0.1
    green = 0. * red
    blue = 1. - red
    color = np.array([red, green, blue]).T
    for k in np.argsort(ranks):
        ax.plot(ttest, my[:, k, 0], '-', color=color * ranks[k], alpha=0.5)
    # ax.plot(np.tile(ttest, [1, my.shape[1]]), my[:, :, 0], '-x', color='r', alpha=0.3)
    # ax.plot(np.tile(ttest, [1, my.shape[1]]), x_samples[:, :, 0], 'x', color='m', alpha=0.3)
    ax.plot(ttest, y_test, 'ro')
    ax.set_xlim([T_train - 5, T_train + T])
    plt.title(plot_title)
    plt.savefig('/tmp/kink_pred_MC_' + plot_title + '.pdf')
    # plt.savefig('/tmp/kink_pred_MC_' + plot_title + '.png')


# generate a dataset from the kink function above