import numpy as np
import matplotlib.pylab as pl


def plot_volcano(logFC, p_val, sample_name, saveName, logFC_thresh):
    """Volcano plot (log fold change vs. -log10 p-value), saved to saveName."""
    fig = pl.figure()
    # Non-significant points (p > 0.05 or |logFC| below threshold) in blue
    ns = (p_val > 0.05) | (abs(logFC) < logFC_thresh)
    pl.scatter(logFC[ns], -np.log10(p_val[ns]), color='blue', alpha=0.5)
    # Significant points (p < 0.05 and |logFC| above threshold) in red
    sig = (p_val < 0.05) & (abs(logFC) > logFC_thresh)
    pl.scatter(logFC[sig], -np.log10(p_val[sig]), color='red')
    # Significance and fold-change cut-off lines
    pl.hlines(-np.log10(0.05), min(logFC), max(logFC))
    pl.vlines(-logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.vlines(logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.xlim(-3, 3)
    pl.xlabel('Log Fold Change')
    pl.ylabel('-log10(p-value)')
    pl.savefig(saveName)
    pl.close(fig)
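# A hedged driver for plot_volcano on purely synthetic fold changes and p-values,
# just to exercise the plot; the file name and threshold are arbitrary.
rng = np.random.RandomState(0)
logFC_demo = rng.normal(0, 1.5, 1000)                 # synthetic log fold changes
p_val_demo = 10 ** -np.abs(rng.normal(0, 2, 1000))    # synthetic p-values in (0, 1]
plot_volcano(logFC_demo, p_val_demo, sample_name='demo',
             saveName='volcano_demo.png', logFC_thresh=1.0)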
# def plot_histograms(df_peaks, pntr_list):
#
#     for pntr in pntr_list:
#         colName = pntr[2] + '_Intragenic_position'
#         pl.hist(df_peaks[colName])
#         pl.xlabel(colName)
#         pl.ylabel()
#         pl.show()
Python scatter() usage examples — collected source snippets
def test_plot_error_ellipse(self):
    # Generate random data
    x = np.random.normal(0, 1, 300)
    s = np.array([2.0, 2.0])
    y1 = np.random.normal(s[0] * x)
    y2 = np.random.normal(s[1] * x)
    data = np.array([y1, y2])

    # Calculate covariance and plot error ellipse
    cov = np.cov(data)
    plot_error_ellipse([0.0, 0.0], cov)

    debug = False
    if debug:
        plt.scatter(data[0, :], data[1, :])
        plt.xlim([-8, 8])
        plt.ylim([-8, 8])
        plt.show()
    plt.clf()
def gp_partd(Xtrain, ytrain, Xtest, ytest):
    gp = gaussian_process(Xtrain[:, 3], ytrain, Xtrain[:, 3], ytrain)
    gp.init_kernel_matrices(b=5, var=2)
    gp.predict_test()

    x = np.asarray(Xtrain[:, 3]).flatten()
    xsortind = np.argsort(x)
    y1 = np.asarray(ytrain).flatten()
    y2 = np.asarray(gp.test_predictions).flatten()

    plt.figure()
    plt.scatter(x[xsortind], y1[xsortind])
    plt.plot(x[xsortind], y2[xsortind], 'b-')
    plt.xlabel('Car Weight (Dimension 4)')
    plt.ylabel('Outcome')
    plt.title('Visualizing model through single dimension')
    plt.savefig('hw3_gaussian_dim4_viz')
    plt.show()
def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                       ha='right', va='bottom')
    pylab.show()
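# A hedged example of feeding plot() with 2-D embeddings; the t-SNE projection of
# random vectors and the 'w0', 'w1', ... labels are stand-ins for real word vectors.
import numpy as np
import pylab
from sklearn.manifold import TSNE

num_points = 100
high_dim = np.random.rand(num_points, 50)                    # placeholder vectors
two_d = TSNE(n_components=2, perplexity=30).fit_transform(high_dim)
words = ['w%d' % i for i in range(num_points)]               # placeholder labels
plot(two_d, words)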
# From draw.py in project uai2017_learning_to_acquire_information (author: evanthebouncy)
def draw(m, name, extra=None):
    FIG.clf()
    matrix = m
    orig_shape = np.shape(matrix)
    # drop the trailing channel dimension from orig_shape
    new_shape = orig_shape[:-1]
    matrix = np.reshape(matrix, new_shape)

    ax = FIG.add_subplot(1, 1, 1)
    ax.set_aspect('equal')
    plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
    # plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
    plt.colorbar()

    if extra is not None:
        greens, reds = extra
        grn_x, grn_y = greens
        red_x, red_y = reds
        plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
        plt.scatter(x=red_x, y=red_y, c='r', s=40)
    # # put a blue dot at (10, 20)
    # plt.scatter([10], [20])
    # # put a red dot, size 40, at 2 locations:
    # plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
    # # plt.plot()
    plt.savefig(name)
def draw_annotate(x_cords, y_cords, anns, name):
    FIG.clf()
    y = x_cords
    z = y_cords
    n = anns

    fig = FIG
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim([0, L])
    ax.set_ylim([0, L])
    ax.set_ylim(ax.get_ylim()[::-1])
    ax.scatter(z, y)
    for i, txt in enumerate(n):
        ax.annotate(txt, (z[i], y[i]))
    fig.savefig(name)
def run_cluster_MM(nat_param=True):
    import GPy
    # create dataset
    print("creating dataset...")
    N = 100
    k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
    k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
    k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
    X = np.random.normal(0, 1, (N, 5))
    A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
    B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
    C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
    Y = np.vstack((A, B, C))
    labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]),
                        np.ones(C.shape[0]) * 2))

    # inference
    np.random.seed(42)
    print("inference ...")
    M = 30
    D = 5
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian', nat_param=nat_param)
    lvm.optimise(method='L-BFGS-B', maxiter=20)
    # lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000)
    ls = np.exp(lvm.sgp_layer.ls)
    print(ls)
    inds = np.argsort(ls)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
    zu = lvm.sgp_layer.zu
    plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
    # plt.show()
    plt.savefig('/tmp/gplvm_cluster_MM.pdf')
def run_cluster_MC():
    import GPy
    # create dataset
    print("creating dataset...")
    N = 100
    k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
    k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
    k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
    X = np.random.normal(0, 1, (N, 5))
    A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
    B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
    C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
    Y = np.vstack((A, B, C))
    labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]),
                        np.ones(C.shape[0]) * 2))

    # inference
    np.random.seed(42)
    print("inference ...")
    M = 30
    D = 5
    alpha = 0.5
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000, prop_mode=config.PROP_MC)
    ls = np.exp(lvm.sgp_layer.ls)
    print(ls)
    inds = np.argsort(ls)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
    zu = lvm.sgp_layer.zu
    plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
    # plt.show()
    plt.savefig('/tmp/gplvm_cluster_MC.pdf')
def run_frey():
    # import dataset
    data = pods.datasets.brendan_faces()
    # Y = data['Y'][:50, :]
    Y = data['Y']
    Yn = Y - np.mean(Y, axis=0)
    Yn /= np.std(Y, axis=0)
    Y = Yn

    # inference
    print("inference ...")
    M = 30
    D = 20
    lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='L-BFGS-B', maxiter=10)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1])
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 20
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            canvas[(nx - i - 1) * sx:(nx - i) * sx,
                   j * sy:(j + 1) * sy] = x_mean.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def run_cluster_MC():
    import GPy
    # create dataset
    print("creating dataset...")
    N = 100
    k1 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 10, 10, 0.1, 0.1]), ARD=True)
    k2 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[10, 0.1, 10, 0.1, 10]), ARD=True)
    k3 = GPy.kern.RBF(5, variance=1, lengthscale=1. /
                      np.random.dirichlet(np.r_[0.1, 0.1, 10, 10, 10]), ARD=True)
    X = np.random.normal(0, 1, (N, 5))
    A = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T
    B = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T
    C = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T
    Y = np.vstack((A, B, C))
    labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]),
                        np.ones(C.shape[0]) * 2))

    # inference
    print("inference ...")
    M = 30
    D = 5
    alpha = 0.5
    lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
    lvm.optimise(method='adam', adam_lr=0.05, maxiter=2000,
                 alpha=alpha, prop_mode=config.PROP_MC)
    ls = np.exp(lvm.sgp_layer.ls)
    print(ls)
    inds = np.argsort(ls)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    plt.scatter(mx[:, inds[0]], mx[:, inds[1]], c=labels)
    zu = lvm.sgp_layer.zu
    # plt.plot(zu[:, inds[0]], zu[:, inds[1]], 'ko')
    # plt.show()
    plt.savefig('/tmp/gplvm_cluster.pdf')
def run_frey():
    # import dataset
    data = pods.datasets.brendan_faces()
    # Y = data['Y'][:50, :]
    Y = data['Y']
    Yn = Y - np.mean(Y, axis=0)
    Yn /= np.std(Y, axis=0)
    Y = Yn

    # inference
    print("inference ...")
    M = 30
    D = 20
    lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
    # lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
    lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=10)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1])
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 20
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            canvas[(nx - i - 1) * sx:(nx - i) * sx,
                   j * sy:(j + 1) * sy] = x_mean.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def plot_trajectory(state_true, state_estimated):
    plt.plot(state_true[:, 0], state_true[:, 2], color="red")
    plt.scatter(state_estimated[:, 0].tolist()[::10],
                state_estimated[:, 2].tolist()[::10],
                marker="o",
                color="blue")
def test_compute_inliers(self):
    sample = self.ransac.sample(self.data)
    dist = self.ransac.compute_distance(sample, self.data)
    self.ransac.compute_inliers(dist)

# def test_optimize(self):
#     debug = False
#
#     for i in range(10):
#         m_pred, c_pred, mask = self.ransac.optimize(self.data)
#         if debug:
#             print("m_true: ", self.m_true)
#             print("m_pred: ", m_pred)
#             print("c_true: ", self.c_true)
#             print("c_pred: ", c_pred)
#
#         self.assertTrue(abs(m_pred - self.m_true) < 0.5)
#         self.assertTrue(abs(c_pred - self.c_true) < 0.5)
#
#     # Plot RANSAC optimized result
#     debug = False
#     if debug:
#         x = np.linspace(0.0, 10.0, num=100)
#         y = m_pred * x + c_pred
#         plt.scatter(self.data[0, :], self.data[1, :])
#         plt.plot(x, y)
#         plt.show()
# From demo_mi.py in project Building-Machine-Learning-Systems-With-Python-Second-Edition (author: PacktPublishing)
def _plot_mi_func(x, y):
    mi = mutual_info(x, y)
    title = "NI($X_1$, $X_2$) = %.3f" % mi
    pylab.scatter(x, y)
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
# From demo_corr.py in project Building-Machine-Learning-Systems-With-Python-Second-Edition (author: PacktPublishing)
def _plot_correlation_func(x, y):
    r, p = pearsonr(x, y)
    title = "Cor($X_1$, $X_2$) = %.3f" % r
    pylab.scatter(x, y)
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
    f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
    pylab.plot(x, f1(x), "r--", linewidth=2)
    # pylab.xticks([w * 7 * 24 for w in [0, 1, 2, 3, 4]],
    #              ['week %i' % (w + 1) for w in [0, 1, 2, 3, 4]])
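# A minimal sketch of driving _plot_correlation_func on synthetic, linearly
# correlated data. It assumes pearsonr comes from scipy.stats, that an older
# scipy still exposes polyfit/poly1d at the top level, and leaves out
# _plot_mi_func because mutual_info is a project-specific helper.
import numpy as np
import scipy
import pylab
from scipy.stats import pearsonr

rng = np.random.RandomState(0)
x = rng.normal(size=500)
y = 0.8 * x + rng.normal(scale=0.5, size=500)   # correlated toy data
pylab.figure()
_plot_correlation_func(x, y)                    # scatter plus fitted dashed line
pylab.show()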
def draw(name, feature4096):
    plt.figure(name)
    feature4096 = feature4096.reshape(64, 64)
    for i, x in enumerate(feature4096):
        for j, y in enumerate(x):
            # if y <= 0:
            #     print ' ',
            # else:
            #     print '%3.1f' % y,
            plt.scatter([j], [i], s=[y * 1000])
        # print
    plt.axis([-1, 65, -1, 65])
    plt.show()
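# A hedged usage sketch for the helper above; the random 4096-dimensional vector
# stands in for a real 64x64 feature map (note: one scatter call per cell, so slow).
import numpy as np
import matplotlib.pyplot as plt

feature = np.random.rand(4096)   # placeholder for a real feature vector
draw('random-feature', feature)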
# From vis_hyperspectral.py in project hyperspectral-framework (author: mihailoobrenovic)
def vis_data(data, classes):
    X_embedded = TSNE(n_components=2, perplexity=40, verbose=2).fit_transform(data)
    plt.figure()
    colors = cm.rainbow(np.linspace(0, 1, 17))
    for i in range(17):
        ind = np.where(classes == i)
        plt.scatter(X_embedded[ind, 0], X_embedded[ind, 1],
                    color=colors[i], marker='x', label=i)
    plt.legend()
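# A hedged driver for vis_data, assuming TSNE comes from sklearn.manifold and
# cm from matplotlib; the 17-class data below is synthetic.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.manifold import TSNE

data = np.vstack([np.random.normal(loc=i, scale=1.0, size=(50, 30))
                  for i in range(17)])        # 17 classes x 50 samples x 30 dims
classes = np.repeat(np.arange(17), 50)
vis_data(data, classes)
plt.show()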
# Raw data
def plotBestFit(weights):
    import matplotlib.pylab as plt
    import seaborn as sns
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []
    ycord1 = []
    xcord2 = []
    ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    # fig = plt.figure()
    plt.scatter(xcord1, ycord1, s=30, c="red", marker="s", label="X1")
    plt.scatter(xcord2, ycord2, s=30, c="green", label="X2")
    x = arange(-3.0, 3.0, 0.1)
    y = (-float(weights[0]) - float(weights[1]) * x) / float(weights[2])
    plt.plot(x, y, c="purple", label="fitted line")
    plt.legend()
    plt.xlabel("X1")
    plt.ylabel("X2")
    plt.show()
def draw_pic(data_list_set):
    for i in range(len(data_list_set)):
        parse_x = []
        parse_y = []
        for j in range(len(data_list_set[i])):
            parse_x.append(data_list_set[i][j].x)
            parse_y.append(data_list_set[i][j].y)
        # one random RGB colour per team
        plt.scatter(parse_x, parse_y, color=numpy.random.rand(3), alpha=0.65,
                    label="Team:" + str(i), s=40)
    plt.legend()
    plt.title("Clustering Result")
    plt.show()
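# A small driver sketch for draw_pic, assuming each element carries .x/.y
# attributes; the Point namedtuple and the two synthetic "teams" are hypothetical.
import numpy
from collections import namedtuple
import matplotlib.pyplot as plt

Point = namedtuple('Point', ['x', 'y'])
team_a = [Point(numpy.random.normal(0, 1), numpy.random.normal(0, 1)) for _ in range(30)]
team_b = [Point(numpy.random.normal(5, 1), numpy.random.normal(5, 1)) for _ in range(30)]
draw_pic([team_a, team_b])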