def plotalignment(A, nbins=200, M=None, rng=None, doclf=True, docolorbar=True,
                  docutcircle=True, docontours=True, dologhist=False,
                  doaxlines=False, imshowargs={}):
    import numpy as np
    import pylab as plt
    from astrometry.util.plotutils import plothist, loghist
    if doclf:
        plt.clf()
    if M is None:
        M = A.match
    if dologhist:
        f = loghist
    else:
        f = plothist
    H, xe, ye = f(M.dra_arcsec * 1000., M.ddec_arcsec * 1000., nbins,
                  range=rng, doclf=doclf, docolorbar=docolorbar,
                  imshowargs=imshowargs)
    ax = plt.axis()
    if A is not None:
        # The EM fit is based on a subset of the matches;
        # draw the subset cut circle.
        if docutcircle:
            angle = np.linspace(0, 2. * np.pi, 360)
            plt.plot((A.cutcenter[0] + A.cutrange * np.cos(angle)) * 1000.,
                     (A.cutcenter[1] + A.cutrange * np.sin(angle)) * 1000., 'r-')
        if docontours:
            for i, c in enumerate(['b', 'c', 'g'] * 2):
                if i == A.ngauss:
                    break
                for nsig in [1, 2]:
                    XY = A.getContours(nsig, c=i)
                    if XY is None:
                        break
                    X, Y = XY
                    plt.plot(X * 1000., Y * 1000., '-', color=c)  # , alpha=0.5
    if doaxlines:
        plt.axhline(0., color='b', alpha=0.5)
        plt.axvline(0., color='b', alpha=0.5)
    plt.axis(ax)
    plt.xlabel('dRA (mas)')
    plt.ylabel('dDec (mas)')
    return H, xe, ye
def _solve_lr(vlines, w, l, opt_options=OPTIMIZATION_OPTIONS, opt_method=OPTIMIZATION_METHOD, limit=0.3):
    """ Solve for the left and right edge displacement.

    This routine estimates the amount to move the upper left and upper right corners of the image
    in a horizontal direction in order to make the given lines parallel and vertical.

    :param vlines: Lines that we want to map to vertical lines.
    :param w: The width of the image.
    :param l: The height of the image.
    :param opt_options: Optimization options passed into `minimize`.
    :param opt_method: The optimization method.
    :param limit: A limit on the amount of displacement -- beyond this we assume failure.
                  It is expressed as a fraction of the image width.
    :return: (dl, dr), the horizontal displacement of the left and right corners.
    """
    if len(vlines) == 0:
        return 0, 0

    a = np.append(vlines[:, 0, :], np.ones((len(vlines), 1)), axis=1)
    b = np.append(vlines[:, 1, :], np.ones((len(vlines), 1)), axis=1)

    def objective(x):
        dl, dr = x
        Hv = np.linalg.inv(H_v(dl, dr, w, l))
        return np.sum(np.abs(Hv[0, :].dot(a.T) / Hv[2, :].dot(a.T) - Hv[0, :].dot(b.T) / Hv[2, :].dot(b.T)))

    res = minimize(objective, (0., 0.),
                   options=opt_options,
                   method=opt_method)
    dl, dr = res.x

    # Give up if the solution is not plausible (this indicates that the 'vlines' are too noisy).
    if abs(dl) > limit * w:
        dl = 0
    if abs(dr) > limit * w:
        dr = 0
    return dl, dr
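# A minimal usage sketch for _solve_lr (an addition for illustration). `vlines` is assumed to
# have shape (N, 2, 2): N segments, each with two (x, y) endpoints that should become vertical
# after rectification. It relies on H_v and the OPTIMIZATION_* constants defined elsewhere in
# this module.
def _solve_lr_demo():
    # Two slightly tilted, nearly vertical segments in a 640x480 image.
    vlines = np.array([[[95., 0.], [105., 480.]],
                       [[300., 10.], [310., 470.]]])
    return _solve_lr(vlines, w=640, l=480)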
def _solve_ud(hlines, dl, dr, w, l, opt_options=OPTIMIZATION_OPTIONS, opt_method=OPTIMIZATION_METHOD, limit=0.3):
    """ Solve for the top and bottom edge displacement.

    This routine estimates the amount to move the upper left and lower left corners of the image
    in a vertical direction in order to make the given lines parallel and horizontal.

    :param hlines: Lines that we want to map to horizontal lines.
    :param dl: The horizontal displacement of the left corner found by `_solve_lr`.
    :param dr: The horizontal displacement of the right corner found by `_solve_lr`.
    :param w: The width of the image.
    :param l: The height of the image.
    :param opt_options: Optimization options passed into `minimize`.
    :param opt_method: The optimization method.
    :param limit: A limit on the amount of displacement -- beyond this we assume failure.
                  It is expressed as a fraction of the image height.
    :return: (du, dd), the vertical displacement of the top and bottom corners.
    """
    if len(hlines) == 0:
        return 0, 0

    a = np.append(hlines[:, 0, :], np.ones((len(hlines), 1)), axis=1)
    b = np.append(hlines[:, 1, :], np.ones((len(hlines), 1)), axis=1)

    # Map the endpoints through the horizontal correction found by `_solve_lr` first.
    Hv = np.linalg.inv(H_v(dl, dr, w, l))
    a = Hv.dot(a.T).T
    b = Hv.dot(b.T).T

    def objective(x):
        du, dd = x
        Hh = np.linalg.inv(H_h(du, dd, w, l))
        return np.sum(np.abs(Hh[1, :].dot(a.T) / Hh[2, :].dot(a.T) - Hh[1, :].dot(b.T) / Hh[2, :].dot(b.T)))

    res = minimize(objective, (0., 0.),
                   options=opt_options,
                   method=opt_method)
    du, dd = res.x

    # Give up if the result is not plausible. We are better off not warping.
    if abs(du) > limit * l:
        du = 0
    if abs(dd) > limit * l:
        dd = 0
    return du, dd
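# A hedged sketch of how the two solvers above are usually chained: first estimate the
# horizontal corner displacements from near-vertical lines, then, with that correction
# applied, estimate the vertical displacements from near-horizontal lines. The composition
# below follows the order used in the objectives above (points go through inv(H_v), then
# inv(H_h)); the exact convention of H_v/H_h in this module may differ, so treat this as
# illustrative only.
def _rectify_sketch(vlines, hlines, w, l):
    dl, dr = _solve_lr(vlines, w, l)
    du, dd = _solve_ud(hlines, dl, dr, w, l)
    # Point-wise rectifying homography (maps original endpoints to the rectified frame).
    H_rect = np.linalg.inv(H_v(dl, dr, w, l).dot(H_h(du, dd, w, l)))
    return dl, dr, du, dd, H_rect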
def _cut_windows_horizontally(self, s, win_strip):
    win_horizontal_scores = []
    if len(self.heights) > 0:
        win_horizontal_scores = np.percentile(win_strip[self.top:self.bottom], 85, axis=0)
        runs, starts, values = run_length_encode(win_horizontal_scores > 0.5)
        starts += s
        win_widths = runs[np.atleast_1d(values)]
        win_widths = np.atleast_1d(win_widths)
        win_lefts = np.atleast_1d(starts[values])
        if len(win_widths) > 0:
            win_left = win_lefts[0]
            win_right = win_lefts[-1] + win_widths[-1]
            win_horizontal_spacing = np.diff(win_lefts).mean() if len(win_lefts) > 1 else 0
            # win_width = win_widths.mean()
        else:
            win_left = win_right = win_horizontal_spacing = -1  # win_width = -1
    else:
        win_widths = win_lefts = []
        win_left = win_right = win_horizontal_spacing = -1

    self.horizontal_spacing = int(win_horizontal_spacing)
    self.left = int(win_left)
    self.right = int(win_right)
    self.horizontal_scores = win_horizontal_scores
    self.lefts = np.array(win_lefts)
    self.widths = np.array(win_widths)
def _create_mini_facade(self, left, right, wall_colors):
    door_strip = i12.door(self.facade_layers)[:, left:right].copy()
    shop_strip = i12.shop(self.facade_layers)[:, left:right]
    door_strip = np.max((door_strip, shop_strip), axis=0)
    win_strip = self.window_scores[:, left:right].copy()
    sky_strip = self._sky_mask[:, left:right].copy()
    rgb_strip = wall_colors[:, left:right]
    win_strip[:, :1] = win_strip[:, -1:] = 0  # edge effects
    sky_strip[:, :1] = sky_strip[:, -1:] = 0  # edge effects

    facade = FacadeCandidate(self, left, right, sky_strip, door_strip, win_strip, rgb_strip)
    facade.find_regions(self.facade_layers)
    return facade
def plot_facade_cuts(self):
    facade_sig = self.facade_edge_scores.sum(0)
    facade_cuts = find_facade_cuts(facade_sig, dilation_amount=self.facade_merge_amount)
    mu = np.mean(facade_sig)
    sigma = np.std(facade_sig)

    w = self.rectified.shape[1]
    pad = 10

    gs1 = pl.GridSpec(5, 5)
    gs1.update(wspace=0.5, hspace=0.0)  # set the spacing between axes.

    pl.subplot(gs1[:3, :])
    pl.imshow(self.rectified)
    pl.vlines(facade_cuts, *pl.ylim(), lw=2, color='black')
    pl.axis('off')
    pl.xlim(-pad, w + pad)

    pl.subplot(gs1[3:, :], sharex=pl.gca())
    pl.fill_between(np.arange(w), 0, facade_sig, lw=0, color='red')
    pl.fill_between(np.arange(w), 0, np.clip(facade_sig, 0, mu + sigma), color='blue')
    pl.plot(np.arange(w), facade_sig, color='blue')
    pl.vlines(facade_cuts, facade_sig[facade_cuts], pl.xlim()[1], lw=2, color='black')
    pl.scatter(facade_cuts, facade_sig[facade_cuts])
    pl.axis('off')
    pl.hlines(mu, 0, w, linestyle='dashed', color='black')
    pl.text(0, mu, r'$\mu$ ', ha='right')
    pl.hlines(mu + sigma, 0, w, linestyle='dashed', color='gray')
    pl.text(0, mu + sigma, r'$\mu+\sigma$ ', ha='right')
    pl.xlim(-pad, w + pad)
def __call__(self, X, Y=None):
    # Gaussian (RBF) kernel: k(x, y) = exp(-gamma * ||x - y||^2), computed for all pairs.
    XX = np.sum(X * X, axis=1)[:, np.newaxis]
    if Y is None:
        Y = X
        YY = XX.T
    else:
        YY = np.sum(Y * Y, axis=1)[np.newaxis, :]
    distances = XX + YY  # Using broadcasting
    distances -= 2 * np.dot(X, Y.T)
    distances = np.maximum(distances, 0)
    return np.exp(-self.gamma * distances)
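# A small reference implementation (an addition, not part of the original class) that can be
# used to sanity-check the Gram matrix above: scipy's cdist computes the same squared
# Euclidean distances without the broadcasting trick.
def _rbf_kernel_reference(X, Y, gamma):
    from scipy.spatial.distance import cdist
    return np.exp(-gamma * cdist(X, Y, metric='sqeuclidean'))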
def drop_num_features(df):
    # Drop all categorical feature helping columns ('Num')
    # Todo: is it defined when importing data set? _feature_names_num
    for feature_name in HousePrices._feature_names_num:
        df = df.drop([feature_name], axis=1)
    return df
def features_with_null_logical(df, axis=1):
    row_length = len(df._get_axis(0))
    # Axis to count non-null values in. aggregate_axis=0 implies counting for every feature.
    aggregate_axis = 1 - axis
    features_non_null_series = df.count(axis=aggregate_axis)
    # Whenever count() differs from row_length, a null value exists in that feature column
    # and the corresponding mask entry is False.
    mask = row_length == features_non_null_series
    return mask
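# A short, hedged usage sketch for the helper above; the toy column names are made up.
# With the default axis=1 the returned mask is indexed by column and is False for any
# column containing at least one null value.
def _features_with_null_logical_demo():
    import pandas as pd
    df = pd.DataFrame({'LotArea': [8450, 9600, None],
                       'SalePrice': [208500, 181500, 223500]})
    return features_with_null_logical(df)  # LotArea -> False, SalePrice -> True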
def svm_figure_generate(w, b, support_vectors, X):
    # Decision boundary w[0]*x + w[1]*y + b = 0, rewritten as y = k*x - b/w[1].
    k = -w[0] / w[1]
    x = np.linspace(-5, 5)
    y = k * x - b / w[1]
    # Margin lines share the slope of the boundary and pass through the outermost support vectors.
    sv_1 = support_vectors[0]
    yy_down = k * x + (sv_1[1] - k * sv_1[0])
    sv_2 = support_vectors[-1]
    yy_up = k * x + (sv_2[1] - k * sv_2[0])
    pl.plot(x, y, 'k-')
    pl.plot(x, yy_up, 'k--')
    pl.plot(x, yy_down, 'k--')
    pl.scatter(support_vectors[:, 0], support_vectors[:, 1], s=80, facecolor='none')
    pl.scatter(X[:, 0], X[:, 1], c='Y', cmap=pl.cm.Paired)
    pl.axis('tight')
    pl.show()
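# A hedged usage sketch: fit a linear SVM with scikit-learn (an assumption -- the original
# snippet only shows the plotting side) and pass its weight vector, intercept, and support
# vectors into svm_figure_generate.
def _svm_figure_demo():
    from sklearn import svm
    rng = np.random.RandomState(0)
    X = np.r_[rng.randn(20, 2) - [2, 2], rng.randn(20, 2) + [2, 2]]
    y = [0] * 20 + [1] * 20
    clf = svm.SVC(kernel='linear', C=1.0).fit(X, y)
    svm_figure_generate(clf.coef_[0], clf.intercept_[0], clf.support_vectors_, X)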
def adjust_layout(self):
    x0, x1, y0, y1 = plt.axis()
    plot_margin_x = 0.01 * float(x1)
    plot_margin_y = 0.01 * float(y1)
    plt.axis((x0 - plot_margin_x, x1 + plot_margin_x, y0, y1 + plot_margin_y))
    plt.tight_layout()
def cmPlot(targ_ra, targ_dec, data, iso, g_radius, nbhd, type):
    """Color-magnitude plot"""
    angsep = ugali.utils.projector.angsep(targ_ra, targ_dec, data['RA'], data['DEC'])
    annulus = (angsep > g_radius) & (angsep < 1.)

    mag_g = data[mag_g_dred_flag]
    mag_r = data[mag_r_dred_flag]

    if type == 'stars':
        filter = star_filter(data)
        plt.title('Stellar Color-Magnitude')
    elif type == 'galaxies':
        filter = galaxy_filter(data)
        plt.title('Galactic Color-Magnitude')

    iso_filter = (iso.separation(mag_g, mag_r) < 0.1)

    # Plot background objects
    plt.scatter(mag_g[filter & annulus] - mag_r[filter & annulus], mag_g[filter & annulus],
                c='k', alpha=0.1, edgecolor='none', s=1)
    # Plot isochrone
    ugali.utils.plotting.drawIsochrone(iso, lw=2, label='{} Gyr, z = {}'.format(iso.age, iso.metallicity))
    # Plot objects in nbhd
    plt.scatter(mag_g[filter & nbhd] - mag_r[filter & nbhd], mag_g[filter & nbhd],
                c='g', s=5, label=r'r < {:.3f}$^\circ$'.format(g_radius))
    # Plot objects in nbhd and near isochrone
    plt.scatter(mag_g[filter & nbhd & iso_filter] - mag_r[filter & nbhd & iso_filter],
                mag_g[filter & nbhd & iso_filter], c='r', s=5, label=r'$\Delta$CM < 0.1')

    plt.axis([-0.5, 1, 16, 24])
    plt.gca().invert_yaxis()
    plt.gca().set_aspect(1. / 4.)
    plt.legend(loc='upper left')
    plt.xlabel('g-r (mag)')
    plt.ylabel('g (mag)')
def nullspace(A, eps=1e-15):
    u, s, vh = sp.linalg.svd(A, full_matrices=True, compute_uv=True)
    # Pad the singular values so that we also get the nullspace of a wide matrix.
    N = A.shape[1]
    K = s.shape[0]
    if K < N:
        s2 = np.zeros(N)
        s2[0:K] = s
        s = s2
    null_mask = (s <= eps)
    null_space = sp.compress(null_mask, vh, axis=0)
    return sp.transpose(null_space)
# return smallest singular vector of A (or the nullspace if A is 2x3)
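# A small, hedged check for nullspace() (an addition for illustration): every column it
# returns should be annihilated by A. For a 2x3 matrix whose rows are two homogeneous image
# points, the nullspace is the line through those points, which is presumably how it is
# used here.
def _nullspace_demo():
    A = np.array([[1., 2., 1.],
                  [3., 4., 1.]])
    ns = nullspace(A)
    assert np.allclose(A.dot(ns), 0, atol=1e-9)
    return ns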
def analytic_twopath(self, p, rate1, rate2):
    '''
    Population of neurons receiving input from two pathways, where the first pathway is gated on.
    rate1 and rate2 are the input rates of the two pathways.
    First we convert the input rates into conductances; the dend_IO(exc, inh) function takes
    total excitatory and inhibitory conductances as inputs.
    '''
    # number of synapses
    num_syn = 15
    g_exc = p['g_exc'] * num_syn

    # gating variables
    s1 = MCM.meansNMDA(rate1)
    s2 = MCM.meansNMDA(rate2)

    # Total conductance input
    Exc1 = self.Exc1_raw * s1 * g_exc  # nS
    Exc2 = self.Exc2_raw * s2 * g_exc  # nS
    Exc = Exc1 + Exc2

    # frac_proj = 0.1  # fraction projection
    N_proj = p['frac_proj'] * self.params['n_pyr']
    N_proj0 = int(np.floor(N_proj))  # integer so it can be used as an index
    N_proj0 = min(N_proj0, self.params['n_pyr'] - 1)
    N_proj0 = max(N_proj0, 0)

    DendV = dend_IO(Exc[:(N_proj0 + 1) * self.params['n_dend_each']],
                    self.Inh1[:(N_proj0 + 1) * self.params['n_dend_each']])
    meanDendV = DendV.reshape(N_proj0 + 1, self.params['n_dend_each']).mean(axis=1)
    SomaR = soma_fv(meanDendV)

    # Make sure the firing rate depends smoothly on frac_proj
    rboth = (SomaR[:N_proj0].sum() + SomaR[N_proj0] * (N_proj - N_proj0)) / N_proj
    return rboth
def Get_r_fromV(self, Exc, Inh, n_dend_each):
    DendV = dend_IO(Exc, Inh)
    MeanDendV = DendV.reshape(len(DendV) // n_dend_each, n_dend_each).mean(axis=1)
    SomaR = soma_fv(MeanDendV)
    return SomaR
def Get_r(self, Exc, Inh, Inh2soma, n_dend_each):
    # Get rate from injection current
    DendV = dend_IO(Exc, Inh)
    MeanDendV = DendV.reshape(len(DendV) // n_dend_each, n_dend_each).mean(axis=1)
    vSoma = -55  # Assume somatic voltage is around the reset, which is a good approximation
    SomaR = soma_fI(self.gCouple * (MeanDendV - vSoma) - Inh2soma)
    return SomaR
def generate_W_grid(self, p):
    '''
    Generate the connection matrix for neurons on a two-dimensional grid,
    specifically for VIP-SOM connections.
    '''
    p['p_vip2som_arbor'] = 0.6

    # Consider a grid of 400 um x 400 um and assign locations to the neurons
    p['n_vip_scale'] = 625
    p['grid_size_vip'] = 400 * np.sqrt(p['n_vip_scale'] / p['n_vip'])  # mu m
    p['n_vip_scale_sqrt'] = int(np.round(np.sqrt(p['n_vip_scale'])))
    p['n_som_sqrt'] = int(np.floor(np.sqrt(p['n_som'])))

    # x and y locations of VIP neurons, placed on a regular grid
    p['vip_x'] = np.tile(np.linspace(-0.5, 0.5, p['n_vip_scale_sqrt']), p['n_vip_scale_sqrt']) * p['grid_size_vip']
    p['vip_y'] = np.repeat(np.linspace(-0.5, 0.5, p['n_vip_scale_sqrt']), p['n_vip_scale_sqrt']) * p['grid_size_vip']

    # x and y locations of SOM neurons: a regular grid, with the remainder placed randomly
    p['som_x'] = np.tile(np.linspace(-0.5, 0.5, p['n_som_sqrt']), p['n_som_sqrt']) * 400
    p['som_y'] = np.repeat(np.linspace(-0.5, 0.5, p['n_som_sqrt']), p['n_som_sqrt']) * 400
    p['som_x'] = np.concatenate((p['som_x'], (np.random.rand(p['n_som'] - p['n_som_sqrt'] ** 2) - 0.5) * 400))
    p['som_y'] = np.concatenate((p['som_y'], (np.random.rand(p['n_som'] - p['n_som_sqrt'] ** 2) - 0.5) * 400))

    # Assume that each VIP only targets SOM cells within its vicinity (vip_arbor),
    # with probability p_vip2som_arbor
    p['W_vip2som'] = np.zeros((p['n_som'], p['n_vip_scale']))
    for i_som in range(p['n_som']):
        dist2vip = np.sqrt((p['som_x'][i_som] - p['vip_x']) ** 2 + (p['som_y'][i_som] - p['vip_y']) ** 2)
        # Among VIP cells with dist < vip_arbor, keep a random fraction p_vip2som_arbor
        ind_vip2som_conn = np.where(dist2vip < p['vip_arbor'])[0]
        np.random.shuffle(ind_vip2som_conn)
        ind_vip2som_conn = ind_vip2som_conn[:int(p['p_vip2som_arbor'] * len(ind_vip2som_conn))]
        p['W_vip2som'][i_som, ind_vip2som_conn] = 1

    n_vip2som = np.sum(p['W_vip2som'], axis=1)
    # uIPSQ is about 0.7 pC = 0.7 pA/Hz for a VIP-SOM connection, Pfeffer et al. Nat Neurosci. 2012
    # syn_weight_vip2som = 10 / n_vip2som
    for i_som in range(p['n_som']):
        p['W_vip2som'][i_som, :] = p['W_vip2som'][i_som, :] * 0.7 * 60 / n_vip2som[i_som]
    return p
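# A hedged back-of-the-envelope check for the connectivity built above: away from the edge
# of the VIP sheet, each SOM cell sees about density * pi * vip_arbor^2 VIP cells inside its
# arbor and keeps a fraction p_vip2som_arbor of them. The expected VIP in-degree is roughly
# the value returned below; compare with (p['W_vip2som'] > 0).sum(axis=1).
def _expected_vip_inputs_per_som(p):
    vip_density = p['n_vip_scale'] / p['grid_size_vip'] ** 2
    return p['p_vip2som_arbor'] * vip_density * np.pi * p['vip_arbor'] ** 2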
def lecun_lcn(input, img_shape, kernel_shape, threshold=1e-4):
    input = input.reshape(input.shape[0], 1, img_shape[0], img_shape[1])
    X = T.matrix(dtype=theano.config.floatX)
    X = X.reshape(input.shape)

    filter_shape = (1, 1, kernel_shape, kernel_shape)
    filters = gaussian_filter(kernel_shape).reshape(filter_shape)

    convout = conv.conv2d(input=X,
                          filters=filters,
                          image_shape=(input.shape[0], 1, img_shape[0], img_shape[1]),
                          filter_shape=filter_shape,
                          border_mode='full')

    # For each pixel, remove the mean of the kernel_shape x kernel_shape neighborhood
    mid = int(np.floor(kernel_shape / 2.))
    centered_X = X - convout[:, :, mid:-mid, mid:-mid]

    # Scale down the norm of the patch if the norm is bigger than 1
    sum_sqr_XX = conv.conv2d(input=centered_X ** 2,
                             filters=filters,
                             image_shape=(input.shape[0], 1, img_shape[0], img_shape[1]),
                             filter_shape=filter_shape,
                             border_mode='full')

    denom = T.sqrt(sum_sqr_XX[:, :, mid:-mid, mid:-mid])
    per_img_mean = denom.mean(axis=[1, 2])
    divisor = T.largest(per_img_mean.dimshuffle(0, 'x', 'x', 1), denom)
    divisor = T.maximum(divisor, threshold)

    new_X = centered_X / divisor
    new_X = new_X.dimshuffle(0, 2, 3, 1)
    new_X = new_X.flatten(ndim=3)

    f = theano.function([X], new_X)
    return f
    # return f(input)
def tile_rgb_images(x, dir=None, filename="x", row=10, col=10):
if dir is None:
raise Exception()
try:
os.mkdir(dir)
except:
pass
fig = pylab.gcf()
fig.set_size_inches(col * 2, row * 2)
pylab.clf()
for m in range(row * col):
pylab.subplot(row, col, m + 1)
pylab.imshow(np.clip(x[m], 0, 1), interpolation="none")
pylab.axis("off")
pylab.savefig("{}/{}.png".format(dir, filename))
def tile_rgb_images(x, dir=None, filename="x"):
if dir is None:
raise Exception()
try:
os.mkdir(dir)
except:
pass
fig = pylab.gcf()
fig.set_size_inches(16.0, 16.0)
pylab.clf()
for m in range(100):
pylab.subplot(10, 10, m + 1)
pylab.imshow(np.clip(x[m], 0, 1), interpolation="none")
pylab.axis("off")
pylab.savefig("{}/{}.png".format(dir, filename))