def _neighbors_from_list_with_mask(self, labels, min_contact_area=None, real_area=True):
    if self._neighbors is not None and all(i in self._neighbors for i in labels):
        result = {i: self._neighbors[i] for i in labels}
if min_contact_area is None:
return result
else:
return self._filter_with_area(result, min_contact_area, real_area)
edges = {}
for label in labels:
try:
slices = self.boundingbox(label)
ex_slices = dilation(slices)
mask_img = self.image[ex_slices]
        except Exception:
            mask_img = self.image
neigh = list(contact_surface(mask_img,label))
if min_contact_area is not None:
neigh = self._neighbors_filtering_by_contact_area(label, neigh, min_contact_area, real_area)
edges[label] = neigh
return edges
def _add_provenance(in_file, settings, air_msk, rot_msk):
from mriqc import __version__ as version
from niworkflows.nipype.utils.filemanip import hash_infile
import nibabel as nb
import numpy as np
air_msk_size = nb.load(air_msk).get_data().astype(
np.uint8).sum()
rot_msk_size = nb.load(rot_msk).get_data().astype(
np.uint8).sum()
out_prov = {
'md5sum': hash_infile(in_file),
'version': version,
'software': 'mriqc',
'warnings': {
'small_air_mask': bool(air_msk_size < 5e5),
'large_rot_frame': bool(rot_msk_size > 500),
}
}
if settings:
out_prov['settings'] = settings
return out_prov
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_gaussian_smoothing_force(topomesh,gaussian_sigma=10.0):
compute_topomesh_property(topomesh,'vertices',degree=1)
edge_vertices = topomesh.wisp_property('vertices',degree=1).values()
reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)
edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
edge_lengths = np.linalg.norm(edge_vectors,axis=1)
gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors
gaussian_force = np.transpose([nd.sum(gaussian_edge_vectors[:,d],edge_vertices[:,0],index=list(topomesh.wisps(0))) for d in [0,1,2]])
vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
gaussian_force = gaussian_force/vertices_weights[:,np.newaxis]
return gaussian_force
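Most of the snippets on this page use scipy.ndimage.sum the same way: an index array is passed as the labels argument so that per-edge or per-pixel values can be accumulated into per-vertex or per-region bins. A minimal, self-contained sketch of that pattern follows; the edge list and lengths are made up purely for illustration.

import numpy as np
import scipy.ndimage as nd

edge_vertices = np.array([[0, 1], [1, 2], [2, 0]])  # hypothetical edges (start, end)
edge_lengths = np.array([1.0, 2.0, 3.0])            # hypothetical per-edge values

# For every vertex id in `index`, sum the values whose label (start vertex) matches it.
per_vertex = nd.sum(edge_lengths, labels=edge_vertices[:, 0], index=[0, 1, 2])
print(per_vertex)  # [1. 2. 3.]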
def histo_plot(figure,X,color,xlabel="",ylabel="",cumul=False,bar=True,n_points=400,smooth_factor=0.1,spline_order=3,linewidth=3,alpha=1.0,label=""):
if '%' in xlabel:
magnitude = 100
X_values = np.array(np.minimum(np.around(X),n_points+1),int)
else:
# magnitude = np.power(10,np.around(4*np.log10(X.mean()))/4+0.5)
magnitude = np.power(10,np.around(4*np.log10(np.nanmean(X)+np.nanstd(X)+1e-7))/4+1)
magnitude = np.around(magnitude,int(-np.log10(magnitude))+1)
# print magnitude
#magnitude = X.mean()+5.0*X.std()
        X_values = np.array(np.minimum(np.around(n_points*X[~np.isnan(X)]/magnitude),n_points+1),int)
    X_histo = np.zeros(n_points+1,float)
    for x in range(n_points+1):
        X_histo[x] = nd.sum(np.ones_like(X_values,float),X_values,index=x)
        if '%' in ylabel:
            X_histo[x] /= X_values.size/100.0
        if cumul:
            X_histo[x] += X_histo[x-1]
if bar:
bar_plot(figure,np.linspace(0,magnitude,n_points+1),X_histo,np.array([1,1,1]),color,xlabel,ylabel,label=label)
else:
smooth_plot(figure,np.linspace(0,magnitude,n_points+1),X_histo,color,color,xlabel,ylabel,n_points=n_points,smooth_factor=smooth_factor,spline_order=spline_order,linewidth=linewidth,alpha=alpha,label=label)
def com(A, d1, d2):
"""Calculation of the center of mass for spatial components
Inputs:
A: np.ndarray
matrix of spatial components (d x K)
d1: int
number of pixels in x-direction
d2: int
number of pixels in y-direction
Output:
cm: np.ndarray
center of mass for spatial components (K x 2)
"""
nr = np.shape(A)[-1]
Coor = dict()
Coor['x'] = np.kron(np.ones((d2, 1)), np.expand_dims(range(d1), axis=1))
Coor['y'] = np.kron(np.expand_dims(range(d2), axis=1), np.ones((d1, 1)))
cm = np.zeros((nr, 2)) # vector for center of mass
cm[:, 0] = np.dot(Coor['x'].T, A) / A.sum(axis=0)
cm[:, 1] = np.dot(Coor['y'].T, A) / A.sum(axis=0)
return cm
def stellar_luminosity2(self, steps=10000):
"""
DEPRECATED: ADW 2017-09-20
Compute the stellar luminosity (L_Sol; average per star).
Uses "sample" to generate mass sample and pdf. The range of
integration only covers the input isochrone data (no
extrapolation used), but this seems like a sub-percent effect
if the isochrone goes to 0.15 Msun for the old and metal-poor
stellar populations of interest.
Note that the stellar luminosity is very sensitive to the
post-AGB population.
"""
msg = "'%s.stellar_luminosity2': ADW 2017-09-20"%self.__class__.__name__
DeprecationWarning(msg)
mass_init, mass_pdf, mass_act, mag_1, mag_2 = self.sample(mass_steps=steps)
luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
luminosity = luminosity_interpolation(mass_init)
return np.sum(luminosity * mass_pdf)
# ADW: For temporary backward compatibility
def filter_isolated_cells(array, struct):
filtered_array = np.copy(array)
id_regions, num_ids = ndimage.label(filtered_array, structure=struct)
id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids + 1)))
area_mask = (id_sizes == 1)
filtered_array[area_mask[id_regions]] = 0
return filtered_array
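A hedged usage sketch for the function above (the array and structuring element are made up): connected components whose summed size is one are zeroed, while larger components survive.

import numpy as np
from scipy import ndimage

array = np.array([[1, 0, 0, 0],
                  [1, 1, 0, 0],
                  [0, 0, 0, 1],   # single isolated pixel
                  [0, 0, 0, 0]])
struct = ndimage.generate_binary_structure(2, 1)  # 4-connectivity
print(filter_isolated_cells(array, struct))        # the lone pixel at (2, 3) is removed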
#Decide if given spectrum shows bird sounds or noise only
def get_largest_component(self, closing_shape=None):
mask, bounds = self._get_bounded_mask(closing_shape)
label_im, num_labels = ndimage.label(mask)
label_sizes = ndimage.sum(mask, label_im, range(num_labels + 1))
label_im[(label_sizes < label_sizes.max())[label_im]] = 0
label_im = np.minimum(label_im, 1)
if label_im[tuple(self.seed - bounds[0])] == 0:
logging.warning('Seed voxel ({}) is not in connected component.'.format(np.array_str(self.seed)))
return label_im, bounds
def _run_interface(self, runtime):
in_file = nb.load(self.inputs.in_file)
data = in_file.get_data()
mask = np.zeros_like(data, dtype=np.uint8)
mask[data <= 0] = 1
# Pad one pixel to control behavior on borders of binary_opening
mask = np.pad(mask, pad_width=(1,), mode='constant', constant_values=1)
# Remove noise
struc = nd.generate_binary_structure(3, 2)
mask = nd.binary_opening(mask, structure=struc).astype(
np.uint8)
# Remove small objects
label_im, nb_labels = nd.label(mask)
if nb_labels > 2:
sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
for _, label in ordered[2:]:
mask[label_im == label] = 0
# Un-pad
mask = mask[1:-1, 1:-1, 1:-1]
# If mask is small, clean-up
if mask.sum() < 500:
mask = np.zeros_like(mask, dtype=np.uint8)
out_img = in_file.__class__(mask, in_file.affine, in_file.header)
out_img.header.set_data_dtype(np.uint8)
out_file = fname_presuffix(self.inputs.in_file,
suffix='_rotmask', newpath='.')
out_img.to_filename(out_file)
self._results['out_file'] = out_file
return runtime
def artifact_mask(imdata, airdata, distance, zscore=10.):
"""Computes a mask of artifacts found in the air region"""
from statsmodels.robust.scale import mad
if not np.issubdtype(airdata.dtype, np.integer):
airdata[airdata < .95] = 0
airdata[airdata > 0.] = 1
bg_img = imdata * airdata
if np.sum((bg_img > 0).astype(np.uint8)) < 100:
return np.zeros_like(airdata)
# Find the background threshold (the most frequently occurring value
# excluding 0)
bg_location = np.median(bg_img[bg_img > 0])
bg_spread = mad(bg_img[bg_img > 0])
bg_img[bg_img > 0] -= bg_location
bg_img[bg_img > 0] /= bg_spread
# Apply this threshold to the background voxels to identify voxels
# contributing artifacts.
qi1_img = np.zeros_like(bg_img)
qi1_img[bg_img > zscore] = 1
qi1_img[distance < .10] = 0
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 1)
qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)
qi1_img[airdata <= 0] = 0
return qi1_img
def fuzzy_jaccard(in_tpms, in_mni_tpms):
overlaps = []
for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):
tpm = tpm.reshape(-1)
mni_tpm = mni_tpm.reshape(-1)
num = np.min([tpm, mni_tpm], axis=0).sum()
den = np.max([tpm, mni_tpm], axis=0).sum()
overlaps.append(float(num / den))
return overlaps
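A small, illustrative call with toy probability maps (not real tissue probability maps): the fuzzy Jaccard coefficient is the sum of voxel-wise minima divided by the sum of voxel-wise maxima.

import numpy as np

tpm = [np.array([[0.2, 0.8], [1.0, 0.0]])]
mni_tpm = [np.array([[0.1, 0.9], [0.5, 0.5]])]
print(fuzzy_jaccard(tpm, mni_tpm))  # [1.4 / 2.6] ~= [0.538]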
def makeMask(self):
#Narrow down to pixels which measured something every frame
counts = np.sum(self.Zs > 0, 2)
self.Mask = (counts == self.Zs.shape[2])
#Find biggest connected component out of remaining pixels
ILabel, NLabels = ndimage.label(self.Mask)
idx = np.argmax(ndimage.sum(self.Mask, ILabel, range(NLabels+1)))
self.Mask = (ILabel == idx)
plt.imshow(self.Mask)
plt.show()
#Actually sets up all of the vertex, face, and adjacency structures in the
#PolyMeshObject
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_area_smoothing_force(topomesh,target_areas=None):
compute_topomesh_property(topomesh,'vertices',degree=2)
compute_topomesh_property(topomesh,'length',degree=1)
triangle_vertices = topomesh.wisp_property('vertices',degree=2).values()
rotated_triangle_vertices = np.transpose([triangle_vertices[:,2],triangle_vertices[:,0],triangle_vertices[:,1]])
antirotated_triangle_vertices = np.transpose([triangle_vertices[:,1],triangle_vertices[:,2],triangle_vertices[:,0]])
triangle_vertices = np.append(np.append(triangle_vertices,rotated_triangle_vertices,axis=0),antirotated_triangle_vertices,axis=0)
edge_index_list = np.array([[1, 2],[0, 1],[0, 2]])
triangle_edge_vertices = triangle_vertices[:,edge_index_list]
triangle_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,1]) - topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,0])
#triangle_edge_lengths = np.power(np.sum(np.power(triangle_edge_vectors,2.0),axis=2),0.5)
triangle_edge_lengths = np.linalg.norm(triangle_edge_vectors,axis=2)
triangle_edge_directions = triangle_edge_vectors/triangle_edge_lengths[...,np.newaxis]
triangle_perimeters = np.sum(triangle_edge_lengths,axis=1)
triangle_areas = np.sqrt((triangle_perimeters/2.0)*(triangle_perimeters/2.0-triangle_edge_lengths[:,0])*(triangle_perimeters/2.0-triangle_edge_lengths[:,1])*(triangle_perimeters/2.0-triangle_edge_lengths[:,2]))
triangle_areas[np.where(triangle_areas==0.0)] = 0.001
if target_areas is None:
target_areas = triangle_areas.mean()*np.ones(len(triangle_areas))
else:
target_areas = np.tile(target_areas,(3))
area_edge_1_force = (target_areas-triangle_areas)*(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,1]**2)*triangle_edge_lengths[:,1]
area_edge_2_force = (target_areas-triangle_areas)*(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2)*triangle_edge_lengths[:,2]
area_unitary_force = -(area_edge_1_force[:,np.newaxis]*triangle_edge_directions[:,1] + area_edge_2_force[:,np.newaxis]*triangle_edge_directions[:,2])
    triangle_force = np.transpose([nd.sum(area_unitary_force[:,d],triangle_vertices[:,0],index=list(topomesh.wisps(0))) for d in range(3)])
return triangle_force
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_laplacian_smoothing_force(topomesh,cellwise_smoothing=False):
if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'vertices',degree=1)
edge_vertices = topomesh.wisp_property('vertices',degree=1).values()
reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)
if not topomesh.has_wisp_property('valence',degree=0,is_computed=True):
compute_topomesh_property(topomesh,'valence',degree=0)
vertices_degrees = topomesh.wisp_property('valence',degree=0).values()
if cellwise_smoothing:
laplacian_force = np.zeros_like(topomesh.wisp_property('barycenter',degree=0).values(),np.float32)
for c in topomesh.wisps(3):
if not topomesh.has_wisp_property('vertices',degree=3,is_computed=True):
compute_topomesh_property(topomesh,'vertices',degree=3)
cell_vertices = topomesh.wisp_property('vertices',degree=3)[c]
cell_edges = np.sum(nd.sum(np.ones_like(cell_vertices),cell_vertices,index=edge_vertices),axis=1)
cell_edge_vertices = edge_vertices[np.where(cell_edges==2)[0]]
cell_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(cell_edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(cell_edge_vertices[:,0])
cell_laplacian_force = np.transpose([nd.sum(cell_edge_vectors[:,0],cell_edge_vertices[:,0],index=cell_vertices),
nd.sum(cell_edge_vectors[:,1],cell_edge_vertices[:,0],index=cell_vertices),
nd.sum(cell_edge_vectors[:,2],cell_edge_vertices[:,0],index=cell_vertices)])
laplacian_force[cell_vertices] += cell_laplacian_force/vertices_degrees[cell_vertices,np.newaxis]
else:
edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
laplacian_force = np.transpose([nd.sum(edge_vectors[:,d],edge_vertices[:,0],index=list(topomesh.wisps(0))) for d in [0,1,2]])
laplacian_force = laplacian_force/vertices_degrees[:,np.newaxis]
return laplacian_force
def where_list(array,values):
mask = nd.sum(np.ones_like(values),values,index=array)
return np.where(mask>0)
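Here ndimage.sum is used purely as a membership test: for every element of `array` it counts how many entries of `values` equal it. A hedged example with made-up inputs:

import numpy as np
import scipy.ndimage as nd

array = np.array([3, 7, 2, 7, 9])
values = np.array([7, 9])
print(where_list(array, values))  # (array([1, 3, 4]),) -- positions whose value appears in `values`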
def density_plot(figure,X,Y,color,xlabel="",ylabel="",n_points=10,linewidth=1,marker_size=40.,alpha=1.0,label=""):
font = fm.FontProperties(family = 'Trebuchet', weight ='light')
#font = fm.FontProperties(family = 'CenturyGothic',fname = '/Library/Fonts/Microsoft/Century Gothic', weight ='light')
figure.patch.set_facecolor('white')
axes = figure.add_subplot(111)
# axes.plot(X,Y,linewidth=1,color=tuple(color2),alpha=0.2)
# ratios = (Y-Y.min())/(Y.max()-Y.min())
# X_min = X.mean()-3*X.std()
# X_max = X.mean()+3*X.std()
X_min = np.percentile(X,100/n_points)
X_max = np.percentile(X,100 - 100/n_points)
Y_min = np.percentile(Y,100/n_points)
# Y_min = Y.mean()-3*Y.std()
Y_max = np.percentile(Y,100 - 100/n_points)
X_grid = np.linspace(X_min,X_max,n_points)
Y_grid = np.linspace(Y_min,Y_max,n_points)
X_sampled = X_grid[vq(X,X_grid)[0]]
Y_sampled = Y_grid[vq(Y,Y_grid)[0]]
point_density = {}
for x in np.unique(X_sampled):
point_count = nd.sum(np.ones_like(np.where(X_sampled==x)),Y_sampled[np.where(X_sampled==x)],index=np.unique(Y_sampled))
for i,y in enumerate(np.unique(Y_sampled)):
point_density[(x,y)] = point_count[i]/len(Y)
    max_density = np.array(list(point_density.values())).max()
    point_area = np.array([np.pi*10.0*marker_size*point_density[(x,y)]/max_density for x,y in zip(X_sampled,Y_sampled)])
    #colors = np.random.rand(len(X))
    colors = np.array([point_density[(x,y)]/max_density * color for x,y in zip(X_sampled,Y_sampled)])
    colors += np.array([(1-point_density[(x,y)]/max_density) * np.ones(3) for x,y in zip(X_sampled,Y_sampled)])
axes.scatter(X_sampled,Y_sampled,s=point_area,c=colors,linewidth=linewidth,alpha=alpha,label=label)
axes.set_xlim(X_min,X_max)
axes.set_xlabel(xlabel,fontproperties=font, size=10, style='italic')
axes.set_xticklabels(axes.get_xticks(),fontproperties=font, size=12)
axes.set_ylim(Y_min,Y_max)
axes.set_ylabel(ylabel, fontproperties=font, size=10, style='italic')
axes.set_yticklabels(axes.get_yticks(),fontproperties=font, size=12)
def order_components(A, C):
"""Order components based on their maximum temporal value and size
Parameters
-----------
A: sparse matrix (d x K)
spatial components
C: matrix or np.ndarray (K x T)
temporal components
Returns
-------
A_or: np.ndarray
ordered spatial components
C_or: np.ndarray
ordered temporal components
srt: np.ndarray
sorting mapping
"""
A = np.array(A.todense())
nA2 = np.sqrt(np.sum(A**2, axis=0))
K = len(nA2)
A = np.array(np.matrix(A) * spdiags(1 / nA2, 0, K, K))
nA4 = np.sum(A**4, axis=0)**0.25
C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
mC = np.ndarray.max(np.array(C), axis=1)
srt = np.argsort(nA4 * mC)[::-1]
A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
C_or = spdiags(1. / nA2[srt], 0, K, K) * (C[srt, :])
return A_or, C_or, srt
def extract_DF_F(Y, A, C, i=None):
"""Extract DF/F values from spatial/temporal components and background
Parameters
-----------
Y: np.ndarray
input data (d x T)
A: sparse matrix of np.ndarray
Set of spatial including spatial background (d x K)
C: matrix
Set of temporal components including background (K x T)
Returns
-----------
C_df: matrix
temporal components in the DF/F domain
Df: np.ndarray
vector with baseline values for each trace
"""
A2 = A.copy()
A2.data **= 2
nA2 = np.squeeze(np.array(A2.sum(axis=0)))
A = A * diags(1 / nA2, 0)
C = diags(nA2, 0) * C
# if i is None:
# i = np.argmin(np.max(A,axis=0))
Y = np.matrix(Y)
Yf = A.transpose() * (Y - A * C) # + A[:,i]*C[i,:])
Df = np.median(np.array(Yf), axis=1)
C_df = diags(1 / Df, 0) * C
return C_df, Df
def find_matches(D_s, print_assignment=False):
matches=[]
costs=[]
t_start=time.time()
for ii,D in enumerate(D_s):
DD=D.copy()
        if np.any(np.isnan(DD)):
            raise Exception('Distance Matrix contains NaN, not allowed!')
# indexes = m.compute(DD)
# indexes = linear_assignment(DD)
indexes = linear_sum_assignment(DD)
indexes2=[(ind1,ind2) for ind1,ind2 in zip(indexes[0],indexes[1])]
matches.append(indexes)
DD=D.copy()
total = []
for row, column in indexes2:
value = DD[row,column]
            if print_assignment:
                print('(%d, %d) -> %f' % (row, column, value))
            total.append(value)
        print('FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0], DD.shape[1], np.sum(total)))
        print(time.time() - t_start)
costs.append(total)
return matches,costs
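A hedged usage sketch with a single 2x2 distance matrix; the imports below stand in for the module-level imports the function body relies on (numpy, time, and scipy.optimize.linear_sum_assignment), and the matrix values are made up. The assignment picks the off-diagonal pairing with total cost 0.3.

import time
import numpy as np
from scipy.optimize import linear_sum_assignment

D_s = [np.array([[0.9, 0.1],
                 [0.2, 0.8]])]
matches, costs = find_matches(D_s, print_assignment=True)
print(matches[0], costs[0])  # rows -> columns 0->1 and 1->0, total cost 0.3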
#%%
def run(self, ips, snap, img, para = None):
intenimg = WindowsManager.get(para['inten']).ips.img
strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
buf, n = ndimage.label(snap, strc, output=np.uint16)
index = range(1, n+1)
idx = (np.ones(n+1)*para['front']).astype(np.uint8)
    msk = np.ones(n, dtype=bool)
if para['mean']>0: msk *= ndimage.mean(intenimg, buf, index)>=para['mean']
if para['mean']<0: msk *= ndimage.mean(intenimg, buf, index)<-para['mean']
if para['max']>0: msk *= ndimage.maximum(intenimg, buf, index)>=para['max']
if para['max']<0: msk *= ndimage.maximum(intenimg, buf, index)<-para['max']
if para['min']>0: msk *= ndimage.minimum(intenimg, buf, index)>=para['min']
if para['min']<0: msk *= ndimage.minimum(intenimg, buf, index)<-para['min']
if para['sum']>0: msk *= ndimage.sum(intenimg, buf, index)>=para['sum']
if para['sum']<0: msk *= ndimage.sum(intenimg, buf, index)<-para['sum']
if para['std']>0: msk *= ndimage.standard_deviation(intenimg, buf, index)>=para['std']
if para['std']<0: msk *= ndimage.standard_deviation(intenimg, buf, index)<-para['std']
xy = ndimage.center_of_mass(intenimg, buf, index)
xy = np.array(xy).round(2).T
idx[1:][~msk] = para['back']
idx[0] = 0
img[:] = idx[buf]
WindowsManager.get(para['inten']).ips.mark = RGMark((xy.T, msk))
WindowsManager.get(para['inten']).ips.update = True
def run(self, ips, snap, img, para = None):
ips.lut = self.buflut
lab, n = ndimg.label(snap>para['thr2'], np.ones((3,3)), output=np.uint16)
sta = ndimg.sum(snap>para['thr1'], lab, index=range(n+1)) > 0
img[:] = (sta*255)[lab]
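The same two-threshold (hysteresis) logic can be tried outside the ImagePy plugin machinery. A standalone sketch with a made-up image: components above the low threshold are kept only if they contain at least one pixel above the high threshold.

import numpy as np
import scipy.ndimage as ndimg

snap = np.array([[40, 40,  0,  0],
                 [40, 90,  0, 30],
                 [ 0,  0,  0, 30]])
thr1, thr2 = 80, 20   # high / low thresholds (illustrative values)
lab, n = ndimg.label(snap > thr2, np.ones((3, 3)), output=np.uint16)
sta = ndimg.sum(snap > thr1, lab, index=range(n + 1)) > 0
print((sta * 255)[lab])  # only the component containing the 90-valued pixel survives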
def _centroid(x, y, density):
"""
Computes weighted centroid of one bin.
Equation (4) of Cappellari & Copin (2003)
"""
mass = np.sum(density)
xBar = x.dot(density)/mass
yBar = y.dot(density)/mass
return xBar, yBar
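For example (toy numbers): three points at x = 0, 1, 2 with densities 1, 1, 2 give a weighted centroid of (1.25, 1.0).

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 0.0, 2.0])
density = np.array([1.0, 1.0, 2.0])
print(_centroid(x, y, density))  # 1.25, 1.0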
#----------------------------------------------------------------------
def stellar_mass(self, mass_min=0.1, steps=10000):
"""
Compute the stellar mass (Msun; average per star). PDF comes
from IMF, but weight by actual stellar mass.
Parameters:
-----------
mass_min : Minimum mass to integrate the IMF
steps : Number of steps to sample the isochrone
Returns:
--------
mass : Stellar mass [Msun]
"""
mass_max = self.mass_init_upper_bound
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
if mass_min < np.min(self.mass_init):
mass_act_interpolation = scipy.interpolate.interp1d(np.insert(self.mass_init, 0, mass_min),
np.insert(self.mass_act, 0, mass_min))
else:
mass_act_interpolation = scipy.interpolate.interp1d(self.mass_init, self.mass_act)
mass_act = mass_act_interpolation(mass)
return np.sum(mass_act * d_log_mass * self.imf.pdf(mass, log_mode=True))
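The integral here is a Riemann sum over equal steps in log-mass: the actual stellar mass is weighted by the IMF pdf evaluated in log-mode and by the constant step d(log m). A hedged numeric sketch of the same pattern with a toy Salpeter-like power law standing in for the project's `imf` object (the pdf below is illustrative only):

import numpy as np

mass_min, mass_max, steps = 0.1, 10.0, 10000
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass

pdf = mass**(1.0 - 2.35)                 # dN/dlog(m) for a Salpeter-like IMF (unnormalised)
pdf /= np.sum(pdf * d_log_mass)          # normalise over the sampled range
print(np.sum(mass * pdf * d_log_mass))   # mean mass per star over [0.1, 10] Msun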
def stellar_luminosity(self, steps=10000):
"""
Compute the stellar luminosity (Lsun; average per star). PDF
comes from IMF. The range of integration only covers the
input isochrone data (no extrapolation used), but this seems
like a sub-percent effect if the isochrone goes to 0.15 Msun
for the old and metal-poor stellar populations of interest.
Note that the stellar luminosity is very sensitive to the
post-AGB population.
Parameters:
-----------
steps : Number of steps to sample the isochrone.
Returns:
--------
lum : The stellar luminosity [Lsun]
"""
mass_min = np.min(self.mass_init)
mass_max = self.mass_init_upper_bound
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
luminosity = luminosity_interpolation(mass)
return np.sum(luminosity * d_log_mass * self.imf.pdf(mass, log_mode=True))
def absolute_magnitude(self, richness=1, steps=1e4):
"""
Calculate the absolute magnitude (Mv) by integrating the
stellar luminosity.
Parameters:
-----------
richness : isochrone normalization parameter
steps : number of isochrone sampling steps
Returns:
--------
abs_mag : Absolute magnitude (Mv)
"""
# Using the SDSS g,r -> V from Jester 2005 [arXiv:0506022]
# for stars with R-I < 1.15
# V = g_sdss - 0.59(g_sdss-r_sdss) - 0.01
# g_des = g_sdss - 0.104(g_sdss - r_sdss) + 0.01
# r_des = r_sdss - 0.102(g_sdss - r_sdss) + 0.02
if self.survey.lower() != 'des':
raise Exception('Only valid for DES')
if 'g' not in [self.band_1,self.band_2]:
msg = "Need g-band for absolute magnitude"
raise Exception(msg)
if 'r' not in [self.band_1,self.band_2]:
msg = "Need r-band for absolute magnitude"
raise Exception(msg)
mass_init,mass_pdf,mass_act,mag_1,mag_2=self.sample(mass_steps = steps)
g,r = (mag_1,mag_2) if self.band_1 == 'g' else (mag_2,mag_1)
V = g - 0.487*(g - r) - 0.0249
flux = np.sum(mass_pdf*10**(-V/2.5))
Mv = -2.5*np.log10(richness*flux)
return Mv
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: Could this function be even faster / more readable?
ADW: Should this include magnitude error leakage?
"""
mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
mag = mag_1_array if self.band_1_detection else mag_2_array
color = mag_1_array - mag_2_array
# ADW: Only calculate observable fraction over interior pixels...
pixels = mask.roi.pixels_interior
mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
# ADW: Restrict mag and color to range of mask with sufficient solid angle
cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
mask.roi.bins_color, mask.roi.bins_mag) > 0
# Pre-apply these cuts to the 1D mass_pdf_array to save time
mass_pdf_cut = mass_pdf_array*cmd_cut
# Create 2D arrays of cuts for each pixel
mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
mask_cut_repeat = mask_1_cut & mask_2_cut
observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
return observable_fraction
def observableFractionMMD(self, mask, distance_modulus, mass_min=0.1):
# This can be done faster...
logger.info('Calculating observable fraction from MMD')
mmd = self.signalMMD(mask,distance_modulus)
obs_frac = mmd.sum(axis=-1).sum(axis=-1)[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
return obs_frac
def absolute_magnitude(distance_modulus,g,r,prob=None):
""" Calculate the absolute magnitude from a set of bands """
V = g - 0.487*(g - r) - 0.0249
flux = np.sum(10**(-(V-distance_modulus)/2.5))
Mv = -2.5*np.log10(flux)
return Mv
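A hedged call with two made-up stars at a distance modulus of 20: the g, r magnitudes are converted to a V-band estimate, summed as fluxes, and converted back to a single absolute magnitude.

import numpy as np

g = np.array([21.0, 22.0])
r = np.array([20.5, 21.5])
print(absolute_magnitude(20.0, g, r))  # combined Mv of the two stars (~0.37 here)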
def compute_image_property(self, property_name, min_contact_area=None, sub_factor=8.):
"""
"""
computable_properties = ['barycenter','volume','neighborhood_size','layer','mean_curvature','gaussian_curvature']
try:
assert property_name in computable_properties
    except AssertionError:
        print("Property \""+property_name+"\" cannot be computed on image")
        print("Try with one of the following :")
        for p in computable_properties:
            print("  * "+p)
else:
if self._image is not None:
if property_name in ['barycenter','volume']:
graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=[property_name],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins)
property_dict = graph.vertex_property(property_name)
elif property_name == 'neighborhood_size':
neighbors = [self.image_graph.neighbors(l) for l in self.labels]
property_dict = dict(zip(self.labels,map(len,neighbors)))
elif property_name == 'layer':
if min_contact_area is None:
min_contact_area = self.min_contact_area
graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=['L1'],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins, min_contact_area=min_contact_area)
first_layer = graph.vertex_property('L1')
second_layer_cells = [v for v in graph.vertices() if np.any([first_layer[n] for n in graph.neighbors(v)]) and not first_layer[v]]
second_layer = dict(zip(list(graph.vertices()),[v in second_layer_cells for v in graph.vertices()]))
property_dict = dict(zip(self.labels,[1 if first_layer[l] else 2 if second_layer[l] else 3 for l in self.labels]))
elif property_name in ['mean_curvature','gaussian_curvature']:
if not self.has_image_property('barycenter'):
self.compute_image_property('barycenter')
if not self.has_image_property('layer'):
print "--> Computing layer property"
self.compute_image_property('layer')
cell_centers = self.image_property('barycenter')
L1_cells = self.labels[self.image_property('layer').values()==1]
from openalea.cellcomplex.property_topomesh.utils.implicit_surfaces import implicit_surface_topomesh
from openalea.cellcomplex.property_topomesh.property_topomesh_analysis import compute_topomesh_property, compute_topomesh_vertex_property_from_faces
from openalea.cellcomplex.property_topomesh.property_topomesh_optimization import property_topomesh_vertices_deformation, topomesh_triangle_split
sub_binary_image = (self._image!=self.background).astype(float)[::sub_factor,::sub_factor,::sub_factor]
surface_topomesh = implicit_surface_topomesh(sub_binary_image,np.array(sub_binary_image.shape),sub_factor*np.array(self._image.voxelsize),center=False)
property_topomesh_vertices_deformation(surface_topomesh,iterations=10)
compute_topomesh_property(surface_topomesh,'barycenter',2)
compute_topomesh_property(surface_topomesh,'normal',2,normal_method='orientation')
compute_topomesh_vertex_property_from_faces(surface_topomesh,'normal',adjacency_sigma=2,neighborhood=5)
compute_topomesh_property(surface_topomesh,'mean_curvature',2)
compute_topomesh_vertex_property_from_faces(surface_topomesh,property_name,adjacency_sigma=2,neighborhood=5)
surface_cells = L1_cells[vq(surface_topomesh.wisp_property('barycenter',0).values(),cell_centers.values(L1_cells))[0]]
surface_topomesh.update_wisp_property('label',0,array_dict(surface_cells,list(surface_topomesh.wisps(0))))
L1_cell_property = nd.sum(surface_topomesh.wisp_property(property_name,0).values(),surface_cells,index=L1_cells)/nd.sum(np.ones_like(surface_cells),surface_cells,index=L1_cells)
L1_cell_property = array_dict(L1_cell_property,L1_cells)
property_dict = array_dict([L1_cell_property[l] if (l in L1_cells) and (not np.isnan(L1_cell_property[l])) else 0 for l in self.labels],self.labels)
property_data = [property_dict[l] for l in self.labels]
self.update_image_property(property_name,property_data)