def volume(self, labels = None, real = True):
"""
Return the volume of the labels.
Args:
labels: (int|list) - single label number or a sequence of
label numbers of the objects to be measured.
If labels is None, all labels are used.
real: (bool) - if True, the volume is returned in real-world units, otherwise in voxels.
:Examples:
>>> import numpy as np
>>> a = np.array([[1, 2, 7, 7, 1, 1],
...               [1, 6, 5, 7, 3, 3],
...               [2, 2, 1, 7, 3, 3],
...               [1, 1, 1, 4, 1, 1]])
>>> from vplants.tissue_analysis.spatial_image_analysis import SpatialImageAnalysis
>>> analysis = SpatialImageAnalysis(a)
>>> analysis.volume(7)
4.0
>>> analysis.volume([7,2])
[4.0, 3.0]
>>> analysis.volume()
[10.0, 3.0, 4.0, 1.0, 1.0, 1.0, 4.0]
"""
# Check the provided `labels`:
labels = self.label_request(labels)
volume = nd.sum(np.ones_like(self.image), self.image, index=np.int16(labels))
# convert to real-world units if asked:
if real is True:
if self.image.ndim == 2:
volume = np.multiply(volume,(self._voxelsize[0]*self._voxelsize[1]))
elif self.image.ndim == 3:
volume = np.multiply(volume,(self._voxelsize[0]*self._voxelsize[1]*self._voxelsize[2]))
volume = volume.tolist()
if not isinstance(labels, int):
return self.convert_return(volume, labels)
else:
return volume
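# A standalone sketch of the labelled-sum idiom used above: summing an image
# of ones grouped by label counts the voxels per label (illustrative only;
# the values mirror the doctest array).
import numpy as np
from scipy import ndimage as nd
a = np.array([[1, 2, 7, 7, 1, 1],
              [1, 6, 5, 7, 3, 3],
              [2, 2, 1, 7, 3, 3],
              [1, 1, 1, 4, 1, 1]])
print(nd.sum(np.ones_like(a), a, index=[7, 2]))  # four voxels labelled 7, three labelled 2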
def cells_voxel_layer(self, labels, region_boundingbox = False, single_frame = False):
"""
Extract the first layer of voxels surrounding the cells defined by `labels`.
Args:
labels: (int|list) - cell label(s) for which to extract the first layer of voxels;
region_boundingbox: (bool) - if True, use one bounding box surrounding all labels, instead of one per label.
single_frame: (bool) - if True, return a single array with all voxel positions defining cell walls.
:Output:
returns a binary image per label: 1 on the first voxel layer of the cell of interest, 0 elsewhere
"""
if isinstance(labels,int):
labels = [labels]
if single_frame:
region_boundingbox=True
if not isinstance(region_boundingbox,bool):
if sum([isinstance(s,slice) for s in region_boundingbox])==3:
bbox = region_boundingbox
else:
print "TypeError: Wong type for 'region_boundingbox', should either be bool or la tuple of slices"
return None
elif isinstance(region_boundingbox,bool) and region_boundingbox:
bbox = self.region_boundingbox(labels)
else:
bboxes = self.boundingbox(labels, real=False)
# Generate the smallest possible eroding structure:
struct = nd.generate_binary_structure(3, 2)
if single_frame:
vox_layer = np.zeros_like(self.image[bbox], dtype=int)
else:
vox_layer = {}
for clabel in labels:
if region_boundingbox:
bbox_im = self.image[bbox]
else:
bbox_im = self.image[bboxes[clabel]]
# Creating a mask (1 where the cell-label of interest is, 0 elsewhere):
mask_bbox_im = (bbox_im == clabel)
# Erode the cell using the structure:
eroded_mask_bbox_im = nd.binary_erosion(mask_bbox_im, structure=struct)
if single_frame:
vox_layer += np.array(mask_bbox_im - eroded_mask_bbox_im, dtype=int)
else:
vox_layer[clabel] = np.array(mask_bbox_im - eroded_mask_bbox_im, dtype=int)
if single_frame:
return vox_layer
elif len(labels) == 1:
return vox_layer[labels[0]]
else:
return vox_layer
def geometric_median(X, numIter = 200):
"""
Compute the geometric median of a point sample.
The geometric median coordinates are expressed in the Spatial Image reference system (not in real-world units).
We use Weiszfeld's algorithm (http://en.wikipedia.org/wiki/Geometric_median).
Args:
X: (list|np.array) - voxel coordinates (3xN matrix)
numIter: (int) - limit on the number of iterations of the search for the optimum
Returns:
- np.array((x,y,z)): geometric median of the coordinates;
"""
# -- Initialising 'median' to the centroid
y = np.mean(X,1)
# -- If the init point is in the set of points, we shift it:
while (y[0] in X[0]) and (y[1] in X[1]) and (y[2] in X[2]):
y+=0.1
convergence=False # boolean testing the convergence toward a global optimum
dist=[] # list recording the distance evolution
# -- Minimizing the sum of distances between each point in 'X' and the median (squared distances are recorded only for the convergence test).
i=0
while ( (not convergence) and (i < numIter) ):
num_x, num_y, num_z = 0.0, 0.0, 0.0
denum = 0.0
m = X.shape[1]
d = 0
for j in range(0,m):
div = math.sqrt( (X[0,j]-y[0])**2 + (X[1,j]-y[1])**2 + (X[2,j]-y[2])**2 )
num_x += X[0,j] / div
num_y += X[1,j] / div
num_z += X[2,j] / div
denum += 1./div
d += div**2 # squared distance to the median, recorded for the convergence test
dist.append(d) # update of the distance evolution
if denum == 0.:
warnings.warn( "Couldn't compute a geometric median, please check your data!" )
return [0,0,0]
y = [num_x/denum, num_y/denum, num_z/denum] # update to the new value of the median
if i > 3:
convergence=(abs(dist[i]-dist[i-2])<0.1) # we test the convergence over three steps for stability
#~ print abs(dist[i]-dist[i-2]), convergence
i += 1
if not convergence:
raise ValueError("Weiszfeld's algorithm did not converge after " + str(numIter) + " iterations!")
# -- When convergence or iterations limit is reached we assume that we found the median.
return np.array(y)
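# A hedged usage sketch for geometric_median on random data (hypothetical
# input; assumes numpy is imported as np, plus the math/warnings imports the
# function above relies on).
X_demo = np.random.rand(3, 100)  # 3xN matrix of voxel coordinates
y_med = geometric_median(X_demo, numIter=200)
# Weiszfeld update used above: y <- (sum_j x_j/d_j) / (sum_j 1/d_j), with d_j = ||x_j - y||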
def hasBird(spec, threshold=16):
#working copy
img = spec.copy()
#STEP 1: Median blur
img = cv2.medianBlur(img,5)
#STEP 2: Median threshold
col_median = np.median(img, axis=0, keepdims=True)
row_median = np.median(img, axis=1, keepdims=True)
img[img < row_median * 3] = 0
img[img < col_median * 4] = 0
img[img > 0] = 1
#STEP 3: Remove singles
img = filter_isolated_cells(img, struct=np.ones((3,3)))
#STEP 4: Morph Closing
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((5,5), np.float32))
#STEP 5: Frequency crop
img = img[128:-16, :]
#STEP 6: Count columns and rows with signal
#(Note: We only use rows with signal as threshold, but columns might come in handy in other scenarios)
#column has signal?
col_max = np.max(img, axis=0)
col_max = ndimage.morphology.binary_dilation(col_max, iterations=2).astype(col_max.dtype)
cthresh = col_max.sum()
#row has signal?
row_max = np.max(img, axis=1)
row_max = ndimage.morphology.binary_dilation(row_max, iterations=2).astype(row_max.dtype)
rthresh = row_max.sum()
#final threshold
thresh = rthresh
#DBUGB: show?
#print thresh
#cv2.imshow('BIRD?', img)
#cv2.waitKey(-1)
#STEP 7: Apply threshold (Default = 16)
bird = True
if thresh < threshold:
bird = False
return bird, thresh
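# `filter_isolated_cells` is referenced above but not shown in this excerpt.
# A plausible implementation (an assumption, not necessarily the original)
# removes single-pixel components via a labelled sum of component sizes:
import numpy as np
from scipy import ndimage

def filter_isolated_cells(array, struct):
    filtered = array.copy()
    id_regions, num_ids = ndimage.label(filtered, structure=struct)
    id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids + 1)))
    filtered[(id_sizes == 1)[id_regions]] = 0  # zero out size-1 components
    return filtered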
######################################################
#list all bird species
def gradient_threshold(in_file, in_segm, thresh=1.0, out_file=None):
""" Compute a threshold from the histogram of the magnitude gradient image """
import os.path as op
import numpy as np
import nibabel as nb
from scipy import ndimage as sim
struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == '.gz':
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath('{}_gradmask{}'.format(fname, ext))
imnii = nb.load(in_file)
hdr = imnii.get_header().copy()
hdr.set_data_dtype(np.uint8) # pylint: disable=no-member
data = imnii.get_data().astype(np.float32)
mask = np.zeros_like(data, dtype=np.uint8) # pylint: disable=no-member
mask[data > 15.] = 1
segdata = nb.load(in_segm).get_data().astype(np.uint8)
segdata[segdata > 0] = 1
segdata = sim.binary_dilation(segdata, struc, iterations=2, border_value=1).astype(np.uint8) # pylint: disable=no-member
mask[segdata > 0] = 1
mask = sim.binary_closing(mask, struc, iterations=2).astype(np.uint8) # pylint: disable=no-member
# Remove small objects
label_im, nb_labels = sim.label(mask)
artmsk = np.zeros_like(mask)
if nb_labels > 2:
sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))
ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
for _, label in ordered[2:]:
mask[label_im == label] = 0
artmsk[label_im == label] = 1
mask = sim.binary_fill_holes(mask, struc).astype(np.uint8) # pylint: disable=no-member
nb.Nifti1Image(mask, imnii.get_affine(), hdr).to_filename(out_file)
return out_file
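# Standalone sketch of the "keep the two largest components" idiom above
# (illustrative only; toy mask, scipy.ndimage imported as sim):
import numpy as np
from scipy import ndimage as sim
mask = np.zeros((10, 10), np.uint8)
mask[1:3, 1:3] = 1; mask[5:9, 5:9] = 1; mask[0, 9] = 1  # three components
label_im, nb_labels = sim.label(mask)
sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))
ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
for _, label in ordered[2:]:  # drop all but the two largest (background has size 0)
    mask[label_im == label] = 0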
def makeMesh(self):
if self.Xs.shape[2] == 0:
return
X = self.Xs[:, :, 0]
Y = self.Ys[:, :, 0]
Z = self.Zs[:, :, 0]
mesh = PolyMesh()
#Come up with vertex indices in the mask
Mask = np.array(self.Mask, dtype=np.int32)
nV = np.sum(Mask)
Mask[self.Mask > 0] = np.arange(nV) + 1
Mask = Mask - 1
VPos = np.zeros((nV, 3))
VPos[:, 0] = X[self.Mask > 0]
VPos[:, 1] = Y[self.Mask > 0]
VPos[:, 2] = -Z[self.Mask > 0]
#Add lower right triangle
v1 = Mask[0:-1, 0:-1].flatten()
v2 = Mask[1:, 0:-1].flatten()
v3 = Mask[1:, 1:].flatten()
N = v1.size
ITris1 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
#Add upper right triangle
v1 = Mask[0:-1, 0:-1].flatten()
v2 = Mask[1:, 1:].flatten()
v3 = Mask[0:-1, 1:].flatten()
N = v1.size
ITris2 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
ITris = np.concatenate((ITris1, ITris2), 0)
#Only retain triangles which have all three points
ITris = ITris[np.sum(ITris == -1, 1) == 0, :]
mesh.VPos = VPos
mesh.ITris = ITris
mesh.VColors = 0.5*np.ones(mesh.VPos.shape)
mesh.updateNormalBuffer()
mesh.VPosVBO = vbo.VBO(np.array(mesh.VPos, dtype=np.float32))
mesh.VNormalsVBO = vbo.VBO(np.array(mesh.VNormals, dtype=np.float32))
mesh.VColorsVBO = vbo.VBO(np.array(mesh.VColors, dtype=np.float32))
mesh.IndexVBO = vbo.VBO(mesh.ITris, target=GL_ELEMENT_ARRAY_BUFFER)
mesh.needsDisplayUpdate = False
self.mesh = mesh
#Compute the delta coordinates and solvers for all frames of the video
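# The mask-to-vertex-index trick above in miniature (illustration only):
# in-mask pixels get consecutive indices, out-of-mask pixels get -1, so any
# quad touching the outside is discarded by the `ITris == -1` test.
import numpy as np
m = np.array([[1, 1, 0],
              [1, 1, 1]], dtype=np.int32)
idx = np.array(m)
idx[m > 0] = np.arange(np.sum(m)) + 1
idx = idx - 1  # -> [[0, 1, -1], [2, 3, 4]]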
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_cotangent_laplacian_smoothing_force(topomesh):
compute_topomesh_property(topomesh,'vertices',degree=2)
compute_topomesh_property(topomesh,'length',degree=1)
triangle_vertices = topomesh.wisp_property('vertices',degree=2).values(list(topomesh.wisps(2)))
rotated_triangle_vertices = np.transpose([triangle_vertices[:,2],triangle_vertices[:,0],triangle_vertices[:,1]])
antirotated_triangle_vertices = np.transpose([triangle_vertices[:,1],triangle_vertices[:,2],triangle_vertices[:,0]])
triangle_vertices = np.append(np.append(triangle_vertices,rotated_triangle_vertices,axis=0),antirotated_triangle_vertices,axis=0)
edge_index_list = np.array([[1, 2],[0, 2],[0, 1]])
triangle_edge_vertices = triangle_vertices[:,edge_index_list]
triangle_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,1]) - topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,0])
#triangle_edge_lengths = np.power(np.sum(np.power(triangle_edge_vectors,2.0),axis=2),0.5)
triangle_edge_lengths = np.linalg.norm(triangle_edge_vectors,axis=2)
triangle_edge_directions = triangle_edge_vectors/triangle_edge_lengths[...,np.newaxis]
triangle_perimeters = np.sum(triangle_edge_lengths,axis=1)
triangle_areas = np.sqrt((triangle_perimeters/2.0)*(triangle_perimeters/2.0-triangle_edge_lengths[:,0])*(triangle_perimeters/2.0-triangle_edge_lengths[:,1])*(triangle_perimeters/2.0-triangle_edge_lengths[:,2]))
triangle_cosines = np.zeros_like(triangle_edge_lengths,np.float32)
triangle_cosines[:,0] = (triangle_edge_lengths[:,1]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,0]**2)/(2.0*triangle_edge_lengths[:,1]*triangle_edge_lengths[:,2])
triangle_cosines[:,1] = (triangle_edge_lengths[:,2]**2+triangle_edge_lengths[:,0]**2-triangle_edge_lengths[:,1]**2)/(2.0*triangle_edge_lengths[:,2]*triangle_edge_lengths[:,0])
triangle_cosines[:,2] = (triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2)/(2.0*triangle_edge_lengths[:,0]*triangle_edge_lengths[:,1])
triangle_cosines[np.where(np.abs(triangle_cosines) < np.power(10.,-6))] = np.power(10.,-6)
triangle_angles = np.arccos(triangle_cosines)
triangle_sinuses = np.zeros_like(triangle_edge_lengths,np.float32)
triangle_sinuses[:,0] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,1]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,0]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,1]*triangle_edge_lengths[:,2],2.0),np.float16))
triangle_sinuses[:,1] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,2]**2+triangle_edge_lengths[:,0]**2-triangle_edge_lengths[:,1]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,2]*triangle_edge_lengths[:,0],2.0),np.float16))
triangle_sinuses[:,2] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,0]*triangle_edge_lengths[:,1],2.0),np.float16))
triangle_sinuses[np.where(triangle_sinuses < np.power(10.,-6))] = np.power(10.,-6)
triangle_cotangent_weights = (triangle_cosines/triangle_sinuses)
#triangle_cotangent_weights = 1./triangle_edge_lengths
#triangle_cotangent_weights = 0.5*(triangle_cosines/triangle_sinuses)/(triangle_edge_lengths + np.power(10.,-10))
#triangle_cotangent_vectors = (triangle_cosines/triangle_sinuses)[...,np.newaxis] * triangle_edge_vectors/triangle_edge_lengths[:,np.newaxis]
triangle_cotangent_vectors = triangle_cotangent_weights[...,np.newaxis] * triangle_edge_vectors
#triangle_cotangent_vectors = triangle_cotangent_weights[...,np.newaxis] * triangle_edge_directions
# triangle_cotangent_vectors = 1./np.tan(triangle_angles)[...,np.newaxis] * triangle_edge_vectors
vertex_cotangent_force = np.transpose([nd.sum(triangle_cotangent_vectors[:,1,d]+triangle_cotangent_vectors[:,2,d],triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0)))) for d in xrange(3)])
vertex_cotangent_sum = nd.sum(triangle_cotangent_weights[:,1] + triangle_cotangent_weights[:,2],triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0))))
vertex_area = nd.sum(triangle_areas,triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0))))
#laplacian_force = vertex_cotangent_force
laplacian_force = vertex_cotangent_force/vertex_cotangent_sum[...,np.newaxis]
#laplacian_force = vertex_cotangent_force/(4.*vertex_area[...,np.newaxis])
laplacian_force[np.where(np.isnan(laplacian_force))] = 0.0
laplacian_force[np.where(np.isinf(laplacian_force))] = 0.0
return laplacian_force
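# The weights above are the classic cotangent weights: for the angle A opposite
# edge a, cos(A) follows from the law of cosines and cot(A) = cos(A)/sin(A).
# A tiny check on a 3-4-5 right triangle (illustrative only; numpy as np):
a, b, c = 5.0, 4.0, 3.0  # edge lengths, with the right angle opposite edge a
cosA = (b**2 + c**2 - a**2) / (2.0*b*c)  # = 0.0 -> A = 90 degrees
sinA = np.sqrt(1.0 - cosA**2)            # = 1.0
# cot(A) = cosA/sinA = 0: a right angle contributes zero cotangent weight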
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_taubin_smoothing_force(topomesh,gaussian_sigma=10.0,positive_factor=0.33,negative_factor=-0.34,cellwise_smoothing=True):
if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'vertices',degree=1)
if cellwise_smoothing:
edge_vertices = topomesh.wisp_property('vertices',degree=1).values(np.concatenate([np.array(list(topomesh.borders(3,c,2)),int) for c in topomesh.wisps(3)]))
else:
edge_vertices = topomesh.wisp_property('vertices',degree=1).values()
reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)
edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
edge_lengths = np.linalg.norm(edge_vectors,axis=1)
gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors
taubin_force = np.transpose([nd.sum(gaussian_edge_vectors[:,0],edge_vertices[:,0],index=list(topomesh.wisps(0))),
nd.sum(gaussian_edge_vectors[:,1],edge_vertices[:,0],index=list(topomesh.wisps(0))),
nd.sum(gaussian_edge_vectors[:,2],edge_vertices[:,0],index=list(topomesh.wisps(0)))])
#vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
vertices_weights = nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
taubin_positive_force = positive_factor*taubin_force/vertices_weights[:,np.newaxis]
taubin_positions = array_dict(topomesh.wisp_property('barycenter',degree=0).values(list(topomesh.wisps(0)))+taubin_positive_force, list(topomesh.wisps(0)))
edge_vectors = taubin_positions.values(edge_vertices[:,1]) - taubin_positions.values(edge_vertices[:,0])
edge_lengths = np.linalg.norm(edge_vectors,axis=1)
gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors
taubin_force = np.transpose([nd.sum(gaussian_edge_vectors[:,0],edge_vertices[:,0],index=list(topomesh.wisps(0))),
nd.sum(gaussian_edge_vectors[:,1],edge_vertices[:,0],index=list(topomesh.wisps(0))),
nd.sum(gaussian_edge_vectors[:,2],edge_vertices[:,0],index=list(topomesh.wisps(0)))])
#vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
vertices_weights = nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
taubin_negative_force = negative_factor*taubin_force/vertices_weights[:,np.newaxis]
taubin_force = taubin_positive_force + taubin_negative_force
return taubin_force
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_laplacian_epidermis_convexity_force(topomesh):
"""todo"""
if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'vertices',degree=1)
if not topomesh.has_wisp_property('barycenter',degree=3,is_computed=True):
compute_topomesh_property(topomesh,'barycenter',degree=3)
if not topomesh.has_wisp_property('epidermis',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'epidermis',degree=1)
if not topomesh.has_wisp_property('cells',degree=0,is_computed=True):
compute_topomesh_property(topomesh,'cells',degree=0)
if not topomesh.has_wisp_property('cells',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'cells',degree=1)
epidermis_convexity_force = array_dict(np.zeros_like(topomesh.wisp_property('barycenter',degree=0).values(),np.float32),np.array(list(topomesh.wisps(0))))
edge_cells_degree = np.array(map(len,topomesh.wisp_property('cells',degree=1).values()))
# edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((topomesh.wisp_property('epidermis',degree=1).values()))]
# edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((topomesh.wisp_property('epidermis',degree=1).values())&(edge_cells_degree>1))]
edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((edge_cells_degree>2)|((topomesh.wisp_property('epidermis',degree=1).values())&(edge_cells_degree>1)))]
epidermis_vertices = np.unique(edge_vertices)
vertices_degrees = np.array(nd.sum(np.ones_like(edge_vertices),edge_vertices,index=epidermis_vertices),int)
reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)
edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
laplacian_force = np.transpose([nd.sum(edge_vectors[:,0],edge_vertices[:,0],index=epidermis_vertices),
nd.sum(edge_vectors[:,1],edge_vertices[:,0],index=epidermis_vertices),
nd.sum(edge_vectors[:,2],edge_vertices[:,0],index=epidermis_vertices)])
laplacian_force = laplacian_force/vertices_degrees[:,np.newaxis]
# vertex_cell_barycenter = np.array([np.mean(topomesh.wisp_property('barycenter',degree=3).values(c),axis=0) for c in topomesh.wisp_property('cells',0).values(epidermis_vertices)])
# vertices_directions = topomesh.wisp_property('barycenter',degree=0).values(epidermis_vertices) - vertex_cell_barycenter
# vertices_directions = vertices_directions/np.linalg.norm(vertices_directions,axis=1)[:,np.newaxis]
# vertices_products = np.einsum('ij,ij->i',laplacian_force,vertices_directions)
# convex_points = np.where(vertices_products<0.)[0]
# laplacian_force[convex_points] -= vertices_directions[convex_points]*vertices_products[convex_points,np.newaxis]
# laplacian_force[convex_points] = np.array([0.,0.,0.])
epidermis_convexity_force.update(laplacian_force,keys=epidermis_vertices,erase_missing_keys=False)
return epidermis_convexity_force.values()
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def property_topomesh_epidermis_planarization_force(topomesh):
if not topomesh.has_wisp_property('epidermis',degree=2,is_computed=True):
compute_topomesh_property(topomesh,'epidermis',degree=2)
if not topomesh.has_wisp_property('epidermis',degree=3,is_computed=True):
compute_topomesh_property(topomesh,'epidermis',degree=3)
if not topomesh.has_wisp_property('epidermis',degree=0,is_computed=True):
compute_topomesh_property(topomesh,'epidermis',degree=0)
if not topomesh.has_wisp_property('cells',degree=0,is_computed=True):
compute_topomesh_property(topomesh,'cells',degree=0)
if not topomesh.has_wisp_property('barycenter',degree=2,is_computed=True):
compute_topomesh_property(topomesh,'barycenter',2)
if not topomesh.has_wisp_property('barycenter',degree=3,is_computed=True):
compute_topomesh_property(topomesh,'barycenter',3)
if not topomesh.has_wisp_property('length',degree=1,is_computed=True):
compute_topomesh_property(topomesh,'length',degree=1)
if not topomesh.has_wisp_property('area',degree=2,is_computed=True):
compute_topomesh_property(topomesh,'area',degree=2)
if not topomesh.has_wisp_property('normal',degree=2,is_computed=True):
compute_topomesh_property(topomesh,'normal',degree=2)
epidermis_triangles = np.array(list(topomesh.wisps(2)))[topomesh.wisp_property('epidermis',2).values()]
epidermis_cells = np.array(list(topomesh.wisps(3)))[topomesh.wisp_property('epidermis',3).values()]
epidermis_vertices = np.array(list(topomesh.wisps(0)))[topomesh.wisp_property('epidermis',0).values()]
triangle_cells = np.concatenate(topomesh.wisp_property('cells',2).values(epidermis_triangles))
triangle_areas = topomesh.wisp_property('area',2).values(epidermis_triangles)
triangle_centers = topomesh.wisp_property('barycenter',2).values(epidermis_triangles)
triangle_weighted_normals = triangle_areas[:,np.newaxis]*topomesh.wisp_property('normal',2).values(epidermis_triangles)
cell_areas = nd.sum(triangle_areas,triangle_cells,index=epidermis_cells)
cell_epidermis_centers = np.transpose([nd.sum(triangle_areas*triangle_centers[:,k],triangle_cells,index=epidermis_cells) for k in xrange(3)])
cell_epidermis_centers = cell_epidermis_centers/cell_areas[:,np.newaxis]
cell_epidermis_centers = array_dict(cell_epidermis_centers,epidermis_cells)
cell_weighted_normals = np.transpose([nd.sum(triangle_weighted_normals[:,k],triangle_cells,index=epidermis_cells) for k in xrange(3)])
cell_normals = cell_weighted_normals/cell_areas[:,np.newaxis]
cell_normals = array_dict(cell_normals/np.linalg.norm(cell_normals,axis=1)[:,np.newaxis],epidermis_cells)
vertex_cells = np.concatenate([np.intersect1d(np.array(topomesh.wisp_property('cells',0)[v],int),epidermis_cells) for v in epidermis_vertices])
vertex_cell_vertex = np.concatenate([v*np.ones_like(np.intersect1d(topomesh.wisp_property('cells',0)[v],epidermis_cells),int) for v in epidermis_vertices])
vertex_cell_normals = cell_normals.values(vertex_cells)
#vertex_cell_normals = np.array([cell_normals[c] if cell_normals.has_key(c) else np.array([0,0,1]) for c in vertex_cells])
vertex_cell_centers = cell_epidermis_centers.values(vertex_cells)
#vertex_cell_centers = np.array([cell_epidermis_centers[c] if cell_epidermis_centers.has_key(c) else topomesh.wisp_property('barycenter',3)[c]])
vertex_cell_vectors = topomesh.wisp_property('barycenter',0).values(vertex_cell_vertex) - vertex_cell_centers
vertex_cell_projectors = -np.einsum('ij,ij->i',vertex_cell_vectors,vertex_cell_normals)[:,np.newaxis]*vertex_cell_normals
vertex_projectors = np.transpose([nd.sum(vertex_cell_projectors[:,k],vertex_cell_vertex,index=list(topomesh.wisps(0))) for k in xrange(3)])
vertex_projectors[np.isnan(vertex_projectors)] = 0.
return vertex_projectors
# Source: property_topomesh_optimization.py (project: cellcomplex, author: VirtualPlants)
def topomesh_split_edge(topomesh,eid):
pid_to_keep, pid_to_unlink = topomesh.borders(1,eid)
edge_fids = list(topomesh.regions(1,eid))
eids_to_unlink = np.array([list(set(list(topomesh.borders(2,fid))).intersection(set(list(topomesh.regions(0,pid_to_unlink)))).difference({eid}))[0] for fid in edge_fids])
pids_split = np.array([list(set(list(topomesh.borders(2,fid,2))).difference({pid_to_keep, pid_to_unlink}))[0] for fid in edge_fids])
pid_to_add = topomesh.add_wisp(0)
topomesh.unlink(1,eid,pid_to_unlink)
topomesh.link(1,eid,pid_to_add)
# print eid," : ",pid_to_keep,pid_to_add
topomesh.wisp_property("barycenter",0)[pid_to_add] = (topomesh.wisp_property("barycenter",0).values([pid_to_keep,pid_to_unlink]).sum(axis=0))/2.
eid_to_add = topomesh.add_wisp(1)
topomesh.link(1,eid_to_add,pid_to_add)
topomesh.link(1,eid_to_add,pid_to_unlink)
# print "Split ",eid_to_add," : ",pid_to_add,pid_to_unlink
for fid, eid_to_unlink, pid_split in zip(edge_fids,eids_to_unlink,pids_split):
eid_split = topomesh.add_wisp(1)
topomesh.link(1,eid_split,pid_to_add)
topomesh.link(1,eid_split,pid_split)
# print "Added ",eid_split," : ",pid_to_add,pid_split
topomesh.unlink(2,fid,eid_to_unlink)
topomesh.link(2,fid,eid_split)
fid_to_add = topomesh.add_wisp(2)
topomesh.link(2,fid_to_add,eid_to_add)
topomesh.link(2,fid_to_add,eid_to_unlink)
topomesh.link(2,fid_to_add,eid_split)
for cid in topomesh.regions(2,fid):
topomesh.link(3,cid,fid_to_add)
# edge_borders = np.array(map(len,map(np.unique,[list(topomesh.borders(1,e)) for e in topomesh.wisps(1)])))
# if np.min(edge_borders) == 1:
# edge_vertices = np.sort(np.array([list(topomesh.borders(1,e)) for e in topomesh.wisps(1) if topomesh.nb_borders(1,e) == 2]))
# edge_vertex_id = edge_vertices[:,0]*10000 + edge_vertices[:,1]
# if edge_vertices.shape[0] != array_unique(edge_vertices).shape[0]:
# print eid," split error : (",pid_to_keep,pid_to_unlink,")",np.array(list(topomesh.wisps(1)))[nd.sum(np.ones_like(edge_vertex_id),edge_vertex_id,index=edge_vertex_id)>1]
# raw_input()
return True
def compute_topomesh_cell_property_from_faces(topomesh, property_name, aggregate='mean', weighting='area'):
"""Compute a property on degree 3 using the same property defined at degree 2.
The cell property is computed by averaging or summing the properties of its
border faces, weighting them differently according to the chosen method.
Args:
topomesh (:class:`openalea.cellcomplex.property_topomesh.PropertyTopomesh`):
The structure on which to compute the property.
property_name (str):
The name of the property to compute (must be already computed on faces).
aggregate (str):
The way weighted face properties are combined to compute the cell property (default is *mean*):
* *mean*: the cell property is the weighted average of face properties
* *sum*: the cell property is the weighted sum of face properties
weighting (str):
The way weights are assigned to each face to compute the property (default is *area*):
* *uniform*: all the faces have the same weight (1)
* *area*: the weight on the faces is equal to their area
Returns:
None
Note:
The PropertyTopomesh passed as argument is updated.
"""
start_time = time()
print "--> Computing cell property from faces"
assert topomesh.has_wisp_property(property_name,2,is_computed=True)
assert weighting in ['uniform','area']
face_property = topomesh.wisp_property(property_name,2)
cell_faces = np.concatenate([list(topomesh.borders(3,c)) for c in topomesh.wisps(3)]).astype(np.uint32)
cell_face_cells = np.concatenate([c*np.ones(topomesh.nb_borders(3,c)) for c in topomesh.wisps(3)]).astype(np.uint32)
if weighting == 'uniform':
cell_face_weight = np.ones_like(cell_faces)
elif weighting == 'area':
compute_topomesh_property(topomesh,'area',2)
cell_face_weight = topomesh.wisp_property('area',2).values(cell_faces)
cell_face_property = face_property.values(cell_faces)
if cell_face_property.ndim == 1:
cell_property = nd.sum(cell_face_weight*cell_face_property,cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3)))
elif cell_face_property.ndim == 2:
cell_property = np.transpose([nd.sum(cell_face_weight*cell_face_property[:,k],cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3))) for k in xrange(cell_face_property.shape[1])])
elif cell_face_property.ndim == 3:
cell_property = np.transpose([[nd.sum(cell_face_weight*cell_face_property[:,j,k],cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3))) for k in xrange(cell_face_property.shape[2])] for j in xrange(cell_face_property.shape[1])])
topomesh.update_wisp_property(property_name,degree=3,values=array_dict(cell_property,keys=list(topomesh.wisps(3))))
end_time = time()
print "<-- Computing cell property from faces [",end_time-start_time,"s]"
def density_contour_plot(figure,X,Y,color,XY_range=None,xlabel="",ylabel="",n_points=100,n_contours=10,smooth_factor=1.0,linewidth=1,marker_size=40.,alpha=1.0,label=""):
font = fm.FontProperties(family = 'Trebuchet', weight ='light')
figure.patch.set_facecolor('white')
axes = figure.add_subplot(111)
if XY_range is None:
XY_range = [[X.min(),X.max()],[Y.min(),Y.max()]]
range_x = np.linspace(XY_range[0][0],XY_range[0][1],n_points)
range_y = np.linspace(XY_range[1][0],XY_range[1][1],n_points)
xx, yy = np.meshgrid(range_x,range_y)
range_x_cr = (range_x - range_x.mean())/range_x.std()
range_y_cr = (range_y - range_y.mean())/range_y.std()
xx_cr, yy_cr = np.meshgrid(range_x_cr,range_y_cr)
def density_function(positions,radius=1,k=0.1):
def density_func(x,y):
points = np.array(positions.values())
if len((x+y).shape) == 1:
distances = np.power(np.power(x[np.newaxis] - points[:,0,np.newaxis],2) + np.power(y[np.newaxis] - points[:,1,np.newaxis],2),0.5)
elif len((x+y).shape) == 2:
distances = np.power(np.power(x[np.newaxis] - points[:,0,np.newaxis,np.newaxis],2) + np.power(y[np.newaxis] - points[:,1,np.newaxis,np.newaxis],2),0.5)
density_potential = 1./2. * (1. - np.tanh(k*(distances - radius)))
density = density_potential.sum(axis=0)
return density
return density_func
positions = dict(zip(range(len(X)),np.transpose([X,Y])))
positions_cr = dict(zip(range(len(X)),np.transpose([(X-range_x.mean())/range_x.std(),(Y-range_y.mean())/range_y.std()])))
# radius = np.sqrt((X.std() * Y.std()))
radius = 10.0/n_points
density_k = 100.*np.exp(-smooth_factor/2.)
data_density = density_function(positions_cr,radius,density_k)(xx_cr,yy_cr)
levels = np.linspace(0,data_density.max(),n_contours)
colors = np.array([(lev*color + (n_contours-1-lev)*np.ones(3))/(n_contours-1) for lev in xrange(n_contours)])
axes.scatter(X,Y,s=marker_size,c=color,linewidth=0,alpha=alpha/2.,label=label)
axes.contour(xx,yy,data_density,linewidths=linewidth,levels=levels,colors=colors,alpha=(alpha+1)/2.)
axes.set_xlim(*tuple(XY_range[0]))
axes.set_xlabel(xlabel,fontproperties=font, size=10, style='italic')
axes.set_xticklabels(axes.get_xticks(),fontproperties=font, size=12)
axes.set_ylim(*tuple(XY_range[1]))
axes.set_ylabel(ylabel, fontproperties=font, size=10, style='italic')
axes.set_yticklabels(axes.get_yticks(),fontproperties=font, size=12)
def get_contours3d(A, dims, thr=0.9):
"""Gets contour of spatial components and returns their coordinates
Parameters
-----------
A: np.ndarray or sparse matrix
Matrix of Spatial components (d x K)
dims: tuple of ints
Spatial dimensions of movie (x, y, z)
thr: scalar between 0 and 1
Energy threshold for computing contours (default 0.9)
Returns
--------
Coor: list of coordinates with center of mass and
contour plot coordinates (per layer) for each component
"""
d, nr = np.shape(A)
d1, d2, d3 = dims
x, y = np.mgrid[0:d2:1, 0:d3:1]
coordinates = []
cm = np.asarray([center_of_mass(a.reshape(dims, order='F')) for a in A.T])
for i in range(nr):
pars = dict()
indx = np.argsort(A[:, i], axis=None)[::-1]
cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros(d)
Bvec[indx] = cumEn
Bmat = np.reshape(Bvec, dims, order='F')
pars['coordinates'] = []
for B in Bmat:
cs = plt.contour(y, x, B, [thr])
# this fix is necessary for having disjoint figures and borders plotted correctly
p = cs.collections[0].get_paths()
v = np.atleast_2d([np.nan, np.nan])
for pths in p:
vtx = pths.vertices
num_close_coords = np.sum(np.isclose(vtx[0, :], vtx[-1, :]))
if num_close_coords < 2:
if num_close_coords == 0:
# case angle
newpt = np.round(vtx[-1, :] / [d2, d1]) * [d2, d1]
vtx = np.concatenate((vtx, newpt[np.newaxis, :]), axis=0)
else:
# case one is border
vtx = np.concatenate((vtx, vtx[0, np.newaxis]), axis=0)
v = np.concatenate((v, vtx, np.atleast_2d([np.nan, np.nan])), axis=0)
pars['coordinates'] += [v]
pars['CoM'] = np.squeeze(cm[i, :])
pars['neuron_id'] = i + 1
coordinates.append(pars)
return coordinates
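# The per-component loop above builds a cumulative-energy mask: voxels are
# sorted by decreasing weight, and Bvec[v] is the energy fraction carried by
# voxels at least as strong as v. A minimal 1-D sketch (illustrative only):
import numpy as np
a = np.array([0.1, 0.7, 0.5, 0.0])
indx = np.argsort(a)[::-1]  # strongest voxel first
cumEn = np.cumsum(a[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros_like(a)
Bvec[indx] = cumEn
inside = Bvec <= 0.9  # only the 0.7 voxel (~65% of the energy) lies inside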
def run(self, ips, imgs, para = None):
inten = WindowsManager.get(para['inten']).ips
if not para['slice']:
imgs = [inten.img]
msks = [ips.img]
else:
msks = ips.imgs
if len(msks)==1:
msks *= len(imgs)
buf = imgs[0].astype(np.uint16)
strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
idct = ['Max','Min','Mean','Variance','Standard','Sum']
key = {'Max':'max','Min':'min','Mean':'mean',
'Variance':'var','Standard':'std','Sum':'sum'}
idct = [i for i in idct if para[key[i]]]
titles = ['Slice', 'ID'][0 if para['slice'] else 1:]
if para['center']: titles.extend(['Center-X','Center-Y'])
if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
titles.extend(idct)
k = ips.unit[0]
data, mark = [], []
for i in range(len(imgs)):
n = ndimage.label(msks[i], strc, output=buf)
index = range(1, n+1)
dt = []
if para['slice']:dt.append([i]*n)
dt.append(range(n))
xy = ndimage.center_of_mass(imgs[i], buf, index)
xy = np.array(xy).round(2).T
if para['center']:dt.extend([xy[1]*k, xy[0]*k])
boxs = [None] * n
if para['extent']:
boxs = ndimage.find_objects(buf)
boxs = [(i[0].start, i[1].start, i[0].stop, i[1].stop) for i in boxs]
for j in (0,1,2,3):
dt.append([i[j]*k for i in boxs])
if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))
if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2))
if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))
mark.append([(center, cov) for center,cov in zip(xy.T, boxs)])
data.extend(list(zip(*dt)))
IPy.table(inten.title+'-region statistic', data, titles)
inten.mark = Mark(mark)
inten.update = True
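# Standalone sketch of the per-region measurements driving the table above
# (illustrative only; toy image, scipy.ndimage imported as ndimage):
import numpy as np
from scipy import ndimage
img = np.array([[0, 2, 2],
                [0, 0, 5],
                [7, 0, 5]], float)
lab, n = ndimage.label(img > 0)
index = range(1, n + 1)
print(ndimage.sum(img, lab, index))   # total intensity per region: [14., 7.]
print(ndimage.mean(img, lab, index))  # mean intensity per region: [3.5, 7.]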
def _sn_func(index, signal=None, noise=None):
"""
Default function to calculate the S/N of a bin with spaxels "index".
The Voronoi binning algorithm does not require this function to have a
specific form and this default one can be changed by the user if needed
by passing a different function as
... = voronoi_2d_binning(..., sn_func=sn_func)
The S/N returned by sn_func() does not need to be an analytic
function of S and N.
There is also no need for sn_func() to return the actual S/N.
Instead sn_func() could return any quantity the user needs to equalize.
For example sn_func() could be a procedure which uses ppxf to measure
the velocity dispersion from the coadded spectrum of spaxels "index"
and returns the relative error in the dispersion.
Of course an analytic approximation of S/N, like the one below,
speeds up the calculation.
:param index: integer vector of length N containing the indices of
the spaxels for which the combined S/N has to be returned.
The indices refer to elements of the vectors signal and noise.
:param signal: vector of length M>N with the signal of all spaxels.
:param noise: vector of length M>N with the noise of all spaxels.
:return: scalar S/N or another quantity that needs to be equalized.
"""
sn = np.sum(signal[index])/np.sqrt(np.sum(noise[index]**2))
# The following commented line illustrates, as an example, how one
# would include the effect of spatial covariance using the empirical
# Eq.(1) from http://adsabs.harvard.edu/abs/2015A%26A...576A.135G
# Note however that the formula is not accurate for large bins.
#
# sn /= 1 + 1.07*np.log10(index.size)
return sn
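# Worked example of the default S/N estimator above (illustrative values only):
import numpy as np
signal = np.array([3.0, 4.0, 5.0])
noise = np.array([1.0, 1.0, 1.0])
index = np.array([0, 2])
sn = np.sum(signal[index]) / np.sqrt(np.sum(noise[index]**2))
# (3 + 5)/sqrt(1 + 1) = 8/sqrt(2) ~ 5.66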
#----------------------------------------------------------------------
def _cvt_equal_mass(x, y, signal, noise, xnode, ynode, pixelsize, quiet, sn_func, wvt):
"""
Implements the modified Lloyd algorithm
in section 4.1 of Cappellari & Copin (2003).
NB: When the keyword WVT is set this routine includes
the modification proposed by Diehl & Statler (2006).
"""
dens2 = (signal/noise)**4 # See beginning of section 4.1 of CC03
scale = np.ones_like(xnode) # Start with the same scale length for all bins
for it in range(1, xnode.size): # Do at most xnode.size iterations
xnode_old, ynode_old = xnode.copy(), ynode.copy()
classe = voronoi_tessellation(x, y, xnode, ynode, scale)
# Computes centroids of the bins, weighted by dens**2.
# Exponent 2 on the density produces equal-mass Voronoi bins.
# The geometric centroids are computed if WVT keyword is set.
#
good = np.unique(classe)
if wvt:
for k in good:
index = np.flatnonzero(classe == k) # Find subscripts of pixels in bin k.
xnode[k], ynode[k] = np.mean(x[index]), np.mean(y[index])
sn = sn_func(index, signal, noise)
scale[k] = np.sqrt(index.size/sn) # Eq. (4) of Diehl & Statler (2006)
else:
mass = ndimage.sum(dens2, labels=classe, index=good)
xnode = ndimage.sum(x*dens2, labels=classe, index=good)/mass
ynode = ndimage.sum(y*dens2, labels=classe, index=good)/mass
diff2 = np.sum((xnode - xnode_old)**2 + (ynode - ynode_old)**2)
diff = np.sqrt(diff2)/pixelsize
if not quiet:
print('Iter: %4i Diff: %.4g' % (it, diff))
if diff < 0.1:
break
# If coordinates have changed, re-compute (Weighted) Voronoi Tessellation of the pixels grid
#
if diff > 0:
classe = voronoi_tessellation(x, y, xnode, ynode, scale)
good = np.unique(classe) # Check for zero-size Voronoi bins
# Only return the generators and scales of the nonzero Voronoi bins
return xnode[good], ynode[good], scale[good], it
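# The unweighted branch above computes density**2-weighted bin centroids with
# labelled sums. A standalone sketch (illustrative only):
import numpy as np
from scipy import ndimage
x = np.array([0.0, 1.0, 2.0, 3.0])
dens2 = np.array([1.0, 1.0, 1.0, 3.0])
classe = np.array([0, 0, 1, 1])  # bin membership of each pixel
good = np.unique(classe)
mass = ndimage.sum(dens2, labels=classe, index=good)
xnode = ndimage.sum(x*dens2, labels=classe, index=good)/mass  # -> [0.5, 2.75]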
#-----------------------------------------------------------------------
def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
"""
Compute observable fraction of stars with masses greater than mass_min in each
pixel in the interior region of the mask. Incorporates simplistic
photometric errors.
ADW: Careful, this function is fragile! The selection here should
be the same as mask.restrictCatalogToObservable space. However,
for technical reasons it is faster to do the calculation with
broadcasting here.
ADW: This function is currently a rate-limiting step in the likelihood
calculation. Could it be faster?
"""
method = 'step'
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
mag_1 = mag_1+distance_modulus
mag_2 = mag_2+distance_modulus
mask_1,mask_2 = mask.mask_roi_unique.T
mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
# "upper" bound set by maglim
delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2
# "lower" bound set by bins_mag (maglim shouldn't be 0)
delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2
cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
cdf = cdf_1*cdf_2
if method is None or method == 'none':
comp_cdf = cdf
elif self.band_1_detection == True:
comp = mask.mask_1.completeness(mag_1, method=method)
comp_cdf = comp*cdf
elif self.band_1_detection == False:
comp = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp*cdf
else:
comp_1 = mask.mask_1.completeness(mag_1, method=method)
comp_2 = mask.mask_2.completeness(mag_2, method=method)
comp_cdf = comp_1*comp_2*cdf
observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
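# The photometric-error window above is Phi(delta_hi) - Phi(delta_lo) per band.
# A minimal sketch with scipy.stats.norm standing in for the norm_cdf helper
# (an assumption; magnitudes are illustrative):
import numpy as np
from scipy.stats import norm
mag, maglim, mag_bright, mag_err = 23.0, 24.0, 16.0, 0.5
delta_hi = (maglim - mag) / mag_err      # distance to the faint limit
delta_lo = (mag_bright - mag) / mag_err  # distance to the bright edge
cdf = norm.cdf(delta_hi) - norm.cdf(delta_lo)  # ~0.977: star mostly observable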