import numpy as np
from skimage import exposure
import pisap


def scaling(image, method="stretching"):
    """ Change the image dynamic range.

    Parameters
    ----------
    image: Image
        the image to be transformed.
    method: str, default 'stretching'
        the normalization method: 'stretching', 'equalization' or 'adaptive'.

    Returns
    -------
    normalize_image: Image
        the normalized image.
    """
    # Contrast stretching
    if method == "stretching":
        p2, p98 = np.percentile(image.data, (2, 98))
        norm_data = exposure.rescale_intensity(image.data, in_range=(p2, p98))
    # Equalization
    elif method == "equalization":
        norm_data = exposure.equalize_hist(image.data)
    # Adaptive Equalization
    elif method == "adaptive":
        norm_data = exposure.equalize_adapthist(image.data, clip_limit=0.03)
    # Unknown method
    else:
        raise ValueError("Unknown normalization '{0}'.".format(method))
    normalize_image = pisap.Image(data=norm_data)
    return normalize_image
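
# Usage sketch (illustrative, not part of the original snippet): pisap.Image is
# assumed to wrap a 2D numpy array via the `data` keyword, as done above.
def _scaling_demo():
    demo = pisap.Image(data=np.random.rand(128, 128))
    return [scaling(demo, method=m)
            for m in ("stretching", "equalization", "adaptive")]
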
def histogram(image, nbins=256, lower_cut=0., cumulate=False):
    """ Compute the histogram of an input dataset.

    Parameters
    ----------
    image: Image
        the image that contains the dataset to be analysed.
    nbins: int, default 256
        the histogram number of bins.
    lower_cut: float, default 0
        do not consider the intensities under this threshold.
    cumulate: bool, default False
        if set, compute the cumulative histogram.

    Returns
    -------
    hist_im: Image
        the generated histogram.
    """
    hist, bins = np.histogram(image.data[image.data > lower_cut], nbins)
    if cumulate:
        # Scale the cumulative distribution so it can be overlaid on the histogram.
        cdf = hist.cumsum()
        cdf_normalized = cdf * hist.max() / cdf.max()
        hist_im = pisap.Image(data=cdf_normalized)
    else:
        hist_im = pisap.Image(data=hist)
    return hist_im
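
# Usage sketch (illustrative): build both the plain and the cumulative
# histogram of an image, ignoring near-zero background intensities.
def _histogram_demo():
    demo = pisap.Image(data=np.random.rand(128, 128))
    hist_im = histogram(demo, nbins=128, lower_cut=0.1)
    cdf_im = histogram(demo, nbins=128, lower_cut=0.1, cumulate=True)
    return hist_im, cdf_im
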
import skimage.data


def _label_statistics(image_paths):
    '''
    Calculates label statistics (number of pixels selected for each class).

    Parameters
    ----------
    image_paths : list
        List of absolute paths of the picked images.

    Returns
    -------
    array: numpy array
        Number of selected pixels per class.
    '''
    # KittiDataset is a project-specific dataset wrapper; its import is not
    # shown in the snippet.
    ds = KittiDataset()

    def _rgb_2_label(rgb):
        return ds.color2label[tuple(rgb)].trainId

    total_counts = np.zeros(ds.num_classes())
    for img in image_paths:
        # skimage.data.load is deprecated in recent scikit-image releases;
        # skimage.io.imread is the usual replacement.
        rgb = skimage.data.load(img)
        labels = np.apply_along_axis(_rgb_2_label, 2, rgb)
        indices, counts = np.unique(labels, return_counts=True)
        # Drop the last bin when it belongs to an ignore/void label outside
        # the known class range.
        if indices[-1] >= ds.num_classes():
            indices = indices[0:-1]
            counts = counts[0:-1]
        total_counts[indices] += counts
    return total_counts
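
# Usage sketch (hypothetical paths): accumulate per-class pixel counts over a
# set of colour-coded KITTI ground-truth images.
# class_counts = _label_statistics(["/data/kitti/gt/um_000000.png",
#                                   "/data/kitti/gt/um_000001.png"])
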
def cut_cube(npy_img, voxelCoord, z, width, y_bias, x_bias):
    # voxelCoord is ordered (y, x, z).
    npy_ct = npy_img[int(voxelCoord[2] - z / 2):int(voxelCoord[2] + z / 2), :, :]
    # The cube is stored as float32 with axes (z, y, x) so it can be fed to
    # TensorFlow; y_bias and x_bias shift the crop for data augmentation.
    cube = np.ndarray([z, width, width], dtype=np.float32)
    cube[:, :, :] = npy_ct[:,
                           int(voxelCoord[0] - width / 2 + y_bias):int(voxelCoord[0] + width / 2 + y_bias),
                           int(voxelCoord[1] - width / 2 + x_bias):int(voxelCoord[1] + width / 2 + x_bias)]
    cube = normalizePlanes(cube)
    return cube
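
# normalizePlanes is not defined in the snippet. A common definition used in
# LUNA16-style CT preprocessing, shown here as an assumption, clips Hounsfield
# units to the [-1000, 400] window and rescales them to [0, 1]:
def normalizePlanes(npzarray):
    maxHU = 400.
    minHU = -1000.
    npzarray = (npzarray - minHU) / (maxHU - minHU)
    npzarray[npzarray > 1] = 1.
    npzarray[npzarray < 0] = 0.
    return npzarray

# Usage sketch (illustrative): crop a 32x48x48 cube around a candidate voxel.
# cube = cut_cube(ct_volume, voxelCoord=(y, x, slice_index), z=32, width=48,
#                 y_bias=0, x_bias=0)
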
def __init__(self, title):
    self.title = title
    # Look up the sample image named `title` in `data` (apparently
    # skimage.data), falling back to `misc`.
    if hasattr(data, title):
        self.data = getattr(data, title)
    else:
        self.data = getattr(misc, title)

def run(self, para=None):
    img = self.data()
    # Convert to 8-bit before display.
    if img.dtype != np.uint8:
        img = img.astype(np.uint8)
    IPy.show_img([img], self.title)
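
# Standalone sketch of the same lookup (illustrative; the plugin class and its
# imports are assumed, e.g. `from skimage import data` and `from imagepy import IPy`):
def _show_sample(title="camera"):
    img = getattr(data, title)()
    if img.dtype != np.uint8:
        img = img.astype(np.uint8)
    IPy.show_img([img], title)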