import numpy as np
from skimage import exposure


def rgb2illumination_invariant(img, alpha, hist_eq=False):
    """
    Implementation of the illumination-invariant colour space published by
    Maddern et al. (2014):
    http://www.robots.ox.ac.uk/~mobile/Papers/2014ICRA_maddern.pdf
    :param img: input colour image
    :param alpha: camera parameter
    :param hist_eq: if True, histogram-equalise the result
    :return: single-channel illumination-invariant image
    """
    ii_img = 0.5 + np.log(img[:, :, 1] + 1e-8) - \
        alpha * np.log(img[:, :, 2] + 1e-8) - \
        (1 - alpha) * np.log(img[:, :, 0] + 1e-8)
    # ii_img = exposure.rescale_intensity(ii_img, out_range=(0, 1))
    if hist_eq:
        ii_img = exposure.equalize_hist(ii_img)
    print(np.max(ii_img))
    print(np.min(ii_img))
    return ii_img
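A minimal usage sketch on synthetic data; the alpha value below is an arbitrary placeholder, not a calibrated camera parameter (Maddern et al. derive it from the sensor's peak spectral responses):

# Hedged usage sketch: synthetic data only; alpha=0.48 is a placeholder value.
import numpy as np

rgb = np.random.rand(240, 320, 3).astype(np.float32)   # fake RGB image in [0, 1]
ii = rgb2illumination_invariant(rgb, alpha=0.48, hist_eq=True)
print(ii.shape)   # (240, 320): a single-channel illumination-invariant map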
# Python equalize_hist() usage examples
def visualize_derivatives(image):
    '''
    Plot gradient magnitude on the left and Laplacian on the right.
    Only tested on 2D 1-channel float images.
    '''
    dx1, dy1 = np.gradient(image)
    gradient = dx1 + 1j * dy1
    a1 = np.abs(gradient)
    plt.figure(None, (12, 6))
    plt.subplot(121)
    a1 = mean_center(blur(exposure.equalize_hist(unitize(a1)), 1))
    plt.imshow(a1,
               origin='lower', interpolation='nearest', cmap='gray',
               extent=(0, 64,) * 2)
    plt.title('Gradient Magnitude')
    plt.subplot(122)
    laplacian = scipy.ndimage.filters.laplace(image)
    lhist = mean_center(
        blur(exposure.equalize_hist(unitize(laplacian)), 1))
    plt.imshow(lhist,
               origin='lower', interpolation='nearest', cmap='gray',
               extent=(0, 64,) * 2)
    plt.title('Laplacian')
    return gradient, laplacian
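The helpers unitize, mean_center, and blur are not included in this snippet; the stand-ins below are plausible guesses at their behaviour (assumptions, not the original implementations):

import numpy as np
import scipy.ndimage

def unitize(x):
    # Rescale to the [0, 1] range (assumed behaviour).
    x = np.asarray(x, dtype=float)
    return (x - x.min()) / (x.max() - x.min() + 1e-12)

def mean_center(x):
    # Subtract the mean so values are centred on zero (assumed behaviour).
    return x - np.mean(x)

def blur(x, sigma):
    # Gaussian blur with standard deviation sigma, in pixels (assumed behaviour).
    return scipy.ndimage.gaussian_filter(x, sigma)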
def equalize(image):
    from skimage import exposure
    return exposure.equalize_hist(image)
def scaling(image, method="stretching"):
    """
    Change the image dynamic range.

    Parameters
    ----------
    image: Image
        the image to be transformed.
    method: str, default 'stretching'
        the normalization method: 'stretching', 'equalization' or 'adaptive'.

    Returns
    -------
    normalize_image: Image
        the normalized image.
    """
    # Contrast stretching
    if method == "stretching":
        p2, p98 = np.percentile(image.data, (2, 98))
        norm_data = exposure.rescale_intensity(image.data, in_range=(p2, p98))
    # Equalization
    elif method == "equalization":
        norm_data = exposure.equalize_hist(image.data)
    # Adaptive Equalization
    elif method == "adaptive":
        norm_data = exposure.equalize_adapthist(image.data, clip_limit=0.03)
    # Unknown method
    else:
        raise ValueError("Unknown normalization '{0}'.".format(method))
    normalize_image = pisap.Image(data=norm_data)
    return normalize_image
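For reference, a numpy-only sketch of the three branches above, without the pisap.Image wrapper used by the surrounding project (image.data is assumed here to be a plain 2D array):

import numpy as np
from skimage import exposure

data = np.random.rand(256, 256)

p2, p98 = np.percentile(data, (2, 98))
stretched = exposure.rescale_intensity(data, in_range=(p2, p98))  # "stretching"
equalized = exposure.equalize_hist(data)                          # "equalization"
adaptive = exposure.equalize_adapthist(data, clip_limit=0.03)     # "adaptive"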
def _equalizeHistogram(img):
    '''
    Histogram equalisation that is not bounded to int() or an image depth
    of 8 bit; also works with negative numbers.
    '''
    # to float if int:
    intType = None
    if 'f' not in img.dtype.str:
        TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
                          np.dtype('uint16'): np.float32,
                          np.dtype('uint32'): np.float64,
                          np.dtype('uint64'): np.float64}
        intType = img.dtype
        img = img.astype(TO_FLOAT_TYPES[intType], copy=False)
    # get image depth
    DEPTH_TO_NBINS = {np.dtype('float16'): 256,         # uint8
                      np.dtype('float32'): 32768,       # uint16
                      np.dtype('float64'): 2147483648}  # uint32
    nBins = DEPTH_TO_NBINS[img.dtype]
    # scale to -1 ... 1 due to scikit-image restrictions
    mn, mx = np.amin(img), np.amax(img)
    if abs(mn) > abs(mx):
        mx = mn
    img /= mx
    img = exposure.equalize_hist(img, nbins=nBins)
    img *= mx
    if intType:
        img = img.astype(intType)
    return img
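A small illustrative call on 16-bit data (random values, just to show that the input dtype is restored on return):

import numpy as np

img16 = (np.random.rand(128, 128) * 65535).astype(np.uint16)
eq = _equalizeHistogram(img16)
print(eq.dtype)  # uint16: the original integer dtype is restored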
def process(self, im):
    return exposure.equalize_hist(im)
# find_ventricle_location.py, from project Multi-views-fusion (author: luogongning)
def draw_center_for_check(dcm_path, id, slicelocalization, PixelSpacing, sax, point, points):
    debug_folder = os.path.join('E:', 'calc', 'center_find')
    if not os.path.isdir(debug_folder):
        os.mkdir(debug_folder)
    ds = pydicom.read_file(dcm_path)
    img = convert_to_grayscale_with_increase_brightness_fast(ds.pixel_array, 1)
    raw = int(round(point[0], 0))
    #F_col=float(col)
    col = int(round(point[1], 0))
    #F_raw=float(raw)
    row_spacing = PixelSpacing[0]
    row_spacing = float(row_spacing)
    row_spacing_size = (32.0 * 1.4) / row_spacing
    col_spacing = PixelSpacing[1]
    col_spacing = float(col_spacing)
    col_spacing_size = (32.0 * 1.4) / col_spacing
    L_raw = raw - int(row_spacing_size)
    R_raw = raw + int(row_spacing_size)
    L_col = col - int(col_spacing_size)
    R_col = col + int(col_spacing_size)
    crop_img = img[L_raw:R_raw, L_col:R_col]
    img_shape = (64, 64)
    crop_img = imresize(crop_img, img_shape)
    crop_img = exposure.equalize_hist(crop_img)
    crop_img = crop_img * 255
    crop_img = crop_img.astype(np.uint8)
    #cv2.circle(img, (int(round(point[1], 0)), int(round(point[0], 0))), 5, 255, 3)
    #cv2.circle(img, 15, 25, 5, 255, 3)
    #img = cv2.line(img, (points[1], points[0]), (points[3], points[2]), 127, thickness=2)
    #img = cv2.line(img, (points[5], points[4]), (points[7], points[6]), 127, thickness=2)
    #img = cv2.line(img, (12, 13), (112, 223), 127, thickness=2)
    #img = cv2.line(img, (12,115), (112,256), 127, thickness=2)
    #show_image(img)
    cv2.imwrite(os.path.join(debug_folder, str(id) + '_' + sax + '_' + slicelocalization + '.jpg'), crop_img)
def crop_resize_check(dcm_path, id, slicelocalization, PixelSpacing, sax, point, imagename):
    dcm_path = dcm_path.replace('\\', '/')
    debug_folder = os.path.join('E:', 'calc', 'checkEDES')
    if not os.path.isdir(debug_folder):
        os.mkdir(debug_folder)
    ds = dicom.read_file(dcm_path)
    img = convert_to_grayscale_with_increase_brightness_fast(ds.pixel_array, 1)
    raw = int(round(point[0], 0))
    #F_col=float(col)
    col = int(round(point[1], 0))
    #F_raw=float(raw)
    row_spacing = PixelSpacing[0]
    row_spacing = float(row_spacing)
    row_spacing_size = (32.0 * 1.4) / row_spacing
    col_spacing = PixelSpacing[1]
    col_spacing = float(col_spacing)
    col_spacing_size = (32.0 * 1.4) / col_spacing
    L_raw = raw - int(row_spacing_size)
    R_raw = raw + int(row_spacing_size)
    L_col = col - int(col_spacing_size)
    R_col = col + int(col_spacing_size)
    crop_img = img[L_raw:R_raw, L_col:R_col]
    img_shape = (64, 64)  # was commented out but is used below; same value as in draw_center_for_check
    crop_img = imresize(crop_img, img_shape)
    crop_img = exposure.equalize_hist(crop_img)
    crop_img = crop_img * 255
    crop_img = crop_img.astype(np.uint8)
    #cv2.circle(img, (int(round(point[1], 0)), int(round(point[0], 0))), 5, 255, 3)
    #cv2.circle(img, 15, 25, 5, 255, 3)
    #img = cv2.line(img, (points[1], points[0]), (points[3], points[2]), 127, thickness=2)
    #img = cv2.line(img, (points[5], points[4]), (points[7], points[6]), 127, thickness=2)
    #img = cv2.line(img, (12, 13), (112, 223), 127, thickness=2)
    #img = cv2.line(img, (12,115), (112,256), 127, thickness=2)
    #show_image(img)
    cv2.imwrite(os.path.join(debug_folder, str(id) + '_' + sax + '_' + imagename + '_' + str(slicelocalization) + '.jpg'), crop_img)
    return crop_img
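Both helpers above derive the crop window from physical size rather than pixel count: PixelSpacing (mm per pixel, from the DICOM header) converts a fixed 32 * 1.4 mm half-window into pixels, so every crop covers the same anatomy regardless of image resolution. A standalone sketch of just that conversion:

def crop_half_window(pixel_spacing_mm, half_size_mm=32.0 * 1.4):
    # Physical half-window (mm) divided by pixel spacing (mm/pixel) gives pixels.
    return int(half_size_mm / float(pixel_spacing_mm))

print(crop_half_window(1.4))   # 32 -> a 64x64-pixel crop at 1.4 mm spacing
print(crop_half_window(0.7))   # 64 -> a 128x128-pixel crop at 0.7 mm spacing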
def visualize_derivatives(image):
    # Compute the gradient first so the return statement below is valid
    # (this shorter variant only plots the Laplacian).
    dx1, dy1 = np.gradient(image)
    gradient = dx1 + 1j * dy1
    laplacian = scipy.ndimage.filters.laplace(image)
    lhist = mean_center(
        blur(exposure.equalize_hist(unitize(laplacian)), 1))
    plt.imshow(lhist,
               origin='lower', interpolation='nearest', cmap='gray',
               extent=(0, 64,) * 2)
    plt.title('Laplacian')
    return gradient, laplacian
def plot_box(box, title=None, path=None, format=None, scale="log", interval="pts", cmap="viridis"):
    """
    This function ...
    :param box:
    :param title:
    :param path:
    :param format:
    :param scale:
    :param interval:
    :param cmap:
    :return:
    """
    # Other new colormaps: plasma, magma, inferno

    # Normalization
    if scale == "log": norm = ImageNormalize(stretch=LogStretch())
    elif scale == "sqrt": norm = ImageNormalize(stretch=SqrtStretch())
    #elif scale == "skimage": norm = exposure.equalize_hist
    else: raise ValueError("Invalid option for 'scale'")

    if interval == "zscale":
        vmin, vmax = ZScaleInterval().get_limits(box)
    elif interval == "pts":
        # Determine the maximum value in the box and the minimum value for plotting
        vmin = max(np.nanmin(box), 0.)
        vmax = 0.5 * (np.nanmax(box) + vmin)
    elif isinstance(interval, tuple):
        vmin = interval[0]
        vmax = interval[1]
    else: raise ValueError("Invalid option for 'interval'")

    #if scale == "skimage":
    #    vmin = 0.0
    #    vmax = 1.0

    # Make the plot
    plt.figure(figsize=(7, 7))
    plt.imshow(box, origin="lower", interpolation="nearest", vmin=vmin, vmax=vmax, norm=norm, cmap=cmap)
    plt.xlim(0, box.shape[1] - 1)
    plt.ylim(0, box.shape[0] - 1)

    if title is not None: plt.title(title)

    if path is None: plt.show()
    else: plt.savefig(path, format=format)

    plt.close()
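A hedged usage sketch for plot_box on synthetic data, assuming the module-level imports implied by the body above (numpy, matplotlib.pyplot, and astropy.visualization's ImageNormalize, LogStretch, SqrtStretch and ZScaleInterval) are in place:

import numpy as np

box = np.random.gamma(shape=2.0, scale=1.0, size=(128, 128))
plot_box(box, title="synthetic box", scale="log", interval="zscale")
plot_box(box, scale="sqrt", interval=(0.0, 5.0), cmap="magma")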
# -----------------------------------------------------------------
# create_gender_neutral_data.py, from project Gender-Age-Classification-CNN (author: naritapandhe)
def read_fold(fold_csv_prefix, fold_image_prefix, fold_names):
    width = 256
    height = 256
    i = 0
    for fold in fold_names:
        print("Reading fold: %s" % fold)
        df = pd.read_csv(fold_csv_prefix + fold + '.csv')
        inputimages = []
        genders = []
        ages = []
        for index, row in df.iterrows():
            yaw_angle = row['fiducial_yaw_angle']
            gender = row['gender']
            age = row['age']
            if ((gender != 'u') and (gender != 'Nan') and (age != 'None') and
                    (gender != ' ') and (age != ' ') and
                    (yaw_angle >= -45) and (yaw_angle <= 45)):
                folder_name = row['user_id']
                image_name = row['original_image']
                face_id = row['face_id']
                age_tuple = make_tuple(age)
                age_id = get_age_range_id(age_tuple)
                image_path = fold_image_prefix + folder_name + '/landmark_aligned_face.' + str(face_id) + '.' + image_name
                image = Image.open(image_path)
                # Resize image
                image = image.resize((width, height), PIL.Image.ANTIALIAS)
                image_arr = np.array(image)
                #image_arr = exposure.equalize_hist(image_arr)
                if gender == 'm':
                    g = 0
                else:
                    g = 1
                inputimages.append(image_arr)
                genders.append(g)
                ages.append(age_id)
        print('Done: {0}/{1} folds'.format(i, len(fold_names)))
        i = i + 1
        print('Fold Name: %s' % fold)
        print('Images: %i, Gender: %i, Ages: %i' % (len(inputimages), len(genders), len(ages)))
        print('')
        currDict = {'fold_name': fold, 'images': inputimages, 'genders': genders, 'ages': ages}
        save_pickle(currDict, fold, '/home/narita/Documents/pythonworkspace/data-science-practicum/gender-age-classification/gender_neutral_data/')
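If the commented-out equalization step above were enabled, note that exposure.equalize_hist on an H x W x 3 uint8 array equalises all channels jointly and returns a float array in [0, 1], so downstream code would receive floats rather than uint8 pixels; a quick standalone check:

import numpy as np
from skimage import exposure

arr = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
eq = exposure.equalize_hist(arr)
print(eq.dtype, eq.min(), eq.max())   # float64, values in [0, 1]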