def my_label2rgboverlay(labels, colors, image, alpha=0.2):
"""
Generates image with segmentation labels on top
Parameters
----------
labels: labels of one image (0, 1)
colors: colormap
image: image (0, 1, c), where c=3 (rgb)
alpha: transparency
"""
image_float = gray2rgb(img_as_float(rgb2gray(image) if
image.shape[2] == 3 else
np.squeeze(image)))
label_image = my_label2rgb(labels, colors)
output = image_float * alpha + label_image * (1 - alpha)
return output
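A minimal usage sketch, assuming my_label2rgb (not shown in this excerpt) colorizes the label map with the supplied colormap; the arrays below are purely illustrative:

import numpy as np
from skimage import img_as_float
from skimage.color import gray2rgb, rgb2gray  # required by the function above

image = np.random.rand(64, 64, 3)            # dummy RGB image in [0, 1]
labels = np.zeros((64, 64), dtype=np.int64)  # dummy label map: background = 0
labels[16:48, 16:48] = 1                     # a square region with label 1
colors = [(0, 0, 0), (1, 0, 0)]              # label 0 -> black, label 1 -> red
overlay = my_label2rgboverlay(labels, colors, image, alpha=0.2)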
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img_original = self.transform(img)
img_original = np.asarray(img_original)
img_lab = rgb2lab(img_original)
img_lab = (img_lab + 128) / 255
img_ab = img_lab[:, :, 1:3]
img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1)))
img_original = rgb2gray(img_original)
img_original = torch.from_numpy(img_original)
if self.target_transform is not None:
target = self.target_transform(target)
return (img_original, img_ab), target
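A quick sanity check of the Lab normalization used above: skimage's rgb2lab returns L in [0, 100] and a/b roughly within [-128, 127], so (lab + 128) / 255 maps every channel into approximately [0, 1].

import numpy as np
from skimage.color import rgb2lab

rgb = np.random.rand(32, 32, 3)   # any valid RGB image in [0, 1]
lab = (rgb2lab(rgb) + 128) / 255
assert 0.0 <= lab.min() and lab.max() <= 1.0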
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
img_scale = img.copy()
img_original = img
img_scale = scale_transform(img_scale)
img_scale = np.asarray(img_scale)
img_original = np.asarray(img_original)
img_scale = rgb2gray(img_scale)
img_scale = torch.from_numpy(img_scale)
img_original = rgb2gray(img_original)
img_original = torch.from_numpy(img_original)
return (img_original, img_scale), target
def batch_generator(batch_size, nb_batches):
    # Cycles forever over the global `dataset` (a list of image paths),
    # yielding grayscale batches of shape (batch_size, 1, img_size, img_size).
    batch_count = 0
while True:
pos = batch_count * batch_size
batch = dataset[pos:pos+batch_size]
X = np.zeros((batch_size, 1, img_size, img_size), dtype=np.float32)
for k, path in enumerate(batch):
im = io.imread(path)
im = color.rgb2gray(im)
X[k] = im[np.newaxis, ...]
X = torch.from_numpy(X)
X = Variable(X)
yield X, batch
batch_count += 1
        if batch_count >= nb_batches:  # `>` here would yield one empty batch per pass
            batch_count = 0
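A hedged usage sketch; dataset and img_size are module-level globals the generator reads, and the paths below are hypothetical:

import math

dataset = ['imgs/%d.png' % i for i in range(8)]   # hypothetical file paths
img_size = 64                                     # assumed square image size
gen = batch_generator(batch_size=4, nb_batches=math.ceil(len(dataset) / 4))
X, paths = next(gen)  # X: torch Variable of shape (4, 1, 64, 64)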
def convert_new(fname, target_size):
print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
# draw_top_regions(properties, 3)
# return ba
bbox = properties[0].bbox
bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return np.array(resized)
def convert_new_regions(fname, target_size):
print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
draw_top_regions(properties, 3)
return ba
def convert(fname, target_size):
# print('Processing image: %s' % fname)
img = Image.open(fname)
blurred = img.filter(ImageFilter.BLUR)
ba = np.array(blurred)
ba_gray = rgb2gray(ba)
val = filters.threshold_otsu(ba_gray)
# foreground = (ba_gray > val).astype(np.uint8)
foreground = closing(ba_gray > val, square(3))
# kernel = morphology.rectangle(5, 5)
# foreground = morphology.binary_dilation(foreground, kernel)
labels = measure.label(foreground)
properties = measure.regionprops(labels)
properties = sorted(properties, key=lambda p: p.area, reverse=True)
# draw_top_regions(properties, 3)
# return ba
bbox = properties[0].bbox
bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
cropped = img.crop(bbox)
resized = cropped.resize([target_size, target_size])
return resized
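A hedged usage sketch for the Otsu-threshold-and-crop pipeline above (the path is hypothetical); convert returns a PIL image cropped to the largest foreground region:

resized = convert('fundus/sample.jpeg', target_size=512)  # hypothetical path
resized.save('fundus/sample_512.png')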
def fits2jpg(fname):
hdu_list = fits.open(fname)
image = hdu_list[0].data
image = np.squeeze(image)
img = np.copy(image)
idx = np.isnan(img)
img[idx] = 0
img_clip = np.flipud(img)
sigma = 3.0
# Estimate stats
    mean, median, std = sigma_clipped_stats(img_clip, sigma=sigma, iters=10)  # `iters` is `maxiters` in newer astropy
    # Clip off n sigma points
    img_clip = clip(img_clip, std * sigma)
    if img_clip.shape[0] != 150 or img_clip.shape[1] != 150:
        img_clip = resize(img_clip, (150, 150))
    # img_clip = rgb2gray(img_clip)
    outfile = fname[0:-5] + '.png'
    imsave(outfile, img_clip)
    return img_clip, outfile
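The clip helper is not defined in this excerpt; a minimal sketch consistent with its call site, assuming it limits pixel magnitudes to n sigma, might be:

import numpy as np

def clip(img, limit):
    # Assumption: clamp values to +/- limit, matching clip(img_clip, std * sigma)
    return np.clip(img, -limit, limit)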
def load_next_image(self):
"""
Loads next image from train index for training.
:return: True if the next image is present, else False
"""
if len(self.image_list) == self.image_ptr:
return False
sys.stderr.write('Loaded Image #' + str(self.image_ptr) + ' ...\n')
        self.image = ndimage.imread(self.image_list[self.image_ptr])  # removed in SciPy >= 1.2; imageio.imread is the modern equivalent
is_color = self.__check_color()
if is_color:
self.image = rgb2gray(self.image)
assert self.image.shape == (256, 256), 'Image not 256 x 256'
self.__break_into_jigzaw_pieces()
self.image_ptr += 1
self.tries = 1
return True
def load_next_image(self):
"""
Loads next image from train index for training.
:return: True if the next image is present, else False
"""
if len(self.image_list) == self.image_ptr:
return False
        print('Loaded New Image')
self.image = ndimage.imread(self.image_list[self.image_ptr])
self.image_name = self.image_list[self.image_ptr]
is_color = self.__check_color()
if is_color:
self.image = rgb2gray(self.image)
assert self.image.shape == (256, 256), 'Image not 256 x 256'
self.image_ptr += 1
return True
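__check_color is not shown in either excerpt; a minimal sketch of what it might look like inside the same class, assuming a 3-channel array indicates a color image:

def __check_color(self):
    # Assumption: treat any 3-dimensional, 3-channel array as a color image
    return self.image.ndim == 3 and self.image.shape[-1] == 3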
def postprocess(imgs, size, grayscale=False):
print("Postprocessing images and resize (at %d)" % size)
keyname = ('gray_%d' if grayscale else 'color_%d') % size
for img in imgs:
# Continue if already calculated
if img.isSetByName(keyname):
continue
floatimg = img_as_float(img.image)
floatimg = resize(floatimg, (size, size))
if grayscale:
floatimg = rgb2gray(floatimg)
img.setByName(keyname, floatimg) # expect to return floats
# Augment images
def pre_proc(X):
    '''Preprocess a raw frame.

    Args:
        X(np.array): input frame; converted to grayscale, resized to
            HEIGHT x WIDTH (84x84), and scaled to the 0-255 range

    Returns:
        np.array: the preprocessed frame, as uint8
    '''
    # Taking the pixel-wise max over two consecutive frames removes flickering:
    # x = np.maximum(X, X1)
    # Store as uint8 to keep the replay memory small
    x = np.uint8(resize(rgb2gray(X), (HEIGHT, WIDTH), mode='reflect') * 255)
    return x
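A hedged usage sketch; HEIGHT and WIDTH are module-level constants, assumed to be 84 as in the standard DQN setup:

import numpy as np

HEIGHT, WIDTH = 84, 84                                            # assumed constants
frame = np.random.randint(0, 256, (210, 160, 3), dtype=np.uint8)  # Atari-sized RGB frame
small = pre_proc(frame)                                           # uint8 array of shape (84, 84)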
def create_mask(im_arr, erode=0):
if im_arr.shape[2] == 3:
im_arr = rgb2gray(im_arr)
thresh = 0.05
inv_bin = np.invert(im_arr > thresh)
all_labels = measure.label(inv_bin)
# Select largest object and invert
seg_arr = all_labels == 0
    if erode > 0:
        strel = selem.disk(erode, dtype=bool)  # np.bool was removed from NumPy
        seg_arr = binary_erosion(seg_arr, selem=strel)
    elif erode < 0:
        strel = selem.disk(abs(erode), dtype=bool)
        seg_arr = binary_dilation(seg_arr, selem=strel)
    return seg_arr.astype(bool)
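A hedged usage sketch (the input array is illustrative); positive erode shrinks the mask, negative erode grows it:

import numpy as np

im = np.zeros((100, 100, 3))
im[20:80, 20:80, :] = 1.0        # bright square on a dark background
mask = create_mask(im, erode=2)  # boolean mask of the bright foreground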
def load_img(path, grayscale=False, resize=None, order=1):
# Load image
img = io.imread(path)
# Resize
# print('Desired resize: ' + str(resize))
if resize is not None:
img = skimage.transform.resize(img, resize, order=order,
preserve_range=True)
# print('Final resize: ' + str(img.shape))
# Color conversion
    if len(img.shape) == 2 and not grayscale:
        img = gray2rgb(img)
    elif len(img.shape) > 2 and img.shape[2] == 3 and grayscale:
img = rgb2gray(img)
# Return image
return img
def predict_image(self, test_img):
"""
predicts classes of input image
:param test_img: filepath to image to predict on
:return: segmented result
"""
        img = np.array(rgb2gray(imread(test_img).astype('float')).reshape(5, 216, 160)[-2]) / 256
        plist = []
        # create patches from an entire slice
        img_1 = adjust_sigmoid(img).astype(float)
        edges_1 = adjust_sigmoid(img, inv=True).astype(float)
        edges_2 = img_1
        edges_5_n = normalize(laplace(img_1))
        edges_5_n = img_as_float(img_as_ubyte(edges_5_n))
        plist.append(extract_patches_2d(edges_1, (23, 23)))
        plist.append(extract_patches_2d(edges_2, (23, 23)))
        plist.append(extract_patches_2d(edges_5_n, (23, 23)))
        # list() is needed on Python 3, where zip returns an iterator
        patches = np.array(list(zip(plist[0], plist[1], plist[2])))
        # predict classes of each pixel based on model
        full_pred = self.model.predict_classes(patches)
        fp1 = full_pred.reshape(194, 138)
        return fp1
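normalize is not defined in this excerpt; a minimal sketch consistent with its use on the Laplacian output (min-max scaling into [0, 1], which img_as_ubyte accepts for float input) might be:

import numpy as np

def normalize(arr):
    # Assumption: min-max scale into [0, 1]
    rng = arr.max() - arr.min()
    return (arr - arr.min()) / rng if rng > 0 else np.zeros_like(arr)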
# Source: brain_tumor_segmentation_models.py (project: nn-segmentation-for-lar, author: cvdlab)
def predict_image(self, test_img):
"""
predicts classes of input image
:param test_img: filepath to image to predict on
:return: segmented result
"""
# imgs = io.imread(test_img).astype('float').reshape(5, 216, 160)
imgs = mpimg.imread(test_img).astype('float')
imgs = rgb2gray(imgs).reshape(5, 216, 160)
plist = []
# create patches_to_predict from an entire slice
for img in imgs[:-1]:
if np.max(img) != 0:
img /= np.max(img)
p = extract_patches_2d(img, (33, 33))
plist.append(p)
        # list() is needed on Python 3, where zip returns an iterator
        patches_to_predict = np.array(
            list(zip(plist[0], plist[1], plist[2], plist[3])))
# predict classes of each pixel based on model
full_pred = self.model.predict_classes(patches_to_predict)
fp1 = full_pred.reshape(184, 128)
return fp1
def extract_blur(self, plot=False):
"""
Calculate the variance of the 2nd derivative of the image to get blur.
Input: plot (bool) whether or not to show the image after Laplacian
Output: None"""
# do on grayscale
# check what the mean would give instead of variance
self.bluriness = filters.laplace(color.rgb2gray(self.image)).var()
if plot is True:
sns.set_style("whitegrid", {'axes.grid': False})
self.lap = filters.laplace(color.rgb2gray(self.image))
plt.imshow(self.lap)
plt.title('Laplacian of {}'.format(self.short_name))
plt.show()
plt.imshow(self.lap)
plt.show()
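The same blur metric as a standalone sketch: the variance of the Laplacian response, where lower scores indicate blurrier images (the path is hypothetical):

from skimage import color, filters, io

img = io.imread('photo.jpg')  # hypothetical path
blur_score = filters.laplace(color.rgb2gray(img)).var()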
def extract_symmetry(self):
"""
        Calculate the symmetry of the image by subtracting left from right.
Input: None
Output: None
"""
# currently this is only for horizontal symmetry
if len(self.image.shape) == 3:
height, width, _ = self.image.shape
else:
height, width = self.image.shape
        if width % 2 != 0:
            width -= 1
            pixels = height * width
            left = self.image[:, :width // 2]
            right = self.image[:, width // 2:-1]
        else:
            pixels = height * width
            left = self.image[:, :width // 2]
            right = self.image[:, width // 2:]
        left_gray = color.rgb2gray(left)
        right_gray = color.rgb2gray(right)
        # `//` keeps the slice indices integers on Python 3
        self.symmetry = np.abs(left_gray -
                               np.fliplr(right_gray)).sum() / (pixels * 2.0)
def extract_pos_hog_features(path, num_samples):
features = []
cnt = 0
for dirpath, dirnames, filenames in walk(path):
for my_file in filenames:
            print(path + my_file)
if cnt < num_samples:
cnt = cnt + 1
im = cv2.imread(path + my_file)
                print(im.shape)
                # note: cv2.imread returns BGR; rgb2gray assumes RGB channel order
                image = color.rgb2gray(im)
image = image[17:145, 16:80]
my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
features.append(my_feature)
return features
def extract_neg_hog_features(path, num_samples):
features = []
cnt = 0
for dirpath, dirnames, filenames in walk(path):
for my_file in filenames:
if cnt < num_samples:
cnt = cnt + 1
im = cv2.imread(path + my_file)
image = color.rgb2gray(im)
image = image[17:145, 16:80]
#cv2.imshow('test',image)
#cv2.waitKey(0)
my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
features.append(my_feature)
return features
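A hedged sketch of how these HOG features are typically consumed, here a linear SVM (directories and sample counts are hypothetical):

import numpy as np
from sklearn.svm import LinearSVC

pos = extract_pos_hog_features('data/pos/', num_samples=500)  # hypothetical dirs
neg = extract_neg_hog_features('data/neg/', num_samples=500)
X = np.array(pos + neg)
y = np.array([1] * len(pos) + [0] * len(neg))
clf = LinearSVC().fit(X, y)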
def get_screen(self):
screen = self.env.render(mode='rgb_array')
screen = color.rgb2gray(screen)
screen = imresize(screen, (110, 84))
        screen = screen[18:102, :] / 255.0  # crop 110x84 down to 84x84 and scale to [0, 1]
        return screen.astype(float)  # np.float was removed from NumPy
def scaler(_imageFile):
    _scaled = color.rgb2gray(_imageFile)
    return _scaled
def _load_image_mask(self):
# Sometimes an approximate mask can be produced based on Google range data
# the mask indicates which parts of the image are not facade
mask_path = os.path.join(os.path.dirname(self.path), 'mask.png')
if self.use_mask and os.path.isfile(mask_path):
self.data_mask = rgb2gray(imread(mask_path)) > 0.5
else:
self.data_mask = None
def get_preprocessed_frame(self, observation):
"""
0) Atari frames: 210 x 160
        1) Convert the frame to grayscale
2) Rescale image 110 x 84
3) Crop center 84 x 84 (you can crop top/bottom according to the game)
"""
return resize(rgb2gray(observation), (110, 84))[13:110 - 13, :]
def save_observation(observation):
global observations
observations = np.roll(observations, -input_depth, axis=0)
observations[-input_depth:, ...] = rgb2gray(imresize(observation, screen))[None, ...]
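save_observation shifts a rolling frame buffer with np.roll and writes the newest grayscale frame into the tail; the globals it touches are assumed to be initialized along these lines:

import numpy as np

input_depth = 1                         # frames written per step (assumed)
screen = (84, 84)                       # resize target (assumed)
observations = np.zeros((4,) + screen)  # rolling buffer of 4 frames (assumed)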
def _preprocess_observation(self, obs):
        # crop to 84x84 (keeps the bottom rows) and rescale to uint8
        return np.asarray(resize(rgb2gray(obs), (110, 84))[-84:, :] * 255, dtype=np.uint8)