def average(self, aligned=True):
    '''Average the image stack (aligned or raw) in Lab space and store the result in self.avg.'''
if aligned:
dataset = self.algimgs
else:
dataset = self.imgs
s = MyRGBImg(np.zeros(dataset[0].data.shape))
s = color.rgb2lab(s.data)
for i, picture in enumerate(dataset):
print("Averaging image: " , i)
# convert both to lab
im = color.rgb2lab(picture.data)
#perform operations
s += im
s = s / float(len(dataset))
self.avg = MyRGBImg(color.lab2rgb(s))
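For reference, a standalone sketch of the same Lab-space averaging, assuming the inputs are plain float RGB arrays rather than MyRGBImg wrappers (the function name below is illustrative):

import numpy as np
from skimage import color

def average_in_lab(images):
    # Accumulate every frame in Lab space, then convert the mean back to RGB.
    acc = np.zeros_like(color.rgb2lab(images[0]))
    for img in images:
        acc += color.rgb2lab(img)
    return color.lab2rgb(acc / len(images))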
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img_original = self.transform(img)
img_original = np.asarray(img_original)
img_lab = rgb2lab(img_original)
img_lab = (img_lab + 128) / 255
img_ab = img_lab[:, :, 1:3]
img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1)))
img_original = rgb2gray(img_original)
img_original = torch.from_numpy(img_original)
if self.target_transform is not None:
target = self.target_transform(target)
return (img_original, img_ab), target
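The same preprocessing written as a stand-alone helper for a single uint8 RGB array (the function name is illustrative, not part of the dataset class):

import torch
from skimage.color import rgb2lab, rgb2gray

def split_gray_and_ab(rgb):
    # rgb: (H, W, 3) uint8 array; returns the grayscale network input and the
    # normalized a/b target channels, mirroring __getitem__ above.
    lab = (rgb2lab(rgb) + 128) / 255                            # shift a/b into [0, 1]
    ab = torch.from_numpy(lab[:, :, 1:3].transpose((2, 0, 1)))  # (2, H, W)
    gray = torch.from_numpy(rgb2gray(rgb))                      # (H, W) in [0, 1]
    return gray, ab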
def extract_colour_histogram(image, labels, n_bins=8, use_lab=False):
ih, iw, _ = image.shape
n_labels = labels.max()+1
_range = np.array([[0, 256], [0, 256], [0, 256]], dtype='float') # for rgb histograms
if use_lab:
image = rgb2lab(image)
_range[:] = [[0,100],[-500*25/29, 500*25/29], [-200*25/29, 200*25/29]]
hist = np.zeros((n_labels, n_bins**3))
mask = np.zeros((ih, iw), dtype='bool')
for i in range(n_labels):
mask[:] = labels == i
yy, xx = mask.nonzero()
pixels = image[yy, xx, :]
hist[i, :] = np.histogramdd(sample=pixels, bins=n_bins, range=_range)[0].flat
return hist
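A usage sketch: the labels map can come from any over-segmentation; SLIC superpixels from skimage are assumed here:

from skimage import data, segmentation

image = data.astronaut()                          # (H, W, 3) uint8 sample image
labels = segmentation.slic(image, n_segments=50)  # integer superpixel label per pixel
hist = extract_colour_histogram(image, labels, n_bins=8, use_lab=True)
print(hist.shape)                                 # (n_labels, 512)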
def ransac_guess_color(colors, n_iter=50, std=2):
colors = rgb2lab(colors)
colors = colors.reshape(-1, 3)
masked = colors[:, 0] < 0.1
colors = colors[~masked]
assert len(colors) > 0, "Must have at least one color"
best_mu = np.array([0, 0, 0])
best_n = 0
for k in range(n_iter):
subset = colors[np.random.choice(np.arange(len(colors)), 1)]
mu = subset.mean(0)
#inliers = (((colors - mu) ** 2 / std) < 1).all(1)
inliers = ((np.sqrt(np.sum((colors - mu)**2, axis=1)) / std) < 1)
mu = colors[inliers].mean(0)
n = len(colors[inliers])
if n > best_n:
best_n = n
best_mu = mu
#import ipdb; ipdb.set_trace()
best_mu = np.squeeze(lab2rgb(np.array([[best_mu]])))
return best_mu
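Usage sketch, assuming `colors` is any float RGB array in [0, 1], for example a small image region:

from skimage import data, img_as_float

patch = img_as_float(data.coffee())[50:100, 50:100]   # arbitrary RGB region
print(ransac_guess_color(patch, n_iter=50, std=2))     # dominant colour as RGB in [0, 1]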
def applyTexture(x, y, texture = texture_input):
    text = imread(texture)
height,width = text.shape[:2]
xmin, ymin = amin(x),amin(y)
xmax, ymax = amax(x),amax(y)
scale = max(((xmax - xmin + 2)/height),((ymax - ymin + 2)/width))
text = imresize(text, scale)
# print text.shape[:2]
# print xmax - xmin +2, ymax - ymin+2
X = (x-xmin).astype(int)
Y = (y-ymin).astype(int)
val1 = color.rgb2lab((text[X, Y]/255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
val2 = color.rgb2lab((im[x, y]/255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val2[:,0]), mean(val2[:,1]), mean(val2[:,2])
val2[:, 0] = np.clip(val2[:, 0] - L + val1[:,0], 0, 100)
val2[:, 1] = np.clip(val2[:, 1] - A + val1[:,1], -127, 128)
val2[:, 2] = np.clip(val2[:, 2] - B + val1[:,2], -127, 128)
im[x,y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3)*255
# points = np.loadtxt('nailpoint_5')
Source: comicolorization_task.py (project: Comicolorization, author: DwangoMediaVillage)
def __call__(self, image, test):
image_data = numpy.asarray(image, dtype=numpy.float32)[:, :, :3]
rgb_image_data = image_data.transpose(2, 0, 1)
lab_image_data = rgb2lab(image_data / 255).transpose(2, 0, 1).astype(numpy.float32)
luminous_image_data = lab_image_data[0].astype(numpy.uint8)
try:
th = threshold_otsu(luminous_image_data)
    except Exception:
import traceback
print(traceback.format_exc())
th = 0
linedrawing = (luminous_image_data > th).astype(numpy.float32)
linedrawing = numpy.expand_dims(linedrawing, axis=0)
return lab_image_data, linedrawing, rgb_image_data
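Usage sketch (the transform above is assumed to be an instance named `make_linedrawing`; any PIL RGB image works as input):

from PIL import Image
from skimage import data

pil_image = Image.fromarray(data.chelsea())        # sample RGB image as PIL
lab, linedrawing, rgb = make_linedrawing(pil_image, test=True)
print(lab.shape, linedrawing.shape, rgb.shape)     # (3, H, W), (1, H, W), (3, H, W)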
def _draw_process(self, small_input_image, big_input_image):
lab = rgb2lab(numpy.array(small_input_image))
lab[:, :, 0] /= 100
small_image = self.drawer.draw(
input_images_array=lab.astype(numpy.float32).transpose(2, 0, 1)[numpy.newaxis],
rgb_images_array=numpy.array(self.reference_image, dtype=numpy.float32).transpose(2, 0, 1)[numpy.newaxis],
)[0]
small_image = small_image.convert('RGB')
if self.drawer_sr is not None:
drawn_panel_image = self._superresolution_process(small_image, big_input_image)
else:
drawn_panel_image = small_image
return drawn_panel_image
Source: single_File_For_ColorizationModel_For_Not_OOP_Fan.py (project: Deep-learning-Colorization-for-visual-media, author: OmarSayedMostafa)
def Get_Batch_Chrominance():
    '''Convert every image in the batch to the Lab colorspace and normalize the chrominance values to [0, 1].
    Return:
        AbColores_values: array [batch_size, 224, 224, 2]; channel 0 -> a value, channel 1 -> b value
    '''
global AbColores_values
global ColorImages_Batch
AbColores_values = np.empty((Batch_size,224,224,2),"float32")
for indx in range(Batch_size):
lab = color.rgb2lab(ColorImages_Batch[indx])
Min_valueA = np.amin(lab[:,:,1])
Max_valueA = np.amax(lab[:,:,1])
Min_valueB = np.amin(lab[:,:,2])
Max_valueB = np.amax(lab[:,:,2])
AbColores_values[indx,:,:,0] = Normalize(lab[:,:,1],-128,127)
AbColores_values[indx,:,:,1] = Normalize(lab[:,:,2],-128,127)
Source: Utilities.py (project: Deep-learning-Colorization-for-visual-media, author: OmarSayedMostafa)
def Get_Batch_Chrominance(ColorImages_Batch, Batch_Size):
    '''Convert every image in the batch to the Lab colorspace and normalize the chrominance values to [0, 1].
    Return:
        AbColores_values: array [batch_size, 224, 224, 2]; channel 0 -> a value, channel 1 -> b value
    '''
AbColores_values = np.empty((Batch_Size, MainHeight, MainWidth, 2),"float32")
for indx in range(Batch_Size):
lab = color.rgb2lab(ColorImages_Batch[indx])
AbColores_values[indx,:,:,0] = Normalize(lab[:,:,1],NormalizationRange[0],NormalizationRange[1])
AbColores_values[indx,:,:,1] = Normalize(lab[:,:,2],NormalizationRange[0],NormalizationRange[1])
return AbColores_values
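`Normalize` itself is not shown in these excerpts; a plausible min-max implementation consistent with the docstring above (mapping a channel from [MinValue, MaxValue] to [0, 1]) would be:

def Normalize(channel, MinValue, MaxValue):
    # Hypothetical helper: linearly map values from [MinValue, MaxValue] to [0, 1].
    return (channel - MinValue) / float(MaxValue - MinValue)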
#----------------------------------------------------------------------------------------------
Source: myrgb2lab.py (project: Linear-Spectral-Clustering-Superpixel-Segmentation-Algorithm_Python, author: shifvb)
def myrgb2lab(I: np.ndarray, row_num: int, col_num: int):
"""
    Convert an RGB image to Lab and rescale each channel to the range [0, 255].
    :param I: RGB image, shape (row_num, col_num, 3)
    :param row_num: number of image rows
    :param col_num: number of image columns
    :return:
L: L channel, range from 0 to 255, dtype: uint8, shape: (row_num * col_num,)
a: a channel, range from 0 to 255, dtype: uint8, shape: (row_num * col_num,)
b: b channel, range from 0 to 255, dtype: uint8, shape: (row_num * col_num,)
"""
lab_img = color.rgb2lab(I).transpose([2, 1, 0])
L = lab_img[0].copy().reshape([row_num * col_num])
a = lab_img[1].copy().reshape([row_num * col_num])
b = lab_img[2].copy().reshape([row_num * col_num])
L /= (100 / 255) # L is [0, 100], change it to [0, 255]
L += 0.5
a += 128 + 0.5 # A is [-128, 127], change it to [0, 255]
b += 128 + 0.5 # B is [-128, 127], change it to [0, 255]
return L.astype(np.uint8), a.astype(np.uint8), b.astype(np.uint8)
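Usage sketch: the function expects the image dimensions alongside the array and returns the three channels as flat uint8 vectors:

from skimage import data

img = data.astronaut()                 # (H, W, 3) uint8
rows, cols = img.shape[:2]
L, a, b = myrgb2lab(img, rows, cols)
print(L.shape, L.dtype)                # (H * W,) uint8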
def _global(self, img):
h, w = img.shape[:2]
    mask = np.zeros((h, w), dtype=bool)
max_distance = self._settings['max_distance']
if self._settings['use_lab']:
img = skc.rgb2lab(img)
# Compute euclidean distance of each corner against all other pixels.
corners = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
for color in (img[i, j] for i, j in corners):
norm = np.sqrt(np.sum(np.square(img - color), 2))
# Add to the mask pixels close to one of the corners.
mask |= norm < max_distance
return mask
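The same corner-based background mask as a free function, assuming a float RGB image; the distance threshold used here is only an example value:

import numpy as np
from skimage import color as skc

def corner_background_mask(img, max_distance=20.0, use_lab=True):
    # Mark pixels whose Lab (or RGB) distance to any of the four corner colours
    # falls below max_distance; such pixels are treated as background.
    if use_lab:
        img = skc.rgb2lab(img)
    mask = np.zeros(img.shape[:2], dtype=bool)
    for i, j in [(0, 0), (-1, 0), (0, -1), (-1, -1)]:
        dist = np.sqrt(np.sum(np.square(img - img[i, j]), axis=2))
        mask |= dist < max_distance
    return mask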
def _transform(self, filename):
try:
image = misc.imread(filename)
        if len(image.shape) < 3:  # make sure images are of shape (h, w, 3)
            image = np.stack([image] * 3, axis=-1)
if self.image_options.get("resize", False) and self.image_options["resize"]:
resize_size = int(self.image_options["resize_size"])
resize_image = misc.imresize(image,
[resize_size, resize_size])
else:
resize_image = image
if self.image_options.get("color", False):
option = self.image_options['color']
if option == "LAB":
resize_image = color.rgb2lab(resize_image)
elif option == "HSV":
resize_image = color.rgb2hsv(resize_image)
    except Exception:
        print("Error reading file: %s" % filename)
raise
return np.array(resize_image)
def quantize(cls, raster, n_colors, **kwargs):
lab_raster = color.rgb2lab(raster)
lab_quantized_raster = super(RGBtoLABmixin, cls).quantize(
lab_raster, n_colors, **kwargs)
quantized_raster = (color.lab2rgb(lab_quantized_raster) * 255).astype(
'uint8')
return quantized_raster
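The mixin above delegates the actual clustering to its base class; a self-contained sketch of the same quantize-in-Lab idea using k-means (scikit-learn assumed available) could look like:

from sklearn.cluster import KMeans
from skimage import color

def quantize_lab(raster, n_colors):
    # raster: (H, W, 3) uint8 RGB. Cluster pixels in Lab, snap each pixel to its
    # cluster centre, then convert back to uint8 RGB.
    lab = color.rgb2lab(raster)
    flat = lab.reshape(-1, 3)
    km = KMeans(n_clusters=n_colors, n_init=4, random_state=0).fit(flat)
    quantized = km.cluster_centers_[km.labels_].reshape(lab.shape)
    return (color.lab2rgb(quantized) * 255).astype('uint8')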
def applyNailPolish(x , y , r = Rg, g = Gg, b = Bg):
val = color.rgb2lab((im[x, y]/255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val[:,0]), mean(val[:,1]), mean(val[:,2])
L1, A1, B1 = color.rgb2lab(np.array((r/255., g/255., b/255.)).reshape(1, 1, 3)).reshape(3,)
ll, aa, bb = L1 - L, A1 - A, B1 - B
val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3)*255
def rgb_to_lab(x):
"""Converts RGB image to the lab colorspace [0; 100] [-127; 128] [-128; 127]."""
return rgb2lab(x)
def __init__(self, image, pred, params):
""" pred: [C x H x W] """
#assert pred.shape[1:3] == prediction_shape
prediction_shape = pred.shape[1:3]
self.prediction_shape = prediction_shape
self.npixels = np.prod(prediction_shape[0:2])
self.nlabels = pred.shape[0]
self.params = params
# Convert from [C x H x W] to [(H*W) x C]
unary_probs = pred.reshape(self.nlabels, self.npixels).transpose()
# remove 'other' class
#unary_probs[:, config.NAME_TO_LABEL['other']] = 0.0
self.unary_costs = -np.log(np.clip(unary_probs + params['unary_prob_padding'], 1e-20, 1e20))
self.unary_costs = np.copy(self.unary_costs, order='C').astype(np.float32)
if image.shape[0:2] == prediction_shape:
self.im_lab = rgb2lab(image)
else:
self.im_lab = rgb2lab(transform.resize(image, prediction_shape[0:2]))
    # scale features to have dynamic range ~10-20ish
self.scaled_positions = (
np.indices(prediction_shape[0:2]).astype(np.float32) *
10.0 / float(np.min(prediction_shape[0:2]))
)
self.bilateral_features = np.zeros((self.npixels, 5), dtype=np.float32)
self.bilateral_features[:, 0] = self.scaled_positions[0].ravel()
self.bilateral_features[:, 1] = self.scaled_positions[1].ravel()
self.bilateral_features[:, 2] = self.im_lab[:, :, 0].ravel() / 10.0
self.bilateral_features[:, 3] = self.im_lab[:, :, 1].ravel() / 10.0
self.bilateral_features[:, 4] = self.im_lab[:, :, 2].ravel() / 10.0
def __init__(self, image, pred, params):
""" pred: [C x H x W] """
#assert pred.shape[1:3] == prediction_shape
prediction_shape = pred.shape[1:3]
self.prediction_shape = prediction_shape
self.npixels = np.prod(prediction_shape[0:2])
self.nlabels = pred.shape[0]
self.params = params
# Convert from [C x H x W] to [(H*W) x C], remove 'other' class
unary_probs = pred.reshape(self.nlabels, self.npixels).transpose()
unary_probs[:, config.NAME_TO_LABEL['other']] = 0.0
self.unary_costs = -np.log(np.clip(unary_probs + params['unary_prob_padding'], 1e-20, 1e20))
self.unary_costs = np.copy(self.unary_costs, order='C').astype(np.float32)
if image.shape[0:2] == prediction_shape:
self.im_lab = rgb2lab(image)
else:
self.im_lab = rgb2lab(transform.resize(image, prediction_shape[0:2]))
    # scale features to have dynamic range ~10-20ish
self.scaled_positions = (
np.indices(prediction_shape[0:2]).astype(np.float32) *
10.0 / float(np.min(prediction_shape[0:2]))
)
self.bilateral_features = np.zeros((self.npixels, 5), dtype=np.float32)
self.bilateral_features[:, 0] = self.scaled_positions[0].ravel()
self.bilateral_features[:, 1] = self.scaled_positions[1].ravel()
self.bilateral_features[:, 2] = self.im_lab[:, :, 0].ravel() / 10.0
self.bilateral_features[:, 3] = self.im_lab[:, :, 1].ravel() / 10.0
self.bilateral_features[:, 4] = self.im_lab[:, :, 2].ravel() / 10.0
#position_features = np.zeros((npixels, 2), dtype=np.float32)
#position_features[:, 0] = scaled_positions[0].ravel() * (params['position_theta_xy'] / mindim)
#position_features[:, 1] = scaled_positions[1].ravel() * (params['position_theta_xy'] / mindim)
def __call__(self, image, test):
# type: (Image.Image, any) -> any
image = numpy.asarray(image, dtype=self._dtype)[:, :, :3] / 255 # rgb
image = rgb2lab(image).astype(self._dtype).transpose(2, 0, 1)
if self._normalize:
image /= 50
image[0] -= 1
return image
def _calc_rgb2lab_min_max():
"""
:return: ([L_min, a_min, b_min], [L_max, a_max, b_max])
"""
num_space = 16
size_image = num_space * num_space * num_space
values_pixel = numpy.linspace(0, 1, num_space)
image_array = [[r, g, b] for r in values_pixel for g in values_pixel for b in values_pixel]
image_array = numpy.vstack(image_array).reshape((1, size_image, 3))
image_array = rgb2lab(image_array) # illuminant='D65'
return image_array.min(axis=1).squeeze(), image_array.max(axis=1).squeeze()
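Calling the helper gives the Lab range reachable from RGB; since the 16-step grid includes the corners of the RGB cube, the result is essentially the familiar sRGB/D65 bounds:

mins, maxs = _calc_rgb2lab_min_max()
print(mins)   # roughly [0, -86, -108]  (L_min, a_min, b_min)
print(maxs)   # roughly [100, 98, 94]   (L_max, a_max, b_max)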
def get_example(self, i):
# type: (any) -> typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
rgb_image_data, gray_image_data, _ = self.base[i]
dtype = rgb_image_data.dtype
image_data = rgb_image_data.transpose(1, 2, 0) / 255
lab_image_data = rgb2lab(image_data).transpose(2, 0, 1).astype(dtype)
luminous_image_data = numpy.expand_dims(lab_image_data[0], axis=0)
return lab_image_data, luminous_image_data, rgb_image_data
def image_a_b_gen(batch_size):
for batch in datagen.flow(Xtrain, batch_size=batch_size):
        if batch is None:
break
lab_batch = rgb2lab(batch)
X_batch = lab_batch[:,:,:,0]
Y_batch = lab_batch[:,:,:,1:]
yield (X_batch.reshape(X_batch.shape+(1,)), Y_batch)
# Train model
def apply_nail_polish(x, y, r=Rg, g=Gg, b=Bg):
val = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3, )
ll, aa, bb = L1 - L, A1 - A, B1 - B
val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
def apply_texture(x, y):
xmin, ymin = amin(x), amin(y)
X = (x - xmin).astype(int)
Y = (y - ymin).astype(int)
val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
def apply_blush_color(r=Rg, g=Gg, b=Bg):
global im
val = color.rgb2lab((im / 255.)).reshape(width * height, 3)
L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3, )
ll, aa, bb = (L1 - L) * intensity, (A1 - A) * intensity, (B1 - B) * intensity
val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
im = color.lab2rgb(val.reshape(height, width, 3)) * 255
def load_demo_image():
im = color.rgb2lab(Image.open('../object_recognition/img/Patern_test.jpg')) / 100.0
return im[..., 0]
def trans(self, img):
rst = color.rgb2lab(img)
    # shift values from roughly [-100, 100] into [0, 255], clipping to avoid uint8 wrap-around
    rst = np.clip((rst + 100) * (255 / 200.0), 0, 255)
    return rst.astype(np.uint8)
def preprocess(image, use_rgb=False, use_hsv=False, norm=True):
"""
    Preprocesses an RGB image before extracting super-regions from it. Improves
the quality of the super-regions by transforming to the L*a*b colorspace
and normalizing the image.
Args:
image: numpy (H, W) or (H, W, 3) array
RGB image to be preprocessed.
        use_rgb: bool
            Whether to append RGB channels to the L*a*b channels.
        use_hsv: bool
            Whether to append HSV channels to the L*a*b channels.
        norm: bool
            Whether to standardize individual channels.
Result:
        result: numpy (H * W, K) array
            where K is 3, 6 or 9 depending on `use_rgb` and `use_hsv`, with
            channel-specific normalization to enhance distances.
"""
if image.ndim == 2 or image.shape[2] == 1:
data = (np.squeeze(image) - image.mean()) / image.std()
return data
assert image.shape[2] == 3, 'Error: invalid image format'
result = color.rgb2lab(image).reshape(-1, 3)
if use_rgb:
        result = np.column_stack((result, image.reshape(-1, 3)))
if use_hsv:
        result = np.column_stack((result, color.rgb2hsv(image).reshape(-1, 3)))
# Standardize channels and reshape in-place
if norm:
result = (result - result.mean(0)) / result.std(0)
return result.astype(np.float32)
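Usage sketch on a sample image; with both flags set the feature matrix has 9 columns (Lab + RGB + HSV):

from skimage import data, img_as_float

image = img_as_float(data.coffee())
features = preprocess(image, use_rgb=True, use_hsv=True, norm=True)
print(features.shape)    # (H * W, 9)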
def __call__(self, image: numpy.ndarray, test):
from scipy import stats
def dilate_diff(image, range, iterations=1):
dil = cv2.dilate(image, numpy.ones((range, range), numpy.float32), iterations=iterations)
image = cv2.absdiff(image, dil)
return image
dtype = image.dtype
rgb = (image.transpose(1, 2, 0) + 1) / 2
lab = rgb2lab(rgb) / 100
image = lab[:, :, 0]
image = dilate_diff(image, 3).astype(numpy.float32)
rand = 0.2 + (numpy.random.randn(1) / 20 if not test else 0)
rand = 0.000001 if rand <= 0 else rand
image = cv2.GaussianBlur(image, (5, 5), rand)
rand = 0.4 + (numpy.random.randn(1) / 20 if not test else 0)
rand = 0.000001 if rand <= 0 else rand
image = cv2.GaussianBlur(image, (5, 5), rand)
rand = numpy.random.randn(1) / 40 if not test else 0
image = numpy.power(image, 0.8 + rand)
image = image.astype(dtype)[numpy.newaxis]
return image
def _find_source_patch(self, target_pixel):
target_patch = self._get_patch(target_pixel)
height, width = self.working_image.shape[:2]
patch_height, patch_width = self._patch_shape(target_patch)
best_match = None
best_match_difference = 0
lab_image = rgb2lab(self.working_image)
for y in range(height - patch_height + 1):
for x in range(width - patch_width + 1):
source_patch = [
[y, y + patch_height-1],
[x, x + patch_width-1]
]
if self._patch_data(self.working_mask, source_patch) \
.sum() != 0:
continue
difference = self._calc_patch_difference(
lab_image,
target_patch,
source_patch
)
if best_match is None or difference < best_match_difference:
best_match = source_patch
best_match_difference = difference
return best_match
def rgb2lch(rgb):
"""Convert RBG to LCH colorspace (via LAB)
Input and output are in (bands, cols, rows) order
"""
# reshape for skimage (bands, cols, rows) -> (cols, rows, bands)
srgb = np.swapaxes(rgb, 0, 2)
# convert colorspace
lch = lab2lch(rgb2lab(srgb))
# return in (bands, cols, rows) order
return np.swapaxes(lch, 2, 0)
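Usage sketch, assuming the raster is in (bands, cols, rows) order as the docstring states:

import numpy as np
from skimage import data

rgb = np.swapaxes(data.chelsea() / 255.0, 0, 2)   # (bands, cols, rows) float RGB
lch = rgb2lch(rgb)
print(lch.shape)                                  # same (bands, cols, rows) shape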