def average(self, aligned=True):
    '''Averaging procedure; saves the newly calculated average in self.avg.'''
    if aligned:
        dataset = self.algimgs
    else:
        dataset = self.imgs
    s = MyRGBImg(np.zeros(dataset[0].data.shape))
    s = color.rgb2lab(s.data)
    for i, picture in enumerate(dataset):
        print("Averaging image:", i)
        # convert to Lab before accumulating
        im = color.rgb2lab(picture.data)
        s += im
    s = s / float(len(dataset))
    self.avg = MyRGBImg(color.lab2rgb(s))
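For context, a minimal self-contained sketch of the same idea without the MyRGBImg wrapper, assuming `images` is a non-empty list of equally sized float RGB arrays in [0, 1] (the helper name is hypothetical):

import numpy as np
from skimage import color

def average_in_lab(images):
    acc = np.zeros_like(color.rgb2lab(images[0]))
    for img in images:
        acc += color.rgb2lab(img)              # accumulate in Lab space
    return color.lab2rgb(acc / len(images))    # mean in Lab, converted back to RGB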
def ransac_guess_color(colors, n_iter=50, std=2):
    colors = rgb2lab(colors)
    colors = colors.reshape(-1, 3)
    # drop near-black pixels before sampling
    masked = colors[:, 0] < 0.1
    colors = colors[~masked]
    assert len(colors) > 0, "Must have at least one color"

    best_mu = np.array([0, 0, 0])
    best_n = 0
    for k in range(n_iter):
        subset = colors[np.random.choice(np.arange(len(colors)), 1)]
        mu = subset.mean(0)
        # inliers: points within `std` (Euclidean distance in Lab) of the candidate mean
        inliers = (np.sqrt(np.sum((colors - mu) ** 2, axis=1)) / std) < 1
        mu = colors[inliers].mean(0)
        n = len(colors[inliers])
        if n > best_n:
            best_n = n
            best_mu = mu
    best_mu = np.squeeze(lab2rgb(np.array([[best_mu]])))
    return best_mu
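Hypothetical usage of the function above (the patch here is random data standing in for a real image crop; `rgb2lab`, `lab2rgb` and `np` are assumed to be imported as in the original module):

import numpy as np
from skimage.color import rgb2lab, lab2rgb

patch = np.random.rand(32, 32, 3)        # stand-in for a real RGB image crop in [0, 1]
dominant = ransac_guess_color(patch)     # RGB triple in [0, 1] for the largest colour cluster
print(dominant)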
def applyTexture(x, y, texture=texture_input):
    text = imread(texture_input)
    height, width = text.shape[:2]
    xmin, ymin = amin(x), amin(y)
    xmax, ymax = amax(x), amax(y)
    scale = max(((xmax - xmin + 2) / height), ((ymax - ymin + 2) / width))
    text = imresize(text, scale)
    X = (x - xmin).astype(int)
    Y = (y - ymin).astype(int)
    # convert both the texture patch and the target region to Lab
    val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
    val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
    # replace the mean colour of the region with the texture colour, channel by channel
    val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
    val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
    val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
    im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
# points = np.loadtxt('nailpoint_5')
def lab_array_to_image(images_array, normalized=True):
    # type: (numpy.ndarray, any) -> typing.List[Image.Image]
    images_array = images_array.transpose(0, 2, 3, 1)
    if normalized:
        images_array[:, :, :, 0] = images_array[:, :, :, 0] + 1
        images_array *= 50

    def lab2image(image_array):
        image_array = image_array.astype(dtype=numpy.float64)
        rgb = (lab2rgb(image_array) * 255).astype(numpy.uint8)
        image = Image.fromarray(rgb)
        return image

    images = [lab2image(image_array) for image_array in images_array]
    return images
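Hypothetical usage: a batch of normalised Lab arrays of shape (N, 3, H, W), where L has been mapped roughly to [-1, 1]; a zero array therefore decodes to mid-grey. This assumes numpy, PIL.Image and skimage.color.lab2rgb are imported as in the original module:

import numpy

lab_batch = numpy.zeros((2, 3, 64, 64))      # L = 0 -> 50 after denormalisation, a = b = 0
pil_images = lab_array_to_image(lab_batch)   # two mid-grey PIL images
pil_images[0].save('grey.png')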
def itf(X, npx, mode='RGB'):
    # (N, C, npx, npx) in [-1, 1] -> (N, npx, npx, C) in [0, 1]
    X = (X.reshape(-1, nc, npx, npx).transpose(0, 2, 3, 1) + 1.) / 2.
    if mode == 'LAB':
        # denormalise to Lab ranges: L in [0, 100], a/b in [-128, 127]
        X[:, :, :, 0] *= 100
        X[:, :, :, 1] *= 255
        X[:, :, :, 2] *= 255
        X[:, :, :, 1] -= 128
        X[:, :, :, 2] -= 128
        for i in range(X.shape[0]):
            X[i, :, :, :] = color.lab2rgb(X[i, :, :, :].astype('int8'))
    return X.astype('float32')
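The per-image loop above denormalises a generator output back to Lab and then to RGB (`nc` is the module-level channel count). A minimal sketch of the same LAB denormalisation for a single (H, W, 3) array in [-1, 1] with channel order L, a, b:

import numpy as np
from skimage import color

def lab_denorm_to_rgb(x):
    x = (np.asarray(x, dtype=np.float64) + 1.) / 2.   # [-1, 1] -> [0, 1]
    lab = np.empty_like(x)
    lab[..., 0] = x[..., 0] * 100                     # L in [0, 100]
    lab[..., 1] = x[..., 1] * 255 - 128               # a in [-128, 127]
    lab[..., 2] = x[..., 2] * 255 - 128               # b in [-128, 127]
    return color.lab2rgb(lab)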
def quantize(cls, raster, n_colors, **kwargs):
    lab_raster = color.rgb2lab(raster)
    lab_quantized_raster = super(RGBtoLABmixin, cls).quantize(
        lab_raster, n_colors, **kwargs)
    quantized_raster = (color.lab2rgb(lab_quantized_raster) * 255).astype('uint8')
    return quantized_raster
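The mixin above delegates the actual quantisation to its sibling class. As a self-contained illustration of the same idea (cluster in Lab, convert the centroids back to RGB), here is a sketch using scikit-learn's KMeans rather than the project's base quantizer (names are placeholders):

import numpy as np
from skimage import color
from sklearn.cluster import KMeans

def quantize_in_lab(rgb_image, n_colors):
    lab = color.rgb2lab(rgb_image)                           # rgb_image: float RGB in [0, 1]
    flat = lab.reshape(-1, 3)
    km = KMeans(n_clusters=n_colors, n_init=10).fit(flat)
    quantized = km.cluster_centers_[km.labels_].reshape(lab.shape)
    return (color.lab2rgb(quantized) * 255).astype('uint8')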
def applyNailPolish(x, y, r=Rg, g=Gg, b=Bg):
    val = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
    L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3,)
    ll, aa, bb = L1 - L, A1 - A, B1 - B
    val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
    val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
    val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
    im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
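The function above modifies the global image `im` in place at the given pixel indices. A self-contained sketch of the same Lab-shift recolouring trick applied to a whole image (the function and parameter names here are placeholders, not part of the original project):

import numpy as np
from skimage import color

def shift_towards_color(rgb_image, target_rgb):
    lab = color.rgb2lab(rgb_image)                     # rgb_image: float RGB in [0, 1]
    target_lab = color.rgb2lab(np.reshape(target_rgb, (1, 1, 3)).astype(float)).reshape(3)
    lab += target_lab - lab.reshape(-1, 3).mean(axis=0)   # move the mean colour onto the target
    lab[..., 0] = np.clip(lab[..., 0], 0, 100)
    lab[..., 1:] = np.clip(lab[..., 1:], -127, 128)
    return color.lab2rgb(lab)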
def lab_to_rgb(x, eps=1e-8):
    """Converts a Lab image (L in [0, 100], a in [-127, 128], b in [-128, 127]) to a valid RGB image."""
    x_rectified = np.array(x)
    upper_bound = 200 * (x[..., 0] + 16.) / 116. - eps
    x_rectified[..., 2] = np.clip(x_rectified[..., 2], -float('inf'), upper_bound)
    return np.array([lab2rgb(y) * 255. for y in x_rectified]).astype(np.uint8)
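Hypothetical usage: `x` is a batch of Lab images of shape (N, H, W, 3); the b channel is clipped against an L-dependent upper bound so the conversion stays in a valid range for dark pixels:

import numpy as np

lab_batch = np.zeros((4, 32, 32, 3))   # L = a = b = 0, i.e. black
rgb_batch = lab_to_rgb(lab_batch)      # uint8 RGB batch of shape (4, 32, 32, 3)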
Source: single_File_For_ColorizationModel_For_Not_OOP_Fan.py, from the Deep-learning-Colorization-for-visual-media project by OmarSayedMostafa.
def Test(image_Name, flag):
    if not flag:
        saver = tf.train.Saver()
        saver = tf.train.import_meta_graph('Model Directory/our_model.meta')
        saver.restore(sess, 'Model Directory/our_model')
    GreyImagesRezied_Batch = []
    OriginalImage_Batch = []
    Original_Img = Image.open(TestingImgPath + image_Name).convert('RGB').convert('L')
    width, height = Original_Img.size
    Original_Img = Original_Img.resize((int(width / 8) * 8, int(height / 8) * 8), Image.ANTIALIAS)
    Grey_img = Original_Img.resize((224, 224), Image.ANTIALIAS)
    Original_Img = np.asanyarray(Original_Img)
    Grey_img = np.asanyarray(Grey_img)
    img_shape = Original_Img.shape
    Original_reshaped = Original_Img.reshape(img_shape[0], img_shape[1], GreyChannels)  # [H, W, 1]
    OriginalImage_Batch.append(Original_reshaped)   # [#imgs, H, W, 1]
    img_reshaped = Grey_img.reshape(224, 224, GreyChannels)  # [224, 224, 1]
    GreyImagesRezied_Batch.append(img_reshaped)     # [#imgs, 224, 224, 1]
    TestImage = tf.placeholder(dtype=tf.float32, shape=[1, 224, 224, 1])
    original = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 1])
    Prediction = TestModel(original, TestImage, Original_Img.shape[0], Original_Img.shape[1])
    Chrominance = sess.run(Prediction, feed_dict={TestImage: GreyImagesRezied_Batch, original: OriginalImage_Batch})
    NewImg = np.empty((Original_Img.shape[0], Original_Img.shape[1], 3))
    # map the grey values [0, 255] to the Lab L channel range [0, 100]
    for i in range(len(Original_reshaped[:, 1, 0])):
        for j in range(len(Original_reshaped[1, :, 0])):
            NewImg[i, j, 0] = Original_reshaped[i, j, 0] * (100.0 / 255.0)
    NewImg[:, :, 1] = DeNormalize(Chrominance[0, :, :, 0], 0, 1)
    NewImg[:, :, 2] = DeNormalize(Chrominance[0, :, :, 1], 0, 1)
    NewImg = color.lab2rgb(NewImg)
    plt.imsave(ResultImagePath + image_Name[0:-4] + "_Colored" + image_Name[-4:], NewImg)
#------------------------------------------------
Source: Utilities.py, from the Deep-learning-Colorization-for-visual-media project by OmarSayedMostafa.
def Merge_Chrominance_Luminance(Chrominance, Luminance):
    NewImg = np.empty((Luminance.shape[0], Luminance.shape[1], 3))
    # map the grey values [0, 255] to the Lab L channel range [0, 100]
    for i in range(len(Luminance[:, 1, 0])):
        for j in range(len(Luminance[1, :, 0])):
            NewImg[i, j, 0] = Luminance[i, j, 0] * (100.0 / 255.0)
    NewImg[:, :, 1] = DeNormalize(Chrominance[0, :, :, 0], 0, 1)
    NewImg[:, :, 2] = DeNormalize(Chrominance[0, :, :, 1], 0, 1)
    NewImg = color.lab2rgb(NewImg)
    return NewImg
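The nested loop only rescales the luminance channel, so it can be written without Python loops. A vectorised sketch of the same merge (DeNormalize is the project's own helper from Utilities.py and is not redefined here; the function name below is hypothetical):

import numpy as np
from skimage import color

def merge_chrominance_luminance_vectorized(Chrominance, Luminance):
    NewImg = np.empty((Luminance.shape[0], Luminance.shape[1], 3))
    NewImg[:, :, 0] = Luminance[:, :, 0] * (100.0 / 255.0)   # grey [0, 255] -> L [0, 100]
    NewImg[:, :, 1] = DeNormalize(Chrominance[0, :, :, 0], 0, 1)
    NewImg[:, :, 2] = DeNormalize(Chrominance[0, :, :, 1], 0, 1)
    return color.lab2rgb(NewImg)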
#----------------------------------------------------------------------------------------------
def apply_nail_polish(x, y, r=Rg, g=Gg, b=Bg):
    val = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
    L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3,)
    ll, aa, bb = L1 - L, A1 - A, B1 - B
    val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
    val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
    val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
    im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
def apply_texture(x, y):
    xmin, ymin = amin(x), amin(y)
    X = (x - xmin).astype(int)
    Y = (y - ymin).astype(int)
    val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
    val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
    val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
    val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
    val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
    im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
def apply_blush_color(r=Rg, g=Gg, b=Bg):
    global im
    val = color.rgb2lab((im / 255.)).reshape(width * height, 3)
    L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
    L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3,)
    ll, aa, bb = (L1 - L) * intensity, (A1 - A) * intensity, (B1 - B) * intensity
    val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
    val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
    val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
    im = color.lab2rgb(val.reshape(height, width, 3)) * 255
def ConvertGenOutput(X_src, X_est):
    # X_est: estimated AB channels (or a full LAB estimate)
    if X_est.shape[1] == 3:
        LAB_est = np.concatenate((X_src[:, [0], :, :], X_est[:, [1, 2], :, :]), axis=1)
    if X_est.shape[1] == 2:
        LAB_est = np.concatenate((X_src[:, [0], :, :], X_est), axis=1)
    return LAB_est
    # RGB_est = color.lab2rgb(LAB_est)
    # return floatX(RGB_est / 0.5 - 1.)
def LAB2RGB(X):
    for i in range(X.shape[0]):
        X[i, :, :, :] = color.lab2rgb(X[i, :, :, :].astype('int8'))
    return X.astype('float32')
def trans(self, img1, img2, img3):
    rst = np.array((img1.T, img2.T, img3.T), dtype=np.float64)
    rst *= (200 / 255.0)
    rst -= 100
    rst = color.lab2rgb(rst.T)
    rst *= 255
    return rst.astype(np.uint8)
def save_image(image, save_dir, name):
    """
    Save an image by converting it from Lab to RGB.
    :param image: image to save (Lab)
    :param save_dir: location to save the image at
    :param name: prefix for the saved filename
    :return:
    """
    image = color.lab2rgb(image)
    io.imsave(os.path.join(save_dir, name + ".png"), image)
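Hypothetical usage, assuming `color`, `io` and `os` are imported as in the original module and the target directory exists:

import numpy as np

lab_img = np.zeros((64, 64, 3))
lab_img[..., 0] = 50.0            # mid-grey in Lab
save_image(lab_img, '.', 'grey')  # writes ./grey.png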
def lch2rgb(lch):
    """Convert LCH to RGB colorspace (via LAB).
    Input and output are in (bands, cols, rows) order.
    """
    # reshape for skimage: (bands, cols, rows) -> (cols, rows, bands)
    slch = np.swapaxes(lch, 0, 2)
    # convert colorspace
    rgb = lab2rgb(lch2lab(slch))
    # return in (bands, cols, rows) order
    return np.swapaxes(rgb, 2, 0)
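Hypothetical usage with a single mid-grey pixel; `lch2lab` is skimage.color's LCh-to-Lab conversion, assumed imported alongside lab2rgb as in the original module:

import numpy as np

lch = np.zeros((3, 1, 1))   # (bands, cols, rows)
lch[0] = 50.0               # L = 50, C = 0, H = 0
rgb = lch2rgb(lch)          # (3, 1, 1) RGB values around mid-grey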
def save_zhang_feats(img_fns, ext='JPEG'):
    gpu_id = 0
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net('third_party/colorization/models/colorization_deploy_v1.prototxt',
                    'third_party/colorization/models/colorization_release_v1.caffemodel',
                    caffe.TEST)
    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]       # input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]  # output shape
    net.blobs['Trecip'].data[...] = 6 / np.log(10)          # 1/T, set annealing temperature

    feats_fns = []
    for img_fn_i, img_fn in enumerate(img_fns):
        # load the original image and convert it to Lab
        img_rgb = caffe.io.load_image(img_fn)
        img_lab = color.rgb2lab(img_rgb)
        img_l = img_lab[:, :, 0]               # pull out the L channel
        (H_orig, W_orig) = img_rgb.shape[:2]   # original image size
        # create a grayscale version of the image (just for displaying)
        img_lab_bw = img_lab.copy()
        img_lab_bw[:, :, 1:] = 0
        img_rgb_bw = color.lab2rgb(img_lab_bw)
        # resize the image to the network input size and feed the mean-centred L channel
        img_rs = caffe.io.resize_image(img_rgb, (H_in, W_in))
        img_lab_rs = color.rgb2lab(img_rs)
        img_l_rs = img_lab_rs[:, :, 0]
        net.blobs['data_l'].data[0, 0, :, :] = img_l_rs - 50   # subtract 50 for mean-centering
        net.forward()
        # save the conv7_3 features next to the image
        npz_fn = img_fn.replace(ext, 'npz')
        np.savez_compressed(npz_fn, net.blobs['conv7_3'].data)
        feats_fns.append(npz_fn)
    return feats_fns
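Hypothetical usage (the Caffe prototxt/caffemodel paths referenced above must exist; the image filenames below are placeholders):

feat_files = save_zhang_feats(['data/cat.JPEG', 'data/dog.JPEG'])
# each entry points to an .npz file holding the conv7_3 activations for that image
print(feat_files)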
def average_mean(self, aligned=True, debug=False, transition=True):
    ''' Computes the mean of the images. If aligned is True the aligned
    pictures are used, otherwise the originals. If transition is True,
    the running average at each step is saved to disk.
    '''
    self.mylog.log("started the mean averaging procedure")
    sizedataset = len(self.imgs_names)
    if aligned:
        picture = self.get_alg_image(0)
    else:
        picture = self.get_image(0)
    # initialize sum variable
    s = MyRGBImg(np.zeros(picture.data.shape))
    for i in range(sizedataset):
        if debug:
            self.mylog.log("Averaging image: " + str(i))
        # load the picture
        if aligned:
            picture = self.get_alg_image(i)
        else:
            picture = self.get_image(i)
        # (the Lab conversion used in average() is disabled here)
        im = picture.data
        # perform operations
        s += im
        # if transition is true, save the running average for this step
        if transition:
            tr = s / float(i + 1)
            avg = tr
            avg.save(join(self.subfolders["avg_transition"], "avg_tr_" + str(i) + ".png"))
    # calculate the average
    s = s / float(sizedataset)
    self.avg = s
    # small trick to orient the image correctly if it is square
    if self.avg.data.shape[0] == self.avg.data.shape[1]:
        self.avg.rotate(90)
        self.avg.flip_V()
def val():
    color_model.eval()
    i = 0
    for data, _ in val_loader:
        original_img = data[0].unsqueeze(1).float()
        gray_name = './gray/' + str(i) + '.jpg'
        for img in original_img:
            pic = img.squeeze().numpy()
            pic = pic.astype(np.float64)
            plt.imsave(gray_name, pic, cmap='gray')
        w = original_img.size()[2]
        h = original_img.size()[3]
        scale_img = data[1].unsqueeze(1).float()
        if have_cuda:
            original_img, scale_img = original_img.cuda(), scale_img.cuda()
        original_img, scale_img = Variable(original_img, volatile=True), Variable(scale_img)
        _, output = color_model(original_img, scale_img)
        color_img = torch.cat((original_img, output[:, :, 0:w, 0:h]), 1)
        color_img = color_img.data.cpu().numpy().transpose((0, 2, 3, 1))
        for img in color_img:
            # denormalise to Lab ranges: L in [0, 100], a/b in [-128, 127]
            img[:, :, 0:1] = img[:, :, 0:1] * 100
            img[:, :, 1:3] = img[:, :, 1:3] * 255 - 128
            img = img.astype(np.float64)
            img = lab2rgb(img)
            color_name = './colorimg/' + str(i) + '.jpg'
            plt.imsave(color_name, img)
            i += 1
        # the following approach did not produce the right image (reason unknown):
        # color_img = torch.from_numpy(color_img.transpose((0, 3, 1, 2)))
        # sprite_img = make_grid(color_img)
        # color_name = './colorimg/' + str(i) + '.jpg'
        # save_image(sprite_img, color_name)
        # i += 1
def array_to_image(
        color_images_array=None,
        gray_images_array=None,
        mode='RGB',
        color_normalize=False,
        linedrawing=None,
):
    # type: (any, any, any, any, any) -> typing.List[Image.Image]
    """
    :param color_images_array: shape is [number of images, channel(3), width, height]
    :param gray_images_array: used when mode == 'ab' or 'gray'
    :param mode: mode of the input images array (RGB, Lab, ab, gray)
    :param color_normalize: normalize rgb color to [min(rgb_images_array), max(rgb_images_array)]
    """
    if color_images_array is not None:
        color_images_array = chainer.cuda.to_cpu(color_images_array)
    if gray_images_array is not None:
        gray_images_array = chainer.cuda.to_cpu(gray_images_array)

    if mode == 'gray':
        color_images_array = numpy.concatenate([gray_images_array] * 3, axis=1)
        mode = 'RGB'

    if mode == 'ab':
        # concat gray image (luminance) and ab (chromaticity)
        color_images_array = chainer.cuda.to_cpu(color_images_array)
        color_images_array = numpy.concatenate((gray_images_array, color_images_array), axis=1)
        mode = 'Lab'

    color_images_array = color_images_array.transpose(0, 2, 3, 1)

    if mode == 'Lab':
        color_images_array = color_images_array.astype(dtype=numpy.float64)
        image_array_list = [lab2rgb(image_array) * 255 for image_array in color_images_array]
        color_images_array = numpy.concatenate(
            [numpy.expand_dims(image_array, axis=0) for image_array in image_array_list]
        )
        mode = 'RGB'

    if mode == 'RGB':
        rgb_images_array = color_images_array
    else:
        raise ValueError('{} mode is not supported'.format(mode))

    # to uint8
    if color_normalize:
        minmax = (rgb_images_array.min(), rgb_images_array.max())
    else:
        if linedrawing is not None:
            minmax = (0, 1)
        else:
            minmax = (0, 255)

    def clip_image(x):
        x = (x - minmax[0]) / (minmax[1] - minmax[0]) * 255  # normalize to 0~255
        return numpy.float32(0 if x < 0 else (255 if x > 255 else x))

    rgb_images_array = numpy.vectorize(clip_image)(rgb_images_array)
    rgb_images_array = rgb_images_array.astype(numpy.uint8)
    return [Image.fromarray(image_array) for image_array in rgb_images_array]
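Hypothetical usage: decode one predicted ab map together with its grey (L channel) input. The arrays below are zero-filled placeholders in the shapes the function expects, and chainer, PIL.Image and skimage.color.lab2rgb are assumed to be imported as in the original module:

import numpy

grey = numpy.full((1, 1, 64, 64), 50.0)   # L channel in [0, 100]
ab_pred = numpy.zeros((1, 2, 64, 64))     # predicted a/b channels
images = array_to_image(color_images_array=ab_pred, gray_images_array=grey, mode='ab')
images[0].save('colourised.png')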