Python example source code for rgb2lab()

nlc.py (project: videoseg, author: pathak22)
def color_hist(im, colBins):
    """
    Get color histogram descriptors for RGB and LAB space.
    Input: im: (h,w,c): 0-255: np.uint8: RGB
    Output: descriptor: (colBins*6,)
    """
    assert im.ndim == 3 and im.shape[2] == 3, "image should be rgb"
    # stack RGB and Lab channels side by side: one row per pixel, six columns
    arr = np.concatenate((im, color.rgb2lab(im)), axis=2).reshape((-1, 6))
    desc = np.zeros((colBins * 6,), dtype=float)
    for i in range(3):  # R, G, B histograms over [0, 255]
        desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
            arr[:, i], bins=colBins, range=(0, 255))
        # L1-normalize the block; the boolean term in the denominator avoids division by zero
        desc[i * colBins:(i + 1) * colBins] /= np.sum(
            desc[i * colBins:(i + 1) * colBins]) + (
            np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
    # fourth block: the L channel of Lab, range [0, 100]
    i += 1
    desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
        arr[:, i], bins=colBins, range=(0, 100))
    desc[i * colBins:(i + 1) * colBins] /= np.sum(
        desc[i * colBins:(i + 1) * colBins]) + (
        np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
    for i in range(4, 6):  # a and b channels of Lab, range [-128, 127]
        desc[i * colBins:(i + 1) * colBins], _ = np.histogram(
            arr[:, i], bins=colBins, range=(-128, 127))
        desc[i * colBins:(i + 1) * colBins] /= np.sum(
            desc[i * colBins:(i + 1) * colBins]) + (
            np.sum(desc[i * colBins:(i + 1) * colBins]) < 1e-4)
    return desc
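A minimal usage sketch for the helper above (the random image and bin count are placeholders; numpy and skimage.color are assumed to be imported as in the project):

# Hypothetical usage of color_hist(); a random uint8 image stands in for a real video frame.
import numpy as np
from skimage import color

rgb = np.random.randint(0, 256, size=(120, 160, 3), dtype=np.uint8)  # fake RGB frame
desc = color_hist(rgb, colBins=20)
print(desc.shape)       # (120,): 6 channel blocks of 20 bins each
print(desc[:20].sum())  # each block is L1-normalized, so this is ~1.0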
vid2shots.py (project: videoseg, author: pathak22) defines the same color_hist() helper; the code is identical to the nlc.py listing above.
saliency.py (project: saliency-bms, author: fzliu)
def compute_saliency(img):
    """
        Computes Boolean Map Saliency (BMS).
    """

    img_lab = rgb2lab(img)
    img_lab -= img_lab.min()
    img_lab /= img_lab.max()  # rescale the Lab channels jointly to [0, 1]
    thresholds = np.arange(0, 1, 1.0 / N_THRESHOLDS)[1:]

    # compute boolean maps
    bool_maps = []
    for thresh in thresholds:
        img_lab_T = img_lab.transpose(2, 0, 1)
        img_thresh = (img_lab_T > thresh)
        bool_maps.extend(list(img_thresh))

    # compute mean attention map
    attn_map = np.zeros(img_lab.shape[:2], dtype=float)
    for bool_map in bool_maps:
        attn_map += activate_boolean_map(bool_map)
    attn_map /= N_THRESHOLDS

    # gaussian smoothing
    attn_map = cv2.GaussianBlur(attn_map, (0, 0), 3)

    # perform normalization
    norm = np.sqrt((attn_map**2).sum())
    attn_map /= norm
    attn_map /= attn_map.max() / 255

    return attn_map.astype(np.uint8)
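A hedged usage sketch: compute_saliency() depends on the module-level N_THRESHOLDS constant and the activate_boolean_map() helper defined elsewhere in saliency.py, and the file names below are placeholders.

# Hypothetical usage; assumes saliency.py's N_THRESHOLDS and activate_boolean_map are in scope.
import cv2

img = cv2.imread("example.jpg")[:, :, ::-1]  # OpenCV loads BGR; reverse to RGB for rgb2lab
sal = compute_saliency(img)                  # uint8 attention map scaled to roughly [0, 255]
cv2.imwrite("saliency.png", sal)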
save_zhang_feats.py (project: divcolor, author: aditya12agd5)
def save_zhang_feats(img_fns, ext='JPEG'):

    gpu_id = 0
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    net = caffe.Net('third_party/colorization/models/colorization_deploy_v1.prototxt', \
    'third_party/colorization/models/colorization_release_v1.caffemodel', caffe.TEST)

    (H_in,W_in) = net.blobs['data_l'].data.shape[2:] # get input shape
    (H_out,W_out) = net.blobs['class8_ab'].data.shape[2:] # get output shape
    net.blobs['Trecip'].data[...] = 6/np.log(10) # 1/T, set annealing temperature

    feats_fns = []
    for img_fn_i, img_fn in enumerate(img_fns):

        # load the original image
        img_rgb = caffe.io.load_image(img_fn)
        img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
        img_l = img_lab[:,:,0] # pull out L channel
        (H_orig,W_orig) = img_rgb.shape[:2] # original image size

        # create grayscale version of image (just for displaying)
        img_lab_bw = img_lab.copy()
        img_lab_bw[:,:,1:] = 0
        img_rgb_bw = color.lab2rgb(img_lab_bw)

        # resize image to network input size
        img_rs = caffe.io.resize_image(img_rgb,(H_in,W_in)) # resize image to network input size
        img_lab_rs = color.rgb2lab(img_rs)
        img_l_rs = img_lab_rs[:,:,0]

        net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
        net.forward() # run network

        npz_fn = img_fn.replace(ext, 'npz')
        np.savez_compressed(npz_fn, net.blobs['conv7_3'].data)
        feats_fns.append(npz_fn)

    return feats_fns
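A hedged usage sketch: running this requires pycaffe plus the colorization prototxt/caffemodel referenced above; the image directory and glob pattern are placeholders.

# Hypothetical usage of save_zhang_feats(); paths are placeholders.
import glob

img_fns = sorted(glob.glob("imgs/*.JPEG"))
feat_fns = save_zhang_feats(img_fns, ext='JPEG')
# each returned path is an .npz file holding the conv7_3 activations for one image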
myfunc.py (project: RFCN, author: zengxianyu)
def tensor2image(image):
    """
    convert a mean-0 tensor to float numpy image
    :param image: 
    :return: image
    """
    image = image.clone()
    image[0] = image[0] + 122.67891434
    image[1] = image[1] + 116.66876762
    image[2] = image[2] + 104.00698793
    image = image.numpy() / 255.0
    image = image.transpose((1, 2, 0))
    image = img_as_ubyte(image)
    return image
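A minimal round-trip sketch for tensor2image() (a random frame stands in for a real network input; torch, numpy and skimage's img_as_ubyte are assumed to be imported as in myfunc.py):

# Hypothetical round trip: mean-center a CHW RGB frame, then recover a uint8 image.
import numpy as np
import torch

mean = np.array([122.67891434, 116.66876762, 104.00698793], dtype=np.float32).reshape(3, 1, 1)
frame = np.random.randint(0, 255, size=(3, 64, 64)).astype(np.float32)  # fake CHW frame, values 0-254
tensor = torch.from_numpy(frame - mean)   # the mean-subtracted input tensor2image() expects
recovered = tensor2image(tensor)          # HxWx3 uint8 image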


# def prior_map(img):
#     """
#     get RFCN prior map
#     :param img: numpy array (H*W*C, RGB), [0, 1], float
#     :return: pmap
#     """
#     # step 1 over segmentation into superpixels
#     sp = slic(img, n_segments=200, sigma=5)
#     sp_num = sp.max() + 1
#     sp = sp.astype(float)
#
#     # step 2 the mean lab color of the sps
#     mean_lab_color = np.zeros((sp_num, 3))
#     lab_img = color.rgb2lab(img)
#     for c in range(3):
#         for i in range(sp_num):
#             mean_lab_color[i, c] = lab_img[sp == i, c].mean()
#
#     # step 3, element uniqueness
#
#     return pimg
AvgRGB_class.py (project: DenoiseAverage, author: Pella86)
def average_mean(self, aligned = True, debug = False, transition = True):
        ''' Compute the mean of the images. If aligned is True the aligned
        pictures are used, otherwise the originals. If transition is True,
        each intermediate average is saved to disk.
        '''

        self.mylog.log("started the mean averaging procedure")

        sizedataset = len(self.imgs_names)

        if aligned:
            picture = self.get_alg_image(0)
        else:
            picture = self.get_image(0)       

        # initialize sum variable
        s = MyRGBImg(np.zeros(picture.data.shape))
        #s = color.rgb2lab(s.data)

        for i in range(sizedataset):
            if debug:
                self.mylog.log("Averaging image: " + str(i))
            #load the picture
            if aligned:
                picture = self.get_alg_image(i)
            else:
                picture = self.get_image(i)
            # convert both to lab
            #im = color.rgb2lab(picture.data)
            im = picture.data

            #perform operations
            s += im

            # if transition is True, save the intermediate average for this step
            if transition:
                tr = s / float(i + 1)
                #avg = MyRGBImg(color.lab2rgb(tr))
                avg = tr
                avg.save(join(self.subfolders["avg_transition"], "avg_tr_" + str(i) + ".png"))

        # calculate the average    
        s = s / float(sizedataset)
        #self.avg = MyRGBImg(color.lab2rgb(s))
        self.avg = s

        # small trick to orient the image correctly if it is square
        if self.avg.data.shape[0] == self.avg.data.shape[1]:
            self.avg.rotate(90)
            self.avg.flip_V()
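The commented-out lines above hint at averaging in Lab space instead of RGB; a standalone sketch of that idea with skimage (independent of the MyRGBImg class, with placeholder file names) might look like this:

# Standalone sketch: average a set of RGB frames in Lab space, then convert back to RGB.
from skimage import color, io

paths = ["frame0.png", "frame1.png", "frame2.png"]  # placeholder file names
acc = None
for p in paths:
    lab = color.rgb2lab(io.imread(p))               # convert each frame to Lab
    acc = lab if acc is None else acc + lab
avg_rgb = color.lab2rgb(acc / len(paths))           # mean in Lab, back to RGB floats in [0, 1]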
sf_method.py (project: saliency_method, author: lee88688)
def generate_features(self):
        # prepare variables
        img_lab = rgb2lab(self._img)
        segments = slic(img_lab, n_segments=500, compactness=30.0, convert2lab=False)
        max_segments = segments.max() + 1

        # create x, y coordinate features
        shape = self._img.shape
        a = shape[0]
        b = shape[1]
        x_axis = np.linspace(0, b - 1, num=b)
        y_axis = np.linspace(0, a - 1, num=a)

        x_coordinate = np.tile(x_axis, (a, 1,))  # x coordinate of every pixel: each row repeats the x axis
        y_coordinate = np.tile(y_axis, (b, 1,))  # y coordinate of every pixel (transposed below)
        y_coordinate = np.transpose(y_coordinate)

        coordinate_segments_mean = np.zeros((max_segments, 2))

        # create Lab color features
        img_l = img_lab[:, :, 0]
        img_a = img_lab[:, :, 1]
        img_b = img_lab[:, :, 2]

        img_segments_mean = np.zeros((max_segments, 3))

        for i in range(max_segments):  # per-superpixel means
            segments_i = segments == i

            coordinate_segments_mean[i, 0] = x_coordinate[segments_i].mean()
            coordinate_segments_mean[i, 1] = y_coordinate[segments_i].mean()

            img_segments_mean[i, 0] = img_l[segments_i].mean()
            img_segments_mean[i, 1] = img_a[segments_i].mean()
            img_segments_mean[i, 2] = img_b[segments_i].mean()

        # element distribution
        wc_ij = np.exp(-cdist(img_segments_mean, img_segments_mean) ** 2 / (2 * self._sigma_distribution ** 2))
        wc_ij = wc_ij / wc_ij.sum(axis=1)[:, None]
        mu_i = np.dot(wc_ij, coordinate_segments_mean)
        distribution = np.dot(wc_ij, np.linalg.norm(coordinate_segments_mean - mu_i, axis=1) ** 2)
        distribution = normalize(distribution)
        distribution = np.array([distribution]).T

        # element uniqueness feature
        wp_ij = np.exp(
            -cdist(coordinate_segments_mean, coordinate_segments_mean) ** 2 / (2 * self._sigma_uniqueness ** 2))
        wp_ij = wp_ij / wp_ij.sum(axis=1)[:, None]
        uniqueness = np.sum(cdist(img_segments_mean, img_segments_mean) ** 2 * wp_ij, axis=1)
        uniqueness = normalize(uniqueness)
        uniqueness = np.array([uniqueness]).T

        # save features and variables
        self.img_lab = img_lab
        self.segments = segments
        self.img_segments_mean = img_segments_mean
        self.coordinate_segments_mean = coordinate_segments_mean
        self.uniqueness = uniqueness
        self.distribution = distribution
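For context, a standalone sketch of the per-superpixel mean-Lab step the method performs, outside the class (the image path is a placeholder; the full method additionally needs scipy's cdist and a normalize helper):

# Standalone sketch of the mean Lab color per SLIC superpixel (not the class's own code).
import numpy as np
from skimage import io
from skimage.color import rgb2lab
from skimage.segmentation import slic

img = io.imread("example.jpg")          # placeholder RGB image
img_lab = rgb2lab(img)
segments = slic(img_lab, n_segments=500, compactness=30.0, convert2lab=False)
mean_lab = np.array([img_lab[segments == i].mean(axis=0)
                     for i in range(segments.max() + 1)])  # (n_superpixels, 3)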

