Example source code for Python's merge()
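
All of the project snippets below are variations on the same split/merge round trip, so here is a minimal standalone sketch of it first (not taken from any listed project; 'input.jpg' is a placeholder path):

import cv2
import numpy as np

img = cv2.imread('input.jpg')                     # placeholder path; OpenCV loads BGR
b, g, r = cv2.split(img)                          # three single-channel uint8 arrays
assert np.array_equal(cv2.merge((b, g, r)), img)  # merge() inverts split()

# merge() also builds images with extra planes, e.g. a constant alpha channel:
alpha = np.full(b.shape, 255, dtype=b.dtype)
bgra = cv2.merge((b, g, r, alpha))                # shape (H, W, 4)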

clahe.py (project: fully-convolutional-network-semantic-segmentation, author: alecng94)
def enhance(image_path, clip_limit=3):
    image = cv2.imread(image_path)
    # convert image to LAB color model
    image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

    # split the image into L, A, and B channels
    l_channel, a_channel, b_channel = cv2.split(image_lab)

    # apply CLAHE to lightness channel
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
    cl = clahe.apply(l_channel)

    # merge the CLAHE enhanced L channel with the original A and B channels
    merged_channels = cv2.merge((cl, a_channel, b_channel))

    # convert image from LAB color model back to BGR
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
    return cv2_to_pil(final_image)
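
A hypothetical call, assuming cv2_to_pil is the project's helper that converts a BGR array to a PIL image and that 'photo.jpg' exists:

pil_img = enhance('photo.jpg', clip_limit=2)   # returns a PIL image
pil_img.save('photo_clahe.jpg')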
imgconnector.py (project: MusicGenerator, author: Conchylicultor)
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path where to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img)
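
The body's extra indentation suggests write_song is a static method in the original class; as a sketch, assuming it is callable directly and that cv is cv2 imported under that alias:

import numpy as np
piano_roll = np.random.rand(88, 64).astype(np.float32)  # hypothetical 88 keys x 64 steps in [0, 1]
write_song(piano_roll, 'demo_song')                      # writes demo_song.png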
scanner.py (project: robik, author: RecunchoMaker)
def get_color_medio(self, roi, a, b, imprimir=False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200

        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0]<12 and std[1]<12 and std[2]<12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl,3), axis=0)
            # yellow has a saturation of 65 and around 200
            if media[1]<60: #and (abs(media[0]-30)>10):
                # white
                return [-10,0,0]
            else:
                return media
        else:
            return None
dataset_utils.py (project: rpg_davis_simulator, author: uzh-rpg)
def extract_grayscale(img, srgb=False):
  dw = img.header()['dataWindow']

  size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
  precision = Imath.PixelType(Imath.PixelType.FLOAT)
  R = img.channel('R', precision)
  G = img.channel('G', precision)
  B = img.channel('B', precision)

  # np.fromstring is deprecated for binary data; frombuffer + reshape
  # is the modern equivalent
  r = np.frombuffer(R, dtype=np.float32).reshape(size[1], size[0])
  g = np.frombuffer(G, dtype=np.float32).reshape(size[1], size[0])
  b = np.frombuffer(B, dtype=np.float32).reshape(size[1], size[0])

  rgb = cv2.merge([b, g, r])
  grayscale = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)

  if srgb:
      grayscale = lin2srgb(grayscale)

  return grayscale
utils.py (project: unet-color, author: 4g)
def arrange_images(Y):
    concat_image = None
    Y = (Y + 1)/2
    for yi in np.split(Y, 10):
        image = None
        for y in yi:
            img = cv2.merge((y[0, :, :], y[1, :, :], y[2, :, :]))
            if image is None:
                image = img
            else:
                image = np.concatenate((image, img))
        if concat_image is None:
            concat_image = image
        else:
            concat_image = np.concatenate((concat_image, image), axis=1)
    return concat_image
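
arrange_images expects a channels-first batch in [-1, 1] whose length np.split can divide by 10; a sketch with hypothetical shapes:

import numpy as np
Y = np.random.uniform(-1, 1, (10, 3, 32, 32)).astype(np.float32)
grid = arrange_images(Y)   # ten 32x32 tiles side by side: shape (32, 320, 3)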
train.py (project: unet-tensorflow, author: timctho)
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image
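
A usage sketch with placeholder file names. For uint8 images OpenCV stores hue in [0, 179], so the default ±180 hue limits mostly saturate through cv2.add instead of wrapping; callers usually pass tighter limits:

import cv2
img = cv2.imread('car.jpg')   # placeholder path, BGR
aug = randomHueSaturationValue(img,
                               hue_shift_limit=(-10, 10),
                               sat_shift_limit=(-25, 25),
                               val_shift_limit=(-25, 25),
                               u=1.0)   # u=1.0 forces the augmentation
cv2.imwrite('car_aug.jpg', aug)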
psuedo_label_dataset_generator.py (project: unet-tensorflow, author: timctho) contains the same randomHueSaturationValue function, verbatim, as train.py above.
choose_movie_frames.py (project: eclipse2017, author: google)
def get_rescaled(fname, metadata, directory, rescaled_directory):
    # TODO(dek): move rescaling to its own function
    rescaled_fname = fname + ".rescaled.png"
    rescaled = os.path.join(rescaled_directory, rescaled_fname)
    if not os.path.exists(rescaled):
        print "Unable to find cached rescaled image for", fname
        return None
    image = cv2.imread(rescaled, cv2.IMREAD_UNCHANGED)
    if image is None:
        print "Failed to read image from", rescaled
        return None
    b_channel, g_channel, r_channel = cv2.split(image)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255
    image = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

    return image
signal_generator.py (project: colorcs, author: ch3njust1n)
def check_split(self):
        # from copy import deepcopy
        # h = deepcopy(self.h)
        # s = deepcopy(self.s)
        # v = deepcopy(self.v)

        if not os.path.exists(self.output_path + 'check_merge/'):
            os.makedirs(self.output_path + 'check_merge/')

        merged = cv2.merge((self.h, self.s, self.v))
        cv2.imshow('hsv-remerged', merged)
        cv2.imwrite(self.output_path + 'check_merge/hsv-merged.jpg', merged)

        # Try to merge 3 noisy hsv channels into 1 noisy image
        merged2 = cv2.merge((self.n_h, self.n_s, self.n_v))
        cv2.imshow('hsv-noisy-remerged', merged2)

        rgb = cv2.cvtColor(merged, cv2.COLOR_HSV2BGR)
        cv2.imshow('rgb-remerged', rgb)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
class_PlantIdentifier.py (project: Farmbot_GeneralAP, author: SpongeYao)
def extractPlantsArea(self, arg_mode=0,arg_INV= False, b_threshold=80, a_threshold=80):
        zeros = np.zeros(self.image.shape[:2], dtype = "uint8")

        imgLAB = cv2.cvtColor(self.image, self.colorSpace)
        (L, A, B) = cv2.split(imgLAB)
        cv2.imwrite('Debug/imgB.jpg',B)
        cv2.imwrite('Debug/imgA.jpg',A)
        #(T_weeds_b, thresh_weeds_b) = cv2.threshold(B, b_threshold, 255, cv2.THRESH_BINARY)
        #(T_weeds_a, thresh_weeds_a) = cv2.threshold(A, a_threshold, 255, cv2.THRESH_BINARY)
        if arg_mode==0:
            thresh_weeds_a= imgProcess_tool.binarialization(A,0,arg_INV, a_threshold)
            thresh_weeds_b= imgProcess_tool.binarialization(B,0,arg_INV, b_threshold)
        elif arg_mode==1:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 1, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 1, arg_INV)
        elif arg_mode==2:
            thresh_weeds_b= imgProcess_tool.binarialization(B, 2, arg_INV)
            thresh_weeds_a= imgProcess_tool.binarialization(A, 2, arg_INV)
        cv2.imwrite('Debug/imgB_thr.jpg',thresh_weeds_b)
        cv2.imwrite('Debug/imgA_thr.jpg',thresh_weeds_a)
        imgRGB = cv2.merge([zeros, thresh_weeds_b, thresh_weeds_a])
        return thresh_weeds_a, thresh_weeds_b
helpers.py (project: head-segmentation, author: szywind) repeats the same randomHueSaturationValue function, verbatim.
denseCRF.py (project: head-segmentation, author: szywind)
def main():
    imgList = getImageList(input_folder='/home/jin/shenzhenyuan/head-segmentation/input/test',
                           output_file='/home/jin/shenzhenyuan/head-segmentation/input/testSet.txt')
    for img_path in imgList:
        img = cv2.imread('{}'.format(img_path))
        if img_path[:img_path.rfind('.')].endswith('png'):
            str = img_path[:img_path.rfind('.')] + '-seg.png'
        else:
            str = img_path[:img_path.rfind('.')] + '.png-seg.png'
        mask = cv2.imread('{}'.format(str))
        prob = mask[:,:,0:2] / 255.0
        prob[:, :, 1] = 1 - prob[:, :, 0]
        res, Q = denseCRF(img, prob)
        a = 1-res
        a = a.astype('uint8')

        # cv2.split on a BGR image yields B, G, R; the original names were swapped
        b_channel, g_channel, r_channel = cv2.split(img)
        img_rgba = cv2.merge((b_channel, g_channel, r_channel, a*255))
        cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), img_rgba)

        # a = np.dstack((a,)*3)
        # plt.imshow(a*img)
        # cv2.imwrite('{}_crf.png'.format(img_path[:img_path.find('.')]), (a>0.1)*img)

        cv2.imwrite('{}_crf_qtsu.png'.format(img_path[:img_path.find('.')]), cropHead(Q, img))
flownet.py (project: Bayesian-FlowNet, author: Johswald)
def flows_to_img(flows):
    """Pyfunc wrapper to transorm flow vectors in color coding"""

    def _flow_transform(flows):
        """ Tensorflow Pyfunc to transorm flow to color coding"""

        flow_imgs = []
        for flow in flows:
            img = computeColor.computeImg(flow)
            # cv2 returns bgr images
            b, g, r = cv2.split(img)
            img = cv2.merge((r, g, b))
            flow_imgs.append(img)
        return [flow_imgs]

    flow_imgs = tf.py_func(_flow_transform, [flows],
                           [tf.uint8], stateful=False, name='flow_transform')

    flow_imgs = tf.squeeze(tf.stack(flow_imgs))
    flow_imgs.set_shape([FLAGS.batchsize] + FLAGS.d_shape_img)
    return flow_imgs
imgconnector.py (project: How_to_generate_music_in_tensorflow_LIVE, author: llSourcell) is a verbatim copy of the write_song function from MusicGenerator above.
train.py (project: Kaggle-Carvana-Image-Masking-Challenge, author: petrosgk) carries the same randomHueSaturationValue function once more, verbatim.
normalized.py (project: virtual-dressing-room, author: akash0x53)
def normalized(self):

#        t1=time.time()
        # cast to float so the per-pixel channel sum cannot overflow uint8
        b=self.down[:,:,0].astype(np.float32)
        g=self.down[:,:,1].astype(np.float32)
        r=self.down[:,:,2].astype(np.float32)

        total=b+g+r

        self.norm[:,:,0]=b/total*255.0
        self.norm[:,:,1]=g/total*255.0
        self.norm[:,:,2]=r/total*255.0

 #       print "conversion time",time.time()-t1

        #self.norm=cv2.merge([self.norm1,self.norm2,self.norm3])
        self.norm_rgb=cv2.convertScaleAbs(self.norm)
        #self.norm.dtype=np.uint8
        return self.norm_rgb
bwm.py (project: BlindWaterMark, author: chishaxie)
def bgr_to_rgb(img):
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b])
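
For a plain channel swap like this, NumPy slicing gives the same pixels without the split/merge pair; the cv2 version returns a contiguous copy, which some downstream cv2 calls prefer. A quick equivalence check with a placeholder image:

import cv2
import numpy as np
img = cv2.imread('input.jpg')   # placeholder path
assert np.array_equal(bgr_to_rgb(img), img[:, :, ::-1])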
apply_mask.py (project: masks-and-hats, author: leoneckert)
def add_alpha_channel(img):
    # img = cv2.imread(path)
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = np.ones(b_channel.shape, dtype=b_channel.dtype) * 255 #creating a dummy alpha channel image.
    return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))
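
A hypothetical usage, saving as PNG so the new alpha plane survives on disk:

import cv2
img = cv2.imread('photo.jpg')        # placeholder path, 3-channel BGR
bgra = add_alpha_channel(img)        # 4-channel, fully opaque
cv2.imwrite('photo_alpha.png', bgra) # PNG keeps the alpha channel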
vwriter.py (project: piwall-cvtools, author: infinnovation)
def addFrame(self, frame, width=300):
        frame = imutils.resize(frame, width)

        # check if the writer is None
        if self.writer is None:
            # store the image dimensions, initialize the video writer,
            # and construct the zeros array
            (self.h, self.w) = frame.shape[:2]
            self.writer = cv2.VideoWriter(self.output, self.fourcc, self.fps,
                                          (self.w * 2, self.h * 2), True)
            self.zeros = np.zeros((self.h, self.w), dtype="uint8")

        # break the image into its B, G and R components, then construct
        # a color representation of each channel individually
        (B, G, R) = cv2.split(frame)
        R = cv2.merge([self.zeros, self.zeros, R])
        G = cv2.merge([self.zeros, G, self.zeros])
        B = cv2.merge([B, self.zeros, self.zeros])

        # construct the final output frame, storing the original frame
        # at the top-left, the red channel in the top-right, the green
        # channel in the bottom-right, and the blue channel in the
        # bottom-left
        output = np.zeros((self.h * 2, self.w * 2, 3), dtype="uint8")
        output[0:self.h, 0:self.w] = frame
        output[0:self.h, self.w:self.w * 2] = R
        output[self.h:self.h * 2, self.w:self.w * 2] = G
        output[self.h:self.h * 2, 0:self.w] = B

        # write the output frame to file
        self.writer.write(output)
increase_picture.py (project: tensorflow-pi, author: karaage0703)
def equalizeHistRGB(src):

    # cv2.equalizeHist returns a new array, so assign the result back;
    # the original call discarded it and returned the input unchanged
    RGB = list(cv2.split(src))
    for i in range(3):
        RGB[i] = cv2.equalizeHist(RGB[i])

    img_hist = cv2.merge([RGB[0], RGB[1], RGB[2]])
    return img_hist
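
A sketch of the fixed function in use; note the design trade-off that equalizing B, G and R independently can shift colors, which is why the clahe.py example at the top equalizes only the lightness channel in LAB:

import cv2
img = cv2.imread('scene.jpg')   # placeholder path
cv2.imwrite('scene_eq.jpg', equalizeHistRGB(img))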

opencv_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def rgb(bgr_img):
    b,g,r = cv.split(bgr_img)       # get b,g,r
    rgb_img = cv.merge([r,g,b])     # switch it to rgb
    return rgb_img

# Given directory loc, get all images in directory and crop to just faces
# Returns face_list, an array of cropped image file names
opencv_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def toggleRGB(img):
  r,g,b = cv.split(img)
  img = cv.merge([b,g,r])
  return img

# Combine two images for displaying side-by-side
# If maxSize is true, crops sides of image to keep under 2880 pixels in width
caffe_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def load_minibatch(input_list, color, labels, start,num):
    # Enforce maximum on start
    start = max(0,start)

    # Enforce minimum on end
    end = start + num
    end = min(len(input_list), end)

    # Isolate files
    files = input_list[start:end]

    images = []
    for file in files:
        img = caffe.io.load_image(file, color)

        # Handle incorrect image dims for uncropped images
        # TODO: Get uncropped images to import correctly
        if img.shape[0] == 3 or img.shape[0] == 1:
            img = np.swapaxes(np.swapaxes(img, 0, 1), 1, 2)

        # BUG FIX: Is this ok?
        # color=True gets the correct desired dimension of WxHx3
        # But color=False gets images of WxHx1. Need WxHx3 or will get "Index out of bounds" exception
        # Fix by concatenating three copies of the image
        if img.shape[2] == 1:
            img = cv.merge([img,img,img])

        # Add image array to batch
        images.append(img)

    labelsReduced = labels[start:end]
    return images, labelsReduced

# Classify all images in a list of image file names
# No return value, but can display outputs if desired
functions.py (project: cvloop, author: shoeffner)
def load_hat(self, path):  # pylint: disable=no-self-use
        """Loads the hat from a picture at path.

        Args:
            path: The path to load from

        Returns:
            The hat data.
        """
        hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if hat is None:
            raise ValueError('No hat image found at `{}`'.format(path))
        b, g, r, a = cv2.split(hat)
        return cv2.merge((r, g, b, a))
preprocessing.py (project: pycolor_detection, author: parth1993)
def removebg(segmented_img):
    src = cv2.imdecode(np.squeeze(np.asarray(segmented_img[1])), 1)
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba)  # merge infers the channel count; the extra "4" was not a valid argument
    processed_img = cv2.imencode('.png', dst)

    return processed_img
Unet_test.py (project: segmentation-visualization-training, author: tkwoo)
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
            loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print "[*] model load : %s"%weight_list[-1]
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000 
    print "[*] model loading Time: %.3f ms"%t_total

    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1,256,256,1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print "Predict Time: %.3f ms"%t_total

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255*flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256,256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print "SAVE:[%s]"%output_path
saliency.py (project: saliency, author: shuuchen)
def makeNormalizedColorChannels(image, thresholdRatio=10.):
    """
        Creates a version of the (3-channel color) input image in which each of
        the (4) channels is normalized.  Implements color opponencies as per 
        Itti et al. (1998).
        Arguments:
            image           : input image (3 color channels)
            thresholdRatio  : the threshold below which to set all color values
                                to zero.
        Returns:
            an output image with four normalized color channels for red, green,
            blue and yellow.
    """
    intens = intensity(image)
    threshold = intens.max() / thresholdRatio
    logger.debug("Threshold: %d", threshold)
    r,g,b = cv2.split(image)
    cv2.threshold(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    R = r - (g + b) / 2
    G = g - (r + b) / 2
    B = b - (g + r) / 2
    Y = (r + g) / 2 - cv2.absdiff(r,g) / 2 - b

    # Negative values are set to zero.
    cv2.threshold(src=R, dst=R, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=G, dst=G, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=B, dst=B, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=Y, dst=Y, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)

    image = cv2.merge((R,G,B,Y))
    return image
saliency.py (project: saliency, author: shuuchen)
def markMaxima(saliency):
    """
        Mark the maxima in a saliency map (a gray-scale image).
    """
    maxima = maximum_filter(saliency, size=(5, 5))
    maxima = numpy.array(saliency == maxima, dtype=numpy.float64) * 255
    g = cv2.max(saliency, maxima)
    r = saliency
    b = saliency
    marked = cv2.merge((b,g,r))
    return marked
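
A sketch, assuming the saliency map is a float grayscale image and that maximum_filter comes from scipy.ndimage in the original module:

import cv2
import numpy
saliency = cv2.imread('saliency.png', cv2.IMREAD_GRAYSCALE).astype(numpy.float64)  # placeholder path
marked = markMaxima(saliency)
cv2.imwrite('saliency_maxima.png', marked.astype(numpy.uint8))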
color.py (project: carvana-challenge, author: chplushsieh)
def transform(image):
    '''
    input:
      image: numpy array of shape (height, width, channels); the cvtColor
             flags below treat it as BGR
    output:
      transformed: numpy array of the same shape and color order
    '''
    transformed = image

    hue_shift_limit = (-50, 50)
    sat_shift_limit = (-5, 5)
    val_shift_limit = (-15, 15)

    if np.random.random() < 0.5:
        transformed = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(transformed)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        transformed = cv2.merge((h, s, v))
        transformed = cv2.cvtColor(transformed, cv2.COLOR_HSV2BGR)

    return transformed
color_gray.py (project: python-image-processing, author: karaage0703)
def color_gray(src):
    # average the B, G and R channels; cast up first so the uint8 sum
    # cannot overflow, then cast the mean back down
    b, g, r = cv2.split(src)
    gray = ((b.astype(np.uint16) + g + r) // 3).astype(np.uint8)
    dst = cv2.merge((gray, gray, gray))

    return dst
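
For comparison, cv2's built-in conversion uses the standard luma weights rather than a plain mean and skips the manual merge; a sketch:

import cv2
img = cv2.imread('input.jpg')                    # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)     # 0.299 R + 0.587 G + 0.114 B
gray_3ch = cv2.merge((gray, gray, gray))         # same 3-channel layout as color_gray()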

