Python examples using cv2.COLOR_GRAY2RGB

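Every snippet on this page converts a single-channel (grayscale) array into a three-channel one via cv2.cvtColor with the COLOR_GRAY2RGB code, which simply replicates the gray plane into the R, G and B channels. A minimal, self-contained sketch of the call (the toy array is made up for illustration):

import cv2
import numpy as np

gray = np.full((4, 4), 128, dtype=np.uint8)   # toy single-channel image
rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)  # replicate into 3 channels

assert rgb.shape == (4, 4, 3)
assert np.array_equal(rgb[:, :, 0], gray)     # every channel equals the input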

vis_light_points.py (project: esys-pbi, author: fsxfreak)
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [
            denormalize(pt['norm_pos'], frame.img.shape[:-1][::-1], flip_y=True)
            for pt in events.get('gaze_positions', [])
            if pt['confidence'] >= self.g_pool.min_data_confidence
        ]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except IndexError:
                # gaze point falls outside the image; skip it
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for an OpenCV binding inconsistency: some versions return a tuple
        if type(out)==tuple:
            out = out[0]

        overlay = 1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe")
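A standalone sketch of the falloff trick above, on a fabricated 8x8 grid (point position and falloff value are illustrative): cv2.distanceTransform returns a float32 map of each pixel's distance to the nearest zero pixel, and 1 / (dist / falloff + 1) turns that into a weight that is 1 at the gaze point and decays outward.

import cv2
import numpy as np

overlay = np.ones((8, 8), dtype=np.uint8)  # 8-bit single-channel input
overlay[4, 4] = 0                          # one "gaze point"

dist = cv2.distanceTransform(overlay, cv2.DIST_L2, 5)  # float32 distances
falloff = 2.0                                          # illustrative radius
weight = 1.0 / (dist / falloff + 1.0)

assert weight[4, 4] == 1.0          # full brightness at the point itself
assert weight[0, 0] < weight[4, 4]  # fading with distance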

CV2.py (project: reconstruction, author: microelly2)
def execute_Threshold(proxy,obj):

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__+'/icons/freek.png')

    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
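For context on the three branches above: a global threshold applies one cutoff everywhere, while the adaptive variants compute a per-pixel cutoff from the local neighborhood, which holds up better under uneven lighting. A toy comparison (the gradient image is fabricated):

import cv2
import numpy as np

# a horizontal gradient: no single global cutoff separates it cleanly
gradient = np.tile(np.arange(256, dtype=np.uint8), (32, 1))

_, global_th = cv2.threshold(gradient, 127, 255, cv2.THRESH_BINARY)
mean_th = cv2.adaptiveThreshold(gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 2)
gauss_th = cv2.adaptiveThreshold(gradient, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 17, 2)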

lsun_bedroom_line2color.py (project: chainer-cyclegan, author: Aixile)
def get_example(self, i):
        key = self.all_keys[i]  # avoid shadowing the builtin id()
        val = self.db.get(key.encode())

        # np.fromstring is deprecated; np.frombuffer is a drop-in replacement here
        img = cv2.imdecode(np.frombuffer(val, dtype=np.uint8), 1)
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color
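The loader above decodes raw image bytes straight from the database with cv2.imdecode. A self-contained round-trip sketch (the toy image is fabricated) of how such bytes are produced and decoded:

import cv2
import numpy as np

img = np.zeros((8, 8, 3), dtype=np.uint8)      # toy BGR image
ok, buf = cv2.imencode('.png', img)            # encode to an in-memory PNG
assert ok

decoded = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # IMREAD_COLOR == the flag `1`
assert decoded.shape == (8, 8, 3)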

main.py (project: pynephoscope, author: neXyon)
def selectImage(self, index):
        if index >= len(self.files) or index < 0:
            self.ui.imageView.setText("No images found.")
            return

        self.index = index
        self.image = cv2.imread(self.files[index], 1)

        image = self.modes[self.current_mode].getImage()

        if len(image.shape) < 3 or image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # the stride must describe the array actually handed to QImage
        height, width, channels = image.shape
        bytes_per_line = channels * width

        qimage = QtGui.QImage(image, width, height, bytes_per_line, QtGui.QImage.Format_RGB888)

        self.ui.imageView.setPixmap(QtGui.QPixmap.fromImage(qimage))

CV_opening.py (project: reconstruction, author: microelly2)
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "showing"
            cv2.imshow(obj.Label,opening)
            print "shown"
        else:
            # unreachable branch; note that `dst` is never defined here
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=opening

CV_canny.py (project: reconstruction, author: microelly2)
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
        edges = cv2.Canny(img,obj.minVal,obj.maxVal)
        color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        edges=color

        if True:
            print "showing"
            cv2.imshow(obj.Label,edges)
            print "shown"
        else:
            # unreachable branch; note that `dst` is never defined here
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=edges

CV_closing.py (project: reconstruction, author: microelly2)
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print "showing"
            cv2.imshow(obj.Label,closing)
            print "shown"
        else:
            # unreachable branch; note that `dst` is never defined here
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=closing

CV2.py (project: reconstruction, author: microelly2)
def execute_CannyEdge(proxy,obj):
    ''' create Canny Edge image with two parameters'''

    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__+'/icons/freek.png')

    edges = cv2.Canny(img,obj.minVal,obj.maxVal)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    say(["Canny Edge image updated",obj.minVal,obj.maxVal])

model.py (project: ConditionalGAN, author: seungjooli)
def detect_edges(images):
        def blur(image):
            return cv2.GaussianBlur(image, (5, 5), 0)

        def canny_otsu(image):
            scale_factor = 255
            scaled_image = np.uint8(image * scale_factor)

            otsu_threshold = cv2.threshold(
                cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
            lower_threshold = max(0, int(otsu_threshold * 0.5))
            upper_threshold = min(255, int(otsu_threshold))
            edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
            edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

            return np.float32(edges) * (1 / scale_factor)

        blurred = [blur(image) for image in images]
        canny_applied = [canny_otsu(image) for image in blurred]

        return canny_applied
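The interesting idea above is deriving Canny's hysteresis thresholds from Otsu's global threshold instead of hard-coding them. A compact sketch of just that pattern (the random image is a stand-in for real data):

import cv2
import numpy as np

gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # stand-in input

# cv2.threshold returns the threshold Otsu chose as its first value
otsu, _ = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
edges = cv2.Canny(gray, 0.5 * otsu, otsu)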

image.py (project: tensorlight, author: bsautermeister)
def to_rgb(image):
    """Converts a grayscale image to a colored one.
    Parameters
    ----------
    image: ndarray(uint8)
        A grayscale image with the shape of [height, width, 1]
        or of shape [height, width].
    Returns
    ----------
    image: ndarray(uint8)
        Returns a converted image with shape [height, width, 3].
    """
    if image.ndim == 2:
        # a bare [height, width] grayscale image needs conversion as well
        image = cast(image)
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 1:
        image = np.squeeze(image, axis=2)
        image = cast(image)
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    return image
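Since COLOR_GRAY2RGB just replicates the plane, the same conversion can be done without OpenCV; np.repeat over a new channel axis produces an identical result (toy array for illustration):

import numpy as np

gray = np.zeros((4, 4), dtype=np.uint8)
rgb = np.repeat(gray[:, :, np.newaxis], 3, axis=2)
assert rgb.shape == (4, 4, 3)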

gui_main.py (project: Farmbot_GeneralAP, author: SpongeYao)
def display_panel_mergeframe(self, arg_frame, arg_stepX, arg_stepY): 
        print '*** ',len(arg_frame.shape)
        if len(arg_frame.shape)==3:
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB)
        else: 
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_GRAY2RGB)

        tmp_frame= cv2.resize(tmp_frame,(self.mergeframe_splitX,self.mergeframe_splitY),interpolation=cv2.INTER_LINEAR)
        begX= gui_vars.interval_x+self.mergeframe_splitX*arg_stepX
        begY= self.mergeframe_spaceY+ self.mergeframe_splitY* arg_stepY 
        self.mergeframe[begY:begY+ self.mergeframe_splitY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        #begY= self.mergeframe_height- 50- self.mergeframe_splitY*arg_stepY
        #self.mergeframe[begY-self.mergeframe_splitY:begY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        self.mergeframe_stepX= arg_stepX
        self.mergeframe_stepY= arg_stepY
        print '>> mergeframe_splitY, splitX= ', self.mergeframe_splitY, ', ', self.mergeframe_splitX
        print '>> tmp_frame.shape[0,1]= ', tmp_frame.shape[0],', ',tmp_frame.shape[1]

        result = Image.fromarray(self.mergeframe)
        result = ImageTk.PhotoImage(result)
        self.panel_mergeframe.configure(image = result)
        self.panel_mergeframe.image = result

class_ImageProcessing.py (project: Farmbot_GeneralAP, author: SpongeYao)
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
        # Otsu's thresholding after Gaussian filtering
        tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
        blur = cv2.GaussianBlur(tmp,(5,5),0)
        if arg_binaryMethod== 0:
            ret, thresholdedImg= cv2.threshold(blur.copy() , self.threshold_graylevel, 255 , 0)
        elif arg_binaryMethod == 1:
            ret,thresholdedImg = cv2.threshold(blur.copy(),0 ,255 ,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        elif arg_binaryMethod== 2:
            thresholdedImg = cv2.adaptiveThreshold(blur.copy(),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,5,0)

        result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
        ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        ctrs = filter(lambda x : cv2.contourArea(x) > self.threshold_size , ctrs)

        rects = [[cv2.boundingRect(ctr) , ctr] for ctr in ctrs]

        for rect , cntr in rects:
            cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
        if arg_export_index:
            cv2.imwrite(arg_export_path+ arg_export_filename+'.jpg', result)
        print "Get Contour success"
        return result

segimg_parsing.py (project: Semi-automatic-Annotation, author: Luoyadan)
def disp_segBI_on(self):
        print "displaying segBI ON"
        ## disable editing (view-only mode)
        self.parent().mode = "view"
        self.img_arr_tmp = self.img_arr.copy()
        img_arr = self.ori_img.copy()

        ## display binary img
        segBI = np.zeros(img_arr.shape[:2], np.uint8)
        segBI[self.seg_arr == self.current_label] = 255
        segBI = cv2.cvtColor(segBI, cv2.COLOR_GRAY2RGB)


        if self.Zoomed == True:
            large_segBI = Image.fromarray(segBI).resize((self.w * self.zRate, self.h * self.zRate), Image.NEAREST)
            cropped_segBI = large_segBI.crop(tuple(self.zoom_pos))
            segBI = np.array(cropped_segBI)

        self.img_arr = segBI
        self.update()

filter.py (project: PicFilter, author: dhuadaar)
def render(self,frame):
        # number of downscaling steps
        numDownSamples = 2
        # number of bilateral filtering steps
        numBilateralFilters = 7
        img_rgb = frame
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
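One caveat with the pyramid round trip above, stated as an assumption rather than a confirmed issue in this project: if the frame's sides are not multiples of 2**numDownSamples, pyrUp does not restore the exact original size, and bitwise_and can then fail on a shape mismatch. A small demonstration plus the usual guard:

import cv2
import numpy as np

frame = np.zeros((101, 101, 3), dtype=np.uint8)  # odd size on purpose
down = cv2.pyrDown(cv2.pyrDown(frame))
up = cv2.pyrUp(cv2.pyrUp(down))
print(frame.shape, up.shape)  # (101, 101, 3) vs (104, 104, 3)

# the usual guard: resize back to the reference size before bitwise_and
up = cv2.resize(up, (frame.shape[1], frame.shape[0]))
assert up.shape == frame.shape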

image_utils.py (project: pybot, author: spillai)
def to_color(im, flip_rb=False): 
    if im.ndim == 2: 
        return cv2.cvtColor(im, cv2.COLOR_GRAY2RGB if flip_rb else cv2.COLOR_GRAY2BGR)
    else: 
        return cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if flip_rb else im.copy()
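A note on the flip_rb flag above: for a grayscale source the two constants are interchangeable, since both just copy the single channel three times; the flag only matters in the 3-channel branch. A quick check on toy data:

import cv2
import numpy as np

gray = np.random.randint(0, 256, (4, 4), dtype=np.uint8)
rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
bgr = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
assert np.array_equal(rgb, bgr)  # identical output for gray input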

CV_cornerharris.py (project: reconstruction, author: microelly2)
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            img = obj.imageNode.ViewObject.Proxy.img.copy()

        print (obj.blockSize,obj.ksize,obj.k)
        try:
            gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            gray = np.float32(gray)
            print "normale"
        except:
            im2=cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
            gray = cv2.cvtColor(im2,cv2.COLOR_RGB2GRAY)
            print "except"

        dst = cv2.cornerHarris(gray,obj.blockSize,obj.ksize*2+1,obj.k/10000)
        dst = cv2.dilate(dst,None)

        img[dst>0.01*dst.max()]=[0,0,255]

        dst2=img.copy()
        dst2[dst<0.01*dst.max()]=[255,255,255]
        dst2[dst>0.01*dst.max()]=[0,0,255]

        if not obj.matplotlib:
            cv2.imshow(obj.Label,img)
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst2,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()

        self.img=img

detect.py (project: object-detection-python-opencv, author: hasanaliqureshi)
def overlay_mask(mask, image):
    # make the mask RGB so it can be blended with the color image
    rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
    # addWeighted computes the weighted sum of the two image arrays;
    # each gets weight 0.5, with a scalar offset (gamma) of 0
    img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)
    return img

Bag.py (project: Bag-of-Visual-Words-Python, author: kushalvyas)
def testModel(self):
        """ 
        This method is to test the trained classifier

        read all images from testing path 
        use BOVHelpers.predict() function to obtain classes of each image

        """

        self.testImages, self.testImageCount = self.file_helper.getFiles(self.test_path)

        predictions = []

        for word, imlist in self.testImages.iteritems():
            print "processing " ,word
            for im in imlist:
                cl = self.recognize(im)
                predictions.append({
                    'image':im,
                    'class':cl,
                    'object_name':self.name_dict[str(int(cl[0]))]
                    })

        print predictions
        for each in predictions:
            # cv2.imshow(each['object_name'], each['image'])
            # cv2.waitKey()
            # cv2.destroyWindow(each['object_name'])
            # 
            plt.imshow(cv2.cvtColor(each['image'], cv2.COLOR_GRAY2RGB))
            plt.title(each['object_name'])
            plt.show()

image_processing.py (project: vehicle_detection, author: AuzanMuh)
def cvtGRAY2RGB(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
    return frame

fingers_recog.py (project: tbotnav, author: patilnabhi)
def find(self, img):
        self.height, self.width = img.shape[:2]

        armImg = self._extract_arm(img) 
        armImg2 = armImg.copy()

        (contours, defects) = self._find_hull_defects(armImg)

        outImg = cv2.cvtColor(armImg2, cv2.COLOR_GRAY2RGB)

        (outImg, num_fingers) = self._detect_num_fingers(contours, defects, outImg)

        return (outImg, num_fingers)
        # return outImg

Sonar_data.py (project: GidroGraf-Sirius, author: alf3r)
def find_contours(self):
        im2, contours, hierarchy = cv2.findContours(self.data, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        self.data = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGB)
        cv2.drawContours(self.data, contours, -1, (255, 0, 0), 20)
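The three-value unpacking above matches OpenCV 3.x; OpenCV 2.x and 4.x return only (contours, hierarchy). A version-agnostic sketch, on a fabricated binary image:

import cv2
import numpy as np

binary = np.zeros((16, 16), dtype=np.uint8)
binary[4:12, 4:12] = 255  # one solid blob

# take the last two values, whichever OpenCV version is installed
ret = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = ret[-2], ret[-1]
assert len(contours) == 1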

categorical_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,1])', np.max(mask[:,:,1]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,5*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = cv2.cvtColor(mask[:,:,1],cv2.COLOR_GRAY2RGB)
    res[:,3*IMAGE_W:4*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,4*IMAGE_W:5*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)

categorical_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,1])', np.max(mask[:,:,1]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,7*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = cv2.cvtColor(mask[:,:,1],cv2.COLOR_GRAY2RGB)
    res[:,3*IMAGE_W:4*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,4*IMAGE_W:5*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    y_pred[:,:,1][y_pred[:,:,1] > 0.5] = 255
    res[:,5*IMAGE_W:6*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,6*IMAGE_W:7*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)

binary_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,1])', np.max(mask[:,:,1]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,7*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = cv2.cvtColor(mask[:,:,1],cv2.COLOR_GRAY2RGB)
    res[:,3*IMAGE_W:4*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,4*IMAGE_W:5*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    y_pred[:,:,1][y_pred[:,:,1] > 0.5] = 255
    res[:,5*IMAGE_W:6*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,6*IMAGE_W:7*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,1],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)

categorical_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,0])', np.max(mask[:,:,0]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,4*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    res[:,3*IMAGE_W:4*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)

binary_crossentropy_example.py (project: keras-semantic-segmentation-example, author: mrgloom)
def save_prediction():

    model = get_model()
    model.load_weights('model_weights_'+loss_name+'.h5')

    img,mask= gen_random_image()

    y_pred= model.predict(img[None,...].astype(np.float32))[0]

    print('y_pred.shape', y_pred.shape)

    y_pred= y_pred.reshape((IMAGE_H,IMAGE_W,NUMBER_OF_CLASSES))

    print('np.min(mask[:,:,0])', np.min(mask[:,:,0]))
    print('np.max(mask[:,:,0])', np.max(mask[:,:,0]))

    print('np.min(y_pred)', np.min(y_pred))
    print('np.max(y_pred)', np.max(y_pred))

    res = np.zeros((IMAGE_H,4*IMAGE_W,3),np.uint8)
    res[:,:IMAGE_W,:] = img
    res[:,IMAGE_W:2*IMAGE_W,:] = cv2.cvtColor(mask[:,:,0],cv2.COLOR_GRAY2RGB)
    res[:,2*IMAGE_W:3*IMAGE_W,:] = 255*cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)
    y_pred[:,:,0][y_pred[:,:,0] > 0.5] = 255
    res[:,3*IMAGE_W:4*IMAGE_W,:] = cv2.cvtColor(y_pred[:,:,0],cv2.COLOR_GRAY2RGB)

    cv2.imwrite(loss_name+'_result.png', res)

utils.py (project: pytorch_crowd_count, author: BingzheWu)
def read_gray_img(img_path):
    bgr = cv2.imread(img_path)
    #bgr = cv2.resize(bgr, (225, 225))
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    gray_3 = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    print gray.shape
    plt.imshow(gray_3)
    plt.show()
    return np.expand_dims(gray_3,0).transpose((0,3,1,2))

segimg.py (project: Semi-automatic-Annotation, author: Luoyadan)
def disp_finalID_on(self):
        print "displaying self.final_ID ON"
        ## disable editing (view-only mode)
        self.parent().mode = "view"
        self.img_arr_tmp = self.img_arr.copy()

        # img_arr = int8_to_uint8(self.final_ID, self.int8_to_uint8_OFFSET)
        img_arr = np.zeros(self.final_ID.shape, np.uint8)
        img_arr[self.final_ID == self.cur_line_ID] = 255
        img_arr = cv2.cvtColor(img_arr, cv2.COLOR_GRAY2RGB)

        self.img_arr = img_arr
        self.update()

pipeline.py (project: deepvisualminer, author: pathbreak)
def _execute_pipeline_on_image(self, input_data):

        if input_data['img'].ndim == 3:
            # It *appears* imageio imread returns RGB or RGBA, not BGR...confirmed using a blue
            # filled rectangle that imageio is indeed RGB which is opposite of OpenCV's default BGR.
            # Use RGB consistently everywhere.
            if input_data['img'].shape[-1] == 4:
                input_data['gray'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGBA2GRAY)
                print("Input image seems to be 4-channel RGBA. Creating 3-channel RGB version")
                input_data['img'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGBA2RGB)
            else:
                input_data['gray'] = cv2.cvtColor(input_data['img'], cv2.COLOR_RGB2GRAY)

        elif input_data['img'].ndim == 2:
            # If input is a grayscale image, it'll have just 2 dimensions, 
            # but Darkflow code expects 3 dimensions. So always keep 'img' a 3 dimension
            # image no matter what.
            print("Input image is grayscale. Creating RGB version")
            input_data['gray'] = input_data['img'].copy()
            input_data['img'] = cv2.cvtColor(input_data['img'], cv2.COLOR_GRAY2RGB)

        else:
            raise "Unknown image format " + input_data['img'].shape

        print("Input image:", input_data['img'].shape)
        print("Grayscale image:", input_data['gray'].shape)

        for comp in self.components:
            print("Executing %s on %s frame %d" % (comp.name, input_data['file'], input_data.get('frame', 0)))
            comp_outputs = comp.execute(input_data, self.input_directory, self.output_directory)

            # At each stage of the pipeline, collect the component's outputs
            # and add them to the input data so that they're available for 
            # downstream components.
            input_data[comp.name] = comp_outputs


        # Release the image arrays.
        input_data['img'] = None
        input_data['gray'] = None

preprocessing.py (project: braid, author: Arya-ai)
def create_fixed_image_shape(img, frame_size=(200, 200, 3), random_fill=True,
                             fill_val=0, mode='fit'):
    # only 'fit' mode is implemented; the 'mode' argument is currently unused
    X1, Y1 = frame_size[1], frame_size[0]
    image_frame = np.ones(frame_size, dtype=np.uint8) * fill_val
    if random_fill:
        image_frame = np.random.randint(
            0, high=255, size=frame_size).astype(np.uint8)

    if ((img.ndim == 2 or img.shape[2] == 1) and
            (len(frame_size) == 3 and frame_size[2] == 3)):
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    X2, Y2 = img.shape[1], img.shape[0]

    if float(X1) / Y1 >= float(X2) / Y2:
        scale = float(Y1) / Y2
    else:
        scale = float(X1) / X2

    img = cv2.resize(img, None, fx=scale, fy=scale)
    sx, sy = img.shape[1], img.shape[0]

    yc = int(round((frame_size[0] - sy) / 2.))
    xc = int(round((frame_size[1] - sx) / 2.))
    image_frame[yc:yc + sy, xc:xc + sx] = img
    assert image_frame.shape == frame_size

    return image_frame
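A usage sketch for the helper above (the shapes are chosen arbitrarily): a wide grayscale image is converted to RGB, scaled to fit, and centered inside the fixed frame.

import numpy as np

img = np.zeros((100, 300), dtype=np.uint8)  # toy grayscale input
framed = create_fixed_image_shape(img, frame_size=(200, 200, 3),
                                  random_fill=False, fill_val=0)
assert framed.shape == (200, 200, 3)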

