Python cv2.COLOR_BGR2RGB usage examples
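OpenCV's imread and VideoCapture return images in BGR channel order, while matplotlib, PIL, and most deep-learning preprocessing pipelines expect RGB. Every snippet collected below performs that conversion with cv2.cvtColor and the cv2.COLOR_BGR2RGB flag. As a minimal, self-contained sketch of the pattern (the file name here is hypothetical):

import cv2
import matplotlib.pyplot as plt

# 'example.jpg' is a hypothetical path; cv2.imread returns a BGR array, or None on failure.
img_bgr = cv2.imread('example.jpg')
if img_bgr is None:
    raise IOError('could not read example.jpg')

# Swap the B and R channels so RGB consumers such as matplotlib show correct colors.
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

plt.imshow(img_rgb)  # matplotlib interprets the array as RGB
plt.show()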

utils.py (project: image-segmentation-fcn, author: ljanyst)
def draw_labels(img, labels, label_colors, convert=True):
    """
    Draw the labels on top of the input image
    :param img:          the image being classified
    :param labels:       the output of the neural network
    :param label_colors: the label color map defined in the source
    :param convert:      should the output be converted to RGB
    """
    labels_colored = np.zeros_like(img)
    for label in label_colors:
        label_mask = labels == label
        labels_colored[label_mask] = label_colors[label]
    img = cv2.addWeighted(img, 1, labels_colored, 0.8, 0)
    if not convert:
        return img
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

#-------------------------------------------------------------------------------
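A minimal usage sketch for draw_labels above; the label_colors mapping, input image, and label array here are synthetic stand-ins, not the project's actual data:

import numpy as np
import cv2

label_colors = {0: (0, 0, 0), 1: (0, 0, 255)}      # hypothetical class-id -> BGR color map
img = np.full((480, 640, 3), 128, dtype=np.uint8)  # synthetic gray BGR image
labels = np.zeros(img.shape[:2], dtype=np.uint8)   # dummy per-pixel class predictions
labels[100:200, 100:200] = 1                       # pretend class 1 covers this region

overlay_rgb = draw_labels(img, labels, label_colors, convert=True)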
kerasext.py (project: kaggle_amazon, author: asanakoy)
def predict(model_name, model, images_dir, image_ids, batch_size=64, tile_size=224):
    x_test = np.zeros((len(image_ids), tile_size, tile_size, 3), dtype=np.float32)

    for idx, image_name in tqdm(enumerate(image_ids), total=len(image_ids)):
        # img = imread(join(images_dir, '{}.jpg'.format(image_name)))
        image_path = join(images_dir, '{}.jpg'.format(image_name))
        try:
            img = cv2.imread(image_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = np.asarray(cv2.resize(img, (tile_size, tile_size)), dtype=np.float32)
            x_test[idx, ...] = img
        except Exception as e:
            print(e)
            print('image:', image_path)
    x_test = get_preprocess_input_fn(model_name)(x_test)
    print(x_test.shape)
    predictions = model.predict(x_test, batch_size=batch_size, verbose=1)
    return predictions
image_analyser.py (project: RacingRobot, author: sergionr2)
def extractInfo(self):
        try:
            while not self.exit:
                try:
                    frame = self.frame_queue.get(block=True, timeout=1)
                except queue.Empty:
                    print("Queue empty")
                    continue
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if self.debug:
                    self.out_queue.put(item=frame, block=False)
                else:
                    if self.frame_num % SAVE_EVERY == 0:
                        cv2.imwrite("debug/{}_{}.jpg".format(experiment_time, self.frame_num), frame)
                        pass
                    try:
                        turn_percent, centroids = processImage(frame)
                        self.out_queue.put(item=(turn_percent, centroids), block=False)
                    except Exception as e:
                        print("Exception in RBGAnalyser processing image: {}".format(e))
                self.frame_num += 1
        except Exception as e:
            print("Exception in RBGAnalyser after loop: {}".format(e))
predict_imagenet.py (project: ecs-deep-learning-workshop, author: awslabs)
def predict(filename, mod, synsets):
  img = cv2.imread(filename)
  if img is None:
    return None
  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
  img = cv2.resize(img, (224, 224))
  img = np.swapaxes(img, 0, 2)
  img = np.swapaxes(img, 1, 2)  # HWC -> CHW, as the MXNet model expects
  img = img[np.newaxis, :]      # add batch dimension

  mod.forward(Batch([mx.nd.array(img)]))
  prob = mod.get_outputs()[0].asnumpy()
  prob = np.squeeze(prob)

  a = np.argsort(prob)[::-1]    
  for i in a[0:5]:
    print('probability=%f, class=%s' %(prob[i], synsets[i]))
webcam.py (project: ssd_tensorflow, author: seann999)
def start_stream(self):
        bytes = None

        print("starting stream...")
        stream = urllib2.urlopen(self.address) #'http://192.168.100.102:8080/video'
        bytes = b''

        while True:
            bytes += stream.read(1024)
            a = bytes.find(b'\xff\xd8')  # JPEG start-of-image (SOI) marker
            b = bytes.find(b'\xff\xd9')  # JPEG end-of-image (EOI) marker
            if a != -1 and b != -1:
                jpg = bytes[a:b+2]
                bytes = bytes[b+2:]
                self.image = cv2.cvtColor(cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
                #cv2.imshow('i', self.image)
                #cv2.waitKey(1)
hsvmoder.py (project: flight-stone, author: asmateus)
def hsvModer(self, index, hsv_valueT, hsv_value_B):
        img_BGR = self.img[index]
        img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
        img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)

        lower_red = np.array(hsv_value_B)
        upper_red = np.array(hsv_valueT)

        mask = cv2.inRange(img_HSV, lower_red, upper_red)
        res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
        if self.erosion:
            kernel = np.ones((5, 5), np.uint8)
            res = cv2.erode(res, kernel, iterations=1)
        if self.dilate:
            kernel = np.ones((9, 9), np.uint8)
            res = cv2.dilate(res, kernel, iterations=1)

        return res
custom_controllers.py (project: flight-stone, author: asmateus)
def pullData(self):
        try:
            if self.pth:
                capture = cv2.VideoCapture(1)
                capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.device['baudrate'][1])
                capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.device['baudrate'][0])

                while True:
                    if self.endtr:
                        capture.release()
                        return

                    _, frame = capture.read()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                    self.response.assignStatus(RESPONSE_STATUS['OK'])
                    self.response.assignData(frame)

                    yield self.response
        except Exception:
            traceback.print_exc(file=sys.stdout)
            self.endCommunication()
            print('Video ended or interrupted, dropped Buffer')
gui_main.py (project: Farmbot_GeneralAP, author: SpongeYao)
def display_panel_mergeframe(self, arg_frame, arg_stepX, arg_stepY): 
        print('*** ', len(arg_frame.shape))
        if len(arg_frame.shape)==3:
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB)
        else: 
            tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_GRAY2RGB)

        tmp_frame= cv2.resize(tmp_frame,(self.mergeframe_splitX,self.mergeframe_splitY),interpolation=cv2.INTER_LINEAR)
        begX= gui_vars.interval_x+self.mergeframe_splitX*arg_stepX
        begY= self.mergeframe_spaceY+ self.mergeframe_splitY* arg_stepY 
        self.mergeframe[begY:begY+ self.mergeframe_splitY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        #begY= self.mergeframe_height- 50- self.mergeframe_splitY*arg_stepY
        #self.mergeframe[begY-self.mergeframe_splitY:begY, begX: begX+ self.mergeframe_splitX]= tmp_frame
        self.mergeframe_stepX= arg_stepX
        self.mergeframe_stepY= arg_stepY
        print('>> mergeframe_splitY, splitX= ', self.mergeframe_splitY, ', ', self.mergeframe_splitX)
        print('>> tmp_frame.shape[0,1]= ', tmp_frame.shape[0], ', ', tmp_frame.shape[1])

        result = Image.fromarray(self.mergeframe)
        result = ImageTk.PhotoImage(result)
        self.panel_mergeframe.configure(image = result)
        self.panel_mergeframe.image = result
caffeNetViewer.py (project: caffeNetViewer, author: birolkuyumcu)
def runCaffeModel(self):
        iname = str(self.ui.comboBoxImage.currentText())
        self.cImg = cv2.imread(iname)
        self.cImg = cv2.cvtColor(self.cImg, cv2.COLOR_BGR2RGB)
        self.ui.plainTextEdit.appendPlainText('Model Running ... ')
        self.ui.plainTextEdit.appendPlainText('  Image Name : '+iname)
        self.ui.plainTextEdit.appendPlainText("  Image Shape : " + str(self.cImg.shape))
        self.ui.plainTextEdit.appendPlainText("  Model Input Image Shape : " + str(self.net.blobs['data'].data.shape))  

        transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2,0,1))  # move image channels to outermost dimension
        #transformer.set_mean('data', mu)            # subtract the dataset-mean value in each channel
        transformer.set_raw_scale('data', 255)      # rescale from [0, 1] to [0, 255]
        transformer.set_channel_swap('data', (2,1,0))  # swap channels from RGB to BGR          

        image = caffe.io.load_image(iname)
        inData = transformer.preprocess('data', image)

        self.net.blobs['data'].data[...] = [inData]

        self.outClass = self.net.forward()
        self.on_comboBoxLayers_currentIndexChanged()
face_detect.py (project: Python_SelfLearning, author: fukuit)
def facedetect(file):
    """ haar????????????????????????
    Args:
        file : ????????????
    """
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for(ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()
demo.py (project: Lifting-from-the-Deep-release, author: DenisTome)
def main():
    image = cv2.imread(IMAGE_FILE_PATH)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    # Show 2D and 3D poses
    display_results(image, pose_2d, visibility, pose_3d)
visuals.py (project: VariationalAutoEncoder, author: despoisj)
def imscatter(x, y, ax, imageData, zoom=1):
    images = []
    for i in range(len(x)):
        x0, y0 = x[i], y[i]
        # Convert to image
        img = imageData[i]*255.
        img = img.astype(np.uint8)
        # OpenCV uses BGR and plt uses RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = OffsetImage(img, zoom=zoom)
        ab = AnnotationBbox(image, (x0, y0), xycoords='data', frameon=False)
        images.append(ax.add_artist(ab))

    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()

# Show dataset images with T-sne projection of latent space encoding
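A minimal usage sketch for imscatter above, using random 2-D coordinates and random images in place of real latent-space projections (all data here is synthetic):

import numpy as np
import matplotlib.pyplot as plt

n = 20
x = np.random.rand(n)
y = np.random.rand(n)
imageData = np.random.rand(n, 28, 28, 3)  # synthetic images with values in [0, 1], as imscatter expects

fig, ax = plt.subplots()
imscatter(x, y, ax, imageData, zoom=1.0)
plt.show()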
featuresFace.py (project: pyImageClassification, author: tyiannak)
def getFeaturesFace(img):
    RGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    (cascadeFrontal, cascadeProfile, storage) = initialize_face()
    facesFrontal = detect_faces(cv2.cv.fromarray(RGB), cascadeFrontal, cascadeProfile, storage)

    tempF = 0.0
    faceSizes = []
    for f in facesFrontal:
        faceSizes.append(f[2] * f[3] / float(img.shape[0] * img.shape[1]))

    F = []
    F.append(len(facesFrontal))
    if len(facesFrontal)>0:
        F.append(min(faceSizes))
        F.append(max(faceSizes))
        F.append(numpy.mean(faceSizes))
    else:
        F.extend([0, 0, 0]);

    Fnames = ["Faces-Total", "Faces-minSizePer", "Faces-maxSizePer", "Faces-meanSizePer"]
    return (F, Fnames)
    #print F
    #print tempF/len(facesFrontal)
dataRepresentation.py (project: saliency-salgan-2017, author: imatge-upc)
def load(self):

        if self.imageType == InputType.image:
            self.data = cv2.cvtColor(cv2.imread(self.filePath, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
            self.state = LoadState.loaded
        if self.imageType == InputType.imageGrayscale:
            self.data = cv2.cvtColor(cv2.imread(self.filePath, cv2.IMREAD_COLOR), cv2.COLOR_BGR2GRAY)
            self.state = LoadState.loaded
        elif self.imageType == InputType.saliencyMapMatlab:
            self.data = (scipy.io.loadmat(self.filePath)['I'] * 255).astype(np.uint8)
            self.state = LoadState.loaded
        elif self.imageType == InputType.fixationMapMatlab:
            self.data = (scipy.io.loadmat(self.filePath)['I']).nonzero()
            self.state = LoadState.loaded
        elif self.imageType == InputType.empty:
            self.data = None
yolov2_demo.py (project: yolov2_tensorflow, author: biyaa)
def detect(img):
    #print img
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (cfg.image_size, cfg.image_size)).astype(np.float32)

    #inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) 
    inputs = np.reshape(inputs, (1, cfg.image_size, cfg.image_size, 3))
    #inputs = np.transpose(inputs,(0,3,2,1))

    result = detect_from_cvmat(inputs)[0]

    for i in range(len(result)):
        left = (result[i][1] - result[i][3]/2)*img_w
        right = (result[i][1] + result[i][3]/2)*img_w
        top = (result[i][2] - result[i][4]/2)*img_h
        bot = (result[i][2] + result[i][4]/2)*img_h
        result[i][1] = left if left>0 else 0
        result[i][2] = right if right<img_w-1 else img_w-1
        result[i][3] = top if top>0 else 0
        result[i][4] = bot if bot<img_h-1 else img_h-1

    print "result:", result
    return result
SuironIO.py (project: suiron, author: kendricktan)
def get_frame_prediction(self):
        ret, frame = self.cap.read()

        # if we get a frame
        if not ret:
            raise IOError('No image found!')

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
        frame = frame.astype('uint8')

        return frame


    # Normalizes inputs so we don't have to worry about weird
    # characters e.g. \r\n
video_demo.py (project: mxnet-yolo, author: zhreshold)
def commit(self):
        def draw(img,bboxes):
            # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            for b in bboxes:
                xmin,ymin,xmax,ymax = b[:]
                cv2.rectangle(img, (xmin,ymin),  (xmax,ymax),(255,255,0) ,thickness=2)
            return img
        def make_frame(t):
            idx = t*(self.clip.fps/self.fps)
            frm = self.clip.get_frame(t)
            height ,width = frm.shape[:2]
            for t,bboxes in self.record:
                if t==idx:        
                    frm = draw(frm,bboxes)
                else:
                    pass
            return frm
        new_clip = VideoClip(make_frame, duration=self.clip.duration)  # clip with the same duration as the source
        new_clip.fps=self.clip.fps
        new_clip.to_videofile(self.output_path)
demo_yolo_v1.py (project: yolov2-tensorflow, author: shishichang)
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
utils.py (project: deep-learning-experiments, author: raghakot)
def load_img(path, grayscale=False, target_size=None):
    """Utility function to load an image from disk.

    Args:
      path: The image file path.
      grayscale: True to convert to grayscale image (Default value = False)
      target_size: (h, w) to resize to; passed to cv2.resize as (w, h). (Default value = None)

    Returns:
        The loaded numpy image.
    """
    img = io.imread(path, grayscale)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if target_size:
        img = cv2.resize(img, (target_size[1], target_size[0]))
    return img
image.py (project: perception, author: BerkeleyAutomation)
def load_data(filename):
        """Loads a data matrix from a given file.

        Parameters
        ----------
        filename : :obj:`str`
            The file to load the data from. Must be one of .png, .jpg,
            .npy, or .npz.

        Returns
        -------
        :obj:`numpy.ndarray`
            The data array read from the file.
        """
        file_root, file_ext = os.path.splitext(filename)
        data = None
        if file_ext.lower() in COLOR_IMAGE_EXTS:
            data = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
        elif file_ext == '.npy':
            data = np.load(filename)
        elif file_ext == '.npz':
            data = np.load(filename)['arr_0']
        else:
            raise ValueError('Extension %s not supported' % (file_ext))
        return data

