Python examples of cv2.COLOR_BGR2RGB
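
OpenCV reads and writes images in BGR channel order, while most of the Python ecosystem (matplotlib, PIL, MXNet, PyTorch data pipelines) assumes RGB. cv2.COLOR_BGR2RGB is the cvtColor flag that swaps the two orders, and every example below uses it at exactly that boundary. A minimal sketch of the pattern ('example.jpg' is a placeholder path):

import cv2
import matplotlib.pyplot as plt

img_bgr = cv2.imread('example.jpg')                 # OpenCV loads as BGR
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # reorder channels to RGB

plt.imshow(img_rgb)   # matplotlib expects RGB; passing img_bgr would swap red and blue
plt.show()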

flo2img.py (project: Deep360Pilot-optical-flow, author: yenchenlin)
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison when a Matlab-version image is available
app.py (project: flask-app-for-mxnet-img-classifier, author: XD-DENG)
def get_image(file_location, local=False):
    # users can either 
    # [1] upload a picture (local = True)
    # or
    # [2] provide the image URL (local = False)
    if local:
        fname = file_location
    else:
        fname = mx.test_utils.download(file_location, dirname="static/img_pool")

    img = cv2.imread(fname)
    if img is None:
        # check before cvtColor, which would raise on a failed read
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # convert into format (batch, RGB, width, height)
    img = cv2.resize(img, (224, 224))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]

    return img
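
The two swapaxes calls above turn an (H, W, C) image into (C, H, W) before the batch axis is added. An equivalent one-step formulation, shown as a sketch rather than a change to the project's code:

# (H, W, C) -> (1, C, H, W) in a single transpose
img = img.transpose(2, 0, 1)[np.newaxis, :]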
main.py (project: pynephoscope, author: neXyon)
def selectImage(self, index):
        if index >= len(self.files) or index < 0:
            self.ui.imageView.setText("No images found.")
            return

        self.index = index
        self.image = cv2.imread(self.files[index], 1)

        image = self.modes[self.current_mode].getImage()

        if len(image.shape) < 3 or image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # describe the QImage with the converted image's own geometry
        height, width, channels = image.shape
        bytesPerLine = channels * width

        qimage = QtGui.QImage(image, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)

        self.ui.imageView.setPixmap(QtGui.QPixmap.fromImage(qimage))
test.py (project: yolo_tensorflow, author: hizhangp)
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
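
The loop above rescales box coordinates from network-input space back to the original frame. A worked example with assumed sizes (448x448 network input, 640x480 frame):

# x and w scale by img_w / image_size, y and h by img_h / image_size
image_size, img_w, img_h = 448, 640, 480
x_net = 224.0                          # box center x in network coordinates
x_frame = x_net * img_w / image_size   # 320.0, back in frame pixels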
main.py (project: guided-filter, author: lisabug)
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e))
camera_cv.py (project: STS-PiLot, author: mark-orion)
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()  # Python 2 StringIO; io.BytesIO on Python 3
            camtest = False
            while not camtest:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if no client has asked for a frame in the
            # last 10 seconds, stop the thread
            if time.time() - cls.last_access > 10:
                break
facedataset.py (project: structured-output-ae, author: sbelharbi)
def plot_over_img(self, img, x, y, x_pr, y_pr, bb_gt):
        """Plot the landmarks over the image with the bbox."""
        plt.close("all")
        fig = plt.figure(frameon=False)  # , figsize=(15, 10.8), dpi=200
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), aspect="auto")
        ax.scatter(x, y, s=10, color='r')
        ax.scatter(x_pr, y_pr, s=10, color='g')
        rect = patches.Rectangle(
            (bb_gt[0], bb_gt[1]), bb_gt[2]-bb_gt[0], bb_gt[3]-bb_gt[1],
            linewidth=1, edgecolor='b', facecolor='none')
        ax.add_patch(rect)
        fig.add_axes(ax)

        return fig
process_predictions.py (project: traffic_detection_yolo2, author: wAuner)
def create_heatmaps(img, pred):
    """
    Uses objectness probability to draw a heatmap on the image and returns it
    """
    # find anchors with highest prediction
    best_pred = np.max(pred[..., 0], axis=-1)
    # convert probabilities to colormap scale
    best_pred = np.uint8(best_pred * 255)
    # apply color map
    # cv2 colormaps create BGR, not RGB
    cmap = cv2.cvtColor(cv2.applyColorMap(best_pred, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
    # resize the color map to fit image
    cmap = cv2.resize(cmap, img.shape[1::-1], interpolation=cv2.INTER_NEAREST)

    # overlay cmap with image
    return cv2.addWeighted(cmap, 1, img, 0.5, 0)
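
A usage sketch for create_heatmaps; the shapes are assumptions inferred from the code (an HxWx3 uint8 frame and a grid of per-anchor objectness scores in [0, 1]):

import numpy as np

img = np.zeros((416, 416, 3), np.uint8)   # dummy frame
pred = np.random.rand(13, 13, 5, 1)       # 13x13 grid, 5 anchors, objectness in channel 0
overlay = create_heatmaps(img, pred)      # frame with the JET heatmap blended on top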
main.py (project: YOLO-Object-Detection-Tensorflow, author: huseinzol05)
def detect(img):
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (settings.image_size, settings.image_size))
    inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) * 2.0 - 1.0
    inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
    result = detect_from_cvmat(inputs)[0]
    print(result)

    for i in range(len(result)):
        result[i][1] *= (1.0 * img_w / settings.image_size)
        result[i][2] *= (1.0 * img_h / settings.image_size)
        result[i][3] *= (1.0 * img_w / settings.image_size)
        result[i][4] *= (1.0 * img_h / settings.image_size)

    return result
camera_cv.py (project: PiWifiCam, author: mark-orion): the frame-grabber loop is identical, line for line, to the STS-PiLot camera_cv.py example above.
face_recognizer.py (project: image_recognition, author: tue-robotics)
def _get_representation(self, bgr_image):
        """
        Gets the vector of a face in the image
        :param bgr_image: The input image
        :return: The vector representation
        """
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        bb = self._align.getLargestFaceBoundingBox(rgb_image)
        if bb is None:
            raise Exception("Unable to find a face in image")

        aligned_face = self._align.align(96, rgb_image, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        if aligned_face is None:
            raise Exception("Unable to align face bb image")

        return self._net.forward(aligned_face)
argumentation_utils.py (project: bot2017Fin, author: AllanYiin)
def equal_color(img: Image, color):
    arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
    boundaries = []
    boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
                       [min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
    for (lower, upper) in boundaries:
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(arr_img, lower, upper)
        res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
        res = cv2.resize(res, (img.size[0], img.size[1]))
        cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(cv2_im)

    # return after the loop so all boundaries are processed
    return output_img
opencv_utils.py (project: rekognition-video-utils, author: awslabs)
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))  # cv2.CAP_PROP_FRAME_COUNT in OpenCV 3+
    count = 0
    while vidcap.isOpened() and count <= length:
        # read every frame so the capture advances; yield only every `inc`-th frame
        success, image = vidcap.read()
        if not success:
            break
        if count % inc == 0:
            cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if fmt == 'PIL':
                im = Image.fromarray(cv2_im)
            #elif fmt == 'DISK':
                #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
            else:
                im = cv2_im
            yield count, im
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
app2.py (project: ecs-mxnet-example, author: awslabs)
def predict(url, mod, synsets):
    req = urllib2.urlopen(url)
    arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
    cv2_img = cv2.imdecode(arr, -1)
    if cv2_img is None:
        # check before cvtColor, which would raise on a failed decode
        return None
    img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224))
    # convert into format (batch, RGB, width, height)
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]

    mod.forward(Batch([mx.nd.array(img)]))
    prob = mod.get_outputs()[0].asnumpy()
    prob = np.squeeze(prob)

    a = np.argsort(prob)[::-1]
    out = ''
    for i in a[0:5]:
        out += 'probability=%f, class=%s\n' % (prob[i], synsets[i])
    return out
datagen.py (project: hourglasstensorlfow, author: wbenbihi)
def open_img(self, name, color = 'RGB'):
        """ Open an image 
        Args:
            name    : Name of the sample
            color   : Color Mode (RGB/BGR/GRAY)
        """
        if name[-1] in self.letter:
            name = name[:-1]
        img = cv2.imread(os.path.join(self.img_dir, name))
        if color == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return img
        elif color == 'BGR':
            return img
        elif color == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            return img
        else:
            print('Color mode supported: RGB/BGR/GRAY.')
predictClass.py (project: hourglasstensorlfow, author: wbenbihi)
def detect(self, img):
        """ Method for Object Detection
        Args:
            img         : Input Image (BGR Image)
        Returns:
            result      : List of Bounding Boxes
        """
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))
        result = self.detect_from_cvmat(inputs)[0]
        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)
        return result
fcn_predict.py (project: fully-convolutional-network-semantic-segmentation, author: alecng94)
def preprocessImg(imgPath, clipSize):
    if clipSize != 0:
        im = enhance(imgPath, clipSize)
    else:
        im = cv2.imread(imgPath)
        im = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))

    # switch to BGR, subtract mean
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:,:,::-1]
    in_ -= np.array((104.00698793,116.66876762,122.67891434))

    # make dims C x H x W for Caffe
    in_ = in_.transpose((2,0,1))

    return in_
detect.py (project: age-gender-classification, author: yunsangq)
def run(self, filename):
        img = cv2.imread(filename)
        self.h_img, self.w_img, _ = img.shape
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray(img_RGB)
        inputs = np.zeros((1, 448, 448, 3), dtype='float32')
        inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_19, feed_dict=in_dict)
        faces = self.interpret_output(net_output[0])
        images = []
        for i, (x, y, w, h, p) in enumerate(faces):
            images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))

        print('%d faces detected' % len(images))

        for (x, y, w, h, p) in faces:
            print('Face found [%d, %d, %d, %d] (%.2f)' % (x, y, w, h, p))
            self.draw_rect(img, x, y, w, h)
            # Fix in case nothing found in the image
        outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
        cv2.imwrite(outfile, img)
        return images, outfile
video_demo.py (project: single_shot_multibox_detector, author: oarriaga)
def start_video(self, model):
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                continue
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows()
pytorch_datasets.py (project: single_shot_multibox_detector, author: oarriaga)
def pull_item(self, index):
        img_id = self.ids[index]

        target = ET.parse(self._annopath % img_id).getroot()
        img = cv2.imread(self._imgpath % img_id)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # img = imread(self._imgpath % img_id)
        height, width, channels = img.shape

        if self.target_transform is not None:
            target = self.target_transform(target, width, height)

        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4],
                                                target[:, 4])
            # to rgb
            img = img[:, :, (2, 1, 0)]
            # img = img.transpose(2, 0, 1)
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        # uncomment to return a CHW tensor for pytorch_eval:
        # return torch.from_numpy(img).permute(2, 0, 1), target, height, width
        return img, target, height, width
pytorch_datasets.py (project: single_shot_multibox_detector, author: oarriaga)
def pull_image(self, index):
        '''Returns the original image at index as an RGB numpy array

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            numpy img (RGB)
        '''
        img_id = self.ids[index]
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
        # return imread(self._imgpath % img_id)
VideoStream.py (project: dataArtist, author: radjkarl)
def _grabImage(self):
        w = self.display.widget
        rval, img = self.vc.read()
        if rval:
            # COLOR
            if self.pGrayscale.value():
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            else:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            #img = cv2.transpose(img)
            if self.pFloat.value():
                img = toFloatArray(img)
            i = w.image
            b = self.pBuffer.value()
            if b:
                # BUFFER LAST N IMAGES
                if i is None or len(i) < b:
                    self.display.addLayer(data=img)
                else:
                    # TODO: implement as a ring buffer using np.roll() (see sketch below)
                    img = np.insert(i, 0, img, axis=0)
                    img = img[:self.pBuffer.value()]
                    w.setImage(img, autoRange=False, autoLevels=False)
            else:
                w.setImage(img, autoRange=False, autoLevels=False)
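
A minimal sketch of the ring-buffer idea from the TODO above, assuming the layer stack is a fixed-size NumPy array of shape (N, H, W) or (N, H, W, C); push_frame is a hypothetical helper, not part of dataArtist:

import numpy as np

def push_frame(buf, frame):
    # Shift existing frames one slot toward the end and overwrite slot 0,
    # dropping the oldest frame. Note np.roll copies the array, so this is
    # simpler than np.insert plus slicing but not allocation-free; a true
    # ring buffer would keep a moving write index instead.
    buf = np.roll(buf, 1, axis=0)
    buf[0] = frame
    return buf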
visual_mpc_server.py (project: visual_mpc, author: febert)
def init_traj_visualmpc_handler(self, req):
        self.igrp = req.igrp
        self.i_traj = req.itr

        self.t = 0

        # create the controller before assigning the goal image; in the original
        # ordering the goal image was set on the previous controller and then
        # discarded when a fresh CEM_controller was constructed afterwards
        self.cem_controller = CEM_controller(self.agentparams, self.policyparams, self.predictor, save_subdir=req.save_subdir)
        self.save_subdir = req.save_subdir

        if 'use_goalimage' in self.policyparams:
            goal_main = self.bridge.imgmsg_to_cv2(req.goalmain)
            goal_main = cv2.cvtColor(goal_main, cv2.COLOR_BGR2RGB)
            # goal_aux1 = self.bridge.imgmsg_to_cv2(req.goalaux1)
            # goal_aux1 = cv2.cvtColor(goal_aux1, cv2.COLOR_BGR2RGB)
            Image.fromarray(goal_main).show()
            goal_main = goal_main.astype(np.float32) / 255.
            self.cem_controller.goal_image = goal_main

        print 'init traj{} group{}'.format(self.i_traj, self.igrp)

        if 'ndesig' in self.policyparams:
            self.initial_pix_distrib = []
        else:
            self.initial_pix_distrib1 = []
            self.initial_pix_distrib2 = []
        return init_traj_visualmpcResponse()
robot_recorder.py (project: visual_mpc, author: febert)
def save(self, i_save, action, endeffector_pose):
        self.t_savereq = rospy.get_time()
        assert self.instance_type == 'main'

        if self.use_aux:
            # request save at auxiliary recorders
            try:
                rospy.wait_for_service('get_kinectdata', 0.1)
                resp1 = self.save_kinectdata_func(i_save)
            except (rospy.ServiceException, rospy.ROSException), e:
                rospy.logerr("Service call failed: %s" % (e,))
                raise ValueError('get_kinectdata service failed')

        if self.save_images:
            self._save_img_local(i_save)

        if self.save_actions:
            self._save_state_actions(i_save, action, endeffector_pose)

        if self.save_gif:
            highres = cv2.cvtColor(self.ltob.img_cv2, cv2.COLOR_BGR2RGB)
            print 'highres dim', highres.shape
            self.highres_imglist.append(highres)
emotion.py (project: projectoxford, author: zooba)
def _renderResultOnImage(self, result, arr):
        """
            Draws boxes and text representing each face's emotion.
        """

        import operator, cv2

        img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            cv2.rectangle(img,(faceRectangle['left'],faceRectangle['top']),
                               (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                               color = (255,0,0), thickness = 5)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            currEmotion = max(iter(currFace['scores'].items()), key=operator.itemgetter(1))[0]

            textToWrite = '{0}'.format(currEmotion)
            cv2.putText(img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1)

        return img
video.py (project: vbcg, author: nspi)
def get_frame(self):
        """This function delivers frames from the camera or the hard disk for the GUI

            Returns:
            status -- False if the user has not pressed the ''start'' button, True otherwise
            frame -- a black frame if the user has not pressed the ''start'' button; otherwise a frame from camera or disk
        """

        # Waiting for the user to press the ''start'' button
        if self.eventVideoReady.is_set():
            # Read current frame from thread
            frame = self.currentFrame

            # Convert color to RGB
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # Return status and frame
            return True, frame

        else:
            # Return false as status and black frame
            return False, np.zeros((480, 640, 3), np.uint8)
process_data.py (project: behavioral-cloning, author: BillZito)
def change_brightness(img_arr):
  # print('change brightness called')
  adjusted_imgs = np.array([img_arr[0]])
  for img_num in range(0, len(img_arr)):
    img = img_arr[img_num]
    # print('array access')
    # show_image(img)
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) 
    # print('rgb2hsv')
    # show_image(hsv)
    rando = np.random.uniform()
    # print('rando is', rando)
    # scale the V channel; clip so factors > 1 don't wrap around in uint8
    hsv[:,:, 2] = np.clip(hsv[:,:, 2] * (.25 + rando), 0, 255).astype(np.uint8)

    new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    # print('hsv2rgb')
    # show_image(new_img)
    # new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
    # show_images(img.reshape((1,) + img.shape), new_img.reshape((1,) + new_img.shape))
    adjusted_imgs = np.append(adjusted_imgs, new_img.reshape((1,) + new_img.shape), axis=0)

  adjusted_imgs = np.delete(adjusted_imgs, 0, 0)
  return adjusted_imgs
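
The append-and-delete pattern above copies the whole array on every iteration. A compact equivalent, as a sketch (same per-image HSV value scaling, one random factor per image, with clipping to avoid uint8 wrap-around):

def change_brightness_v2(img_arr):
    out = []
    for img in img_arr:
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        factor = .25 + np.random.uniform()
        hsv[:, :, 2] = np.clip(hsv[:, :, 2] * factor, 0, 255).astype(np.uint8)
        out.append(cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB))
    return np.stack(out)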
Camera.py (project: MyoSEMG, author: LuffyDai)
def run(self):

        while True:
            if self.flag:
                ret, image = self.camera.read()
                if image is None:
                    break
                color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                height, width, _ = color_swapped_image.shape

                qt_image = QImage(color_swapped_image.data,
                                  width,
                                  height,
                                  color_swapped_image.strides[0],
                                  QImage.Format_RGB888)
                pixmap = QPixmap(qt_image)
                pixmap = pixmap.scaled(self.videoLabel.geometry().width(), self.videoLabel.geometry().height())
                if self.start_flag and self.support_flag:
                    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
                    self.path = "appdata/" + self.cap.guide.dataset_type + "/data/" + self.cap.date_str + "-" + str(
                        self.cap.guide.gesture_type) + ".avi"
                    self.out = cv2.VideoWriter(self.path, fourcc, 20.0, (640, 480))
                    self.support_flag = False
                if self.name == "Camera" and self.out is not None:
                    self.image_siganl.emit(image)
                self.videoLabel.setPixmap(pixmap)
                if self.name == "Video":
                    time.sleep(1.0 / self.fps)  # float division even if fps is an int
            else:
                pass
test_utils.py (project: histonets-cv, author: sul-cidr)
def test_get_palette_min_values(self):
        image = utils.Image.get_images([self.image_clean])[0].image
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        options = namedtuple(
            'options',
            ['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']
        )(
            quiet=True,
            sample_fraction=.01,
            value_threshold=.01,
            sat_threshold=.01,
        )
        samples = noteshrink.sample_pixels(rgb_image, options)
        palette = utils.get_palette(samples, 2, background_value=1,
                                    background_saturation=1)
        test_palette = np.array([[255, 123, 92], [193, 86, 64]])
        assert palette.shape <= test_palette.shape
        # background colors must coincide
        assert np.array_equal(palette[0], test_palette[0])
test_utils.py (project: histonets-cv, author: sul-cidr)
def test_get_palette_max_values(self):
        image = utils.Image.get_images([self.image_clean])[0].image
        rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        options = namedtuple(
            'options',
            ['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']
        )(
            quiet=True,
            sample_fraction=1,
            value_threshold=1,
            sat_threshold=1,
        )
        samples = noteshrink.sample_pixels(rgb_image, options)
        palette = utils.get_palette(samples, 128, background_value=100,
                                    background_saturation=100)
        background_color = np.array([255, 123, 92])
        assert palette.shape <= (128, 3)
        # background colors must coincide
        assert np.array_equal(palette[0], background_color)

