Python example source code for the IMREAD_COLOR flag
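
The snippets below are collected from open-source projects; each reads or decodes an image with the cv2.IMREAD_COLOR flag, which loads the file as a 3-channel BGR uint8 array (any alpha channel is dropped). A minimal, self-contained sketch of the flag itself, using a placeholder file name:

import cv2

# IMREAD_COLOR (the default) always yields a 3-channel BGR image;
# grayscale files are expanded to 3 channels and alpha is discarded.
img = cv2.imread('example.jpg', cv2.IMREAD_COLOR)        # 'example.jpg' is a placeholder
if img is None:                                          # imread returns None on failure
    raise IOError('could not read example.jpg')
print(img.shape, img.dtype)                              # e.g. (480, 640, 3) uint8

# Related flags, for comparison:
gray = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)   # single channel
raw = cv2.imread('example.jpg', cv2.IMREAD_UNCHANGED)    # keep alpha/bit depth as stored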

Imagehandler.py (project: QRCodeReader, author: Griffintaur)
def __convertImagetoBlackWhite(self):
        self.Image = cv.imread(self.ImagePath, cv.IMREAD_COLOR)
        self.imageOriginal = self.Image
        if self.Image is None:
            print('some problem with the image')
            return None  # avoid calling cvtColor on a missing image
        else:
            print('Image Loaded')

        self.Image = cv.cvtColor(self.Image, cv.COLOR_BGR2GRAY)
        self.Image = cv.adaptiveThreshold(
            self.Image,
            255,                    # Value to assign
            cv.ADAPTIVE_THRESH_MEAN_C,  # Mean thresholding
            cv.THRESH_BINARY,
            11,                     # Block size of small area
            2,                      # Constant to subtract
        )

        return self.Image

tlight_node.py (project: yolo_light, author: chrisgundling)
def updateImage(self, img):
        arr = self.bridge.imgmsg_to_cv2(img,"bgr8") 
        # Uncomment following two lines for CompressedImage topic
        #np_arr = np.fromstring(img.data, np.uint8)
        #arr = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        if self.image_lock.acquire(True):
            self.img = arr
            if self.model is None:
                self.model = self.get_model()
            self.img_out, self.boxes = self.predict(self.model, self.img)
            self.img_out = np.asarray(self.img_out[0,:,:,:])
            for box in self.boxes:
                if 'traffic light' in box['label']:
                    cv2.rectangle(self.img_out,(box['topleft']['x'], 
                                                box['topleft']['y']), 
                                                (box['bottomright']['x'], 
                                                box['bottomright']['y']), 
                                                (255,0,0), 6)
                    cv2.putText(self.img_out, box['label'], 
                               (box['topleft']['x'], 
                               box['topleft']['y'] - 12), 0, 0.6, (255,0,0) ,6//3)

            print(self.img_out.shape)
            self.image_lock.release()
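
The commented-out lines in this callback show how the same node could consume a sensor_msgs/CompressedImage topic instead: the message bytes are decoded straight to a BGR array with cv2.imdecode and cv2.IMREAD_COLOR. A hedged sketch of that variant, with msg assumed to be a CompressedImage:

import numpy as np
import cv2

def compressed_to_bgr(msg):
    # msg.data holds the JPEG/PNG bytes of a sensor_msgs/CompressedImage
    buf = np.frombuffer(msg.data, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)   # BGR array, or None if decoding fails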

cgan.py (project: shenlan, author: vector-1127)
def get_data(datadir):
    #datadir = args.data
    # assume each image is 512x256, split into left and right halves
    imgs = glob.glob(os.path.join(datadir, '*.jpg'))
    data_X = np.zeros((len(imgs),3,img_cols,img_rows))
    data_Y = np.zeros((len(imgs),3,img_cols,img_rows))  
    i = 0
    for file in imgs:
        img = cv2.imread(file,cv2.IMREAD_COLOR)
        img = cv2.resize(img, (img_cols*2, img_rows)) 
        #print('{} {},{}'.format(i,np.shape(img)[0],np.shape(img)[1]))
        img = np.swapaxes(img,0,2)

        X, Y = split_input(img)

        data_X[i,:,:,:] = X
        data_Y[i,:,:,:] = Y
        i = i+1
    return data_X, data_Y
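
split_input is not shown here; given the comment that each 512x256 image carries the input and target side by side, and that the array has already been moved to channels-first by np.swapaxes, it presumably slices the width axis in half. A sketch under that assumption:

def split_input(img):
    # img assumed to be (3, img_cols*2, img_rows) after np.swapaxes(img, 0, 2);
    # the left half is X and the right half is Y (assumed layout).
    cols = img.shape[1] // 2
    return img[:, :cols, :], img[:, cols:, :]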

goal_test.py (project: Vision2016, author: Team3309)
def check_image(name):
    expected_data = json.loads(open('./img/' + name + '.json').read())
    if not expected_data['enabled']:
        return

    expected_targets = expected_data['targets']

    img = cv2.imread('./img/' + name + '.jpg', cv2.IMREAD_COLOR)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    args = config.copy()
    args['img'] = hsv
    args['output_images'] = {}

    actual_targets = find(**args)

    # make sure same number of targets are detected
    assert len(expected_targets) == len(actual_targets)

    # targets is a list of 2-tuples with expected and actual results
    targets = zip(expected_targets, actual_targets)
    # compare all the different features of targets to make sure they match
    for pair in targets:
        expected, actual = pair
        # make sure that the targets are close to where they are supposed to be
        assert is_close(expected['pos']['x'], actual['pos']['x'], 0.02)
        assert is_close(expected['pos']['y'], actual['pos']['y'], 0.02)
        # make sure that the targets are close to the size they are supposed to be
        assert is_close(expected['size']['width'], actual['size']['width'], 0.02)
        assert is_close(expected['size']['height'], actual['size']['height'], 0.02)

silverhair.py (project: chainer-cyclegan, author: Aixile)
def get_example(self, i):
        np.random.seed(None)
        idA = self.trainAkey[np.random.randint(0,len(self.trainAkey))]
        idB = self.trainBkey[np.random.randint(0,len(self.trainBkey))]
        #print(idA)

        imgA = cv2.imread(idA, cv2.IMREAD_COLOR)
        imgB = cv2.imread(idB, cv2.IMREAD_COLOR)

        imgA = self.do_augmentation(imgA)
        imgB = self.do_augmentation(imgB)

        imgA = self.preprocess_image(imgA)
        imgB = self.preprocess_image(imgB)

        return imgA, imgB

sbd_instance_segmentation_dataset.py (project: chainer-fcis, author: knorth55)
def _load_data(self, data_id):
        imgpath = osp.join(
            self.data_dir, 'img/{}.jpg'.format(data_id))
        seg_imgpath = osp.join(
            self.data_dir, 'cls/{}.mat'.format(data_id))
        ins_imgpath = osp.join(
            self.data_dir, 'inst/{}.mat'.format(data_id))
        img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
        img = img.transpose((2, 0, 1))
        mat = scipy.io.loadmat(seg_imgpath)
        seg_img = mat['GTcls'][0]['Segmentation'][0].astype(np.int32)
        seg_img = np.array(seg_img, dtype=np.int32)
        seg_img[seg_img == 255] = -1
        mat = scipy.io.loadmat(ins_imgpath)
        ins_img = mat['GTinst'][0]['Segmentation'][0].astype(np.int32)
        ins_img[ins_img == 255] = -1
        ins_img[np.isin(seg_img, [-1, 0])] = -1
        return img, seg_img, ins_img

solve.py (project: brick-pop-solver, author: LINKIWI)
def load_board(board_image_file_name):
    """
    Parse the input board screenshot into a Board object.

    :param board_image_file_name: Path to the screenshot of the board.
    :return: A Board instance representing the input board.
    """
    img = cv2.imread(board_image_file_name, cv2.IMREAD_COLOR)

    coordinate_map = {}
    for i in range(10):
        for j in range(10):
            pixel_i = IMAGE_BLOCK_START_I + i * IMAGE_BLOCK_OFFSET
            pixel_j = IMAGE_BLOCK_START_J + j * IMAGE_BLOCK_OFFSET
            bgr = img[pixel_i][pixel_j]
            color_code = struct.pack('BBB', *bgr).encode('hex')
            if color_code == 'e4eff7':
                coordinate_map[Coordinate(i, j)] = EmptyColor()
            else:
                coordinate_map[Coordinate(i, j)] = Color(color_code)

    return Board.from_coordinate_map(coordinate_map)
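
Note that struct.pack('BBB', *bgr).encode('hex') is Python 2 only (pack returns a str there); on Python 3, bytes.hex() is the equivalent. A minimal sketch of the substitution, using a made-up pixel value:

import struct

bgr = (0xe4, 0xef, 0xf7)                      # hypothetical pixel in BGR order
color_code = struct.pack('BBB', *bgr).hex()   # 'e4eff7' on Python 3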

page.py (project: skastic, author: mypalmike)
def load(self, filename, analyze_only):
    # Load image, then do various conversions and thresholding.
    self.img_orig = cv2.imread(filename, cv2.IMREAD_COLOR)

    if self.img_orig is None:
      raise CompilerException("File '{}' not found".format(filename))

    self.img_grey = cv2.cvtColor(self.img_orig, cv2.COLOR_BGR2GRAY)
    _, self.img_contour = cv2.threshold(self.img_grey, 250, 255, cv2.THRESH_BINARY_INV)
    _, self.img_text = cv2.threshold(self.img_grey, 150, 255, cv2.THRESH_BINARY)
    self.root_node = None

    self.contours = self.find_contours()

    self.contour_lines, self.contour_nodes = self.categorize_contours()

    self.build_graph()
    self.build_parse_tree()

    self.parse_nodes()

    if not analyze_only:
      self.python_ast = self.root_node.to_python_ast()

data_feeder.py (project: tf-lcnn, author: ildoonet)
def get_data(self):
        idxs = np.arange(len(self.train_list))
        if self.shuffle:
            self.rng.shuffle(idxs)

        caches = {}
        for i, k in enumerate(idxs):
            path = self.train_list[k]
            label = self.lb_list[k]

            if i % self.preload == 0:
                try:
                    caches = ILSVRCTenth._read_tenth_batch(self.train_list[idxs[i:i+self.preload]])
                except Exception as e:
                    logging.warning('tenth local cache failed, err=%s' % str(e))

            content = caches.get(path, '')
            if not content:
                content = ILSVRCTenth._read_tenth(path)

            img = cv2.imdecode(np.fromstring(content, dtype=np.uint8), cv2.IMREAD_COLOR)
            yield [img, label]
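
np.fromstring on raw bytes is deprecated in current NumPy; np.frombuffer is the drop-in replacement and feeds cv2.imdecode the same way. A minimal sketch with a placeholder file standing in for the cached content:

import numpy as np
import cv2

with open('example.jpg', 'rb') as f:            # placeholder path
    content = f.read()
buf = np.frombuffer(content, dtype=np.uint8)    # replaces the deprecated np.fromstring
img = cv2.imdecode(buf, cv2.IMREAD_COLOR)       # None if the bytes are not a decodable image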

buildsequence.py (project: illumeme, author: josmcg)
def build(dirPath,filename = None):
    if filename is None:
        spook = Illumify(getVapor())
        filename = spook.getFilename()
        spooky_x, spooky_y = spook.generate()
    else:
        spook = Illumify(filename)
        filename = spook.getFilename()
        spooky_x, spooky_y = spook.generate()
    dst = copy = img = cv2.imread(filename, cv2.IMREAD_COLOR)
    orig_h, orig_w = img.shape[:2]
    #cv2.imwrite("image005."+filename.split(".")[1],dst)
    h_o,w_o = copy.shape[:2]
    step_h = h_o // 5   # integer steps so the slice bounds stay ints
    step_w = w_o // 5
    for i in range(1, 5):
        h = h_o - step_h*i
        w = w_o - step_w*i
        crop_image = img[spooky_x:spooky_x+h, spooky_y:spooky_y+w]  # width bound uses w
        dst = cv2.resize(crop_image,(orig_w,orig_h))
        cv2.imwrite(dirPath+"/image00" + str(i) +"."+get_filetype(filename),dst)

app.py (project: blcf, author: willard-yuan)
def upload():
    # Get the name of the uploaded file
    file = request.files['file']
    # Check if the file is one of the allowed types/extensions
    if file and allowed_file(file.filename):
        # Make the filename safe, remove unsupported chars
        filename = secure_filename(file.filename)
        # Move the file from the temporary folder to
        # the upload folder we set up
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        # Redirect the user to the uploaded_file route, which
        # will basically show the uploaded file in the browser
        # CV2
        #img_np = cv2.imdecode(np.fromstring(file.read(), np.uint8), cv2.IMREAD_UNCHANGED) # cv2.IMREAD_COLOR in OpenCV 3.1
        img_np = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], filename), -1)
        cv2.imshow("Image", img_np)
        return redirect(url_for('uploaded_file',
                                filename=filename))

# This route is expecting a parameter containing the name
# of a file. Then it will locate that file in the upload
# directory and show it in the browser, so if the user uploads
# an image, that image is going to be shown after the upload

pytorch_datasets.py (project: single_shot_multibox_detector, author: oarriaga)
def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            PIL img
        '''
        img_id = self.ids[index]
        img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
        # return imread(self._imgpath % img_id)
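
The docstring above promises a PIL image, but the method actually returns an RGB NumPy array (OpenCV loads BGR, hence the cvtColor call). If a genuine PIL object is needed, one more conversion does it; a small sketch assuming Pillow is installed and using a placeholder path:

import cv2
from PIL import Image

bgr = cv2.imread('example.jpg', cv2.IMREAD_COLOR)   # placeholder path
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
pil_img = Image.fromarray(rgb)                      # PIL.Image in RGB order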

__init__.py (project: Pytorch-Deeplab, author: speedinghzl)
def __getitem__(self, index):
        datafiles = self.files[index]

        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]

        image = np.asarray(image, np.float32)
        image -= self.mean

        img_h, img_w, _ = image.shape

        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))

        image = image.transpose((2, 0, 1))

        return image, name, size

ucf101Loader.py (project: deepOF, author: bryanyzhu)
def hookTrainData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, label_list = [], [], []
        for idx in sampleIdxs:
            classList = self.trainDict[idx]
            img_pair = classList[np.random.choice(self.trainLenClass[idx], 1)]
            prev_img = img_pair[0]
            next_img = img_pair[1]
            label = idx
            # print prev_img, next_img, label
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            source_list.append(np.expand_dims(cv2.resize(source, (self.image_size[1], self.image_size[0])), 0))
            target_list.append(np.expand_dims(cv2.resize(target, (self.image_size[1], self.image_size[0])) ,0))
            label_list.append(np.expand_dims(label, 0))
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(label_list, axis=0)
        # Adding the channel dimension if images are read in grayscale
        # return np.expand_dims(source_list, axis = 3), np.expand_dims(target_list, axis = 3)

sintelLoader.py (project: deepOF, author: bryanyzhu)
def hookTrainData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        input_list, flow_list = [], []
        for idx in sampleIdxs:
            img_list = self.trainList[idx]
            multi_input = []
            multi_flow = []
            for time_idx in xrange(self.time_step):
                imgData = cv2.imread(os.path.join(self.img_path, img_list[time_idx]), cv2.IMREAD_COLOR)
                multi_input.append(np.expand_dims(cv2.resize(imgData, (self.image_size[1], self.image_size[0])), 0))
                # We have self.time_step images, but self.time_step - 1 flows.
                if time_idx != self.time_step - 1:
                    flow = utils.readFlow(os.path.join(self.data_path, 'training', "flow", (img_list[time_idx][:-4] + ".flo")))
                    multi_flow.append(np.expand_dims(flow, 0))
            input_list.append(np.concatenate(multi_input, axis=3))
            flow_list.append(np.concatenate(multi_flow, axis=3))
        return np.concatenate(input_list, axis=0), np.concatenate(flow_list, axis=0)

sintelLoader.py (project: deepOF, author: bryanyzhu)
def hookValData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        input_list, flow_list = [], []
        for idx in sampleIdxs:
            img_list = self.valList[idx]
            multi_input = []
            multi_flow = []
            for time_idx in xrange(self.time_step):
                imgData = cv2.imread(os.path.join(self.img_path, img_list[time_idx]), cv2.IMREAD_COLOR)
                multi_input.append(np.expand_dims(cv2.resize(imgData, (self.image_size[1], self.image_size[0])), 0))
                # We have self.time_step images, but self.time_step - 1 flows.
                if time_idx != self.time_step - 1:
                    flow = utils.readFlow(os.path.join(self.data_path, 'training', "flow", (img_list[time_idx][:-4] + ".flo")))
                    multi_flow.append(np.expand_dims(flow, 0))
            input_list.append(np.concatenate(multi_input, axis=3))
            flow_list.append(np.concatenate(multi_flow, axis=3))
        return np.concatenate(input_list, axis=0), np.concatenate(flow_list, axis=0)

sintelLoader.py (project: deepOF, author: bryanyzhu)
def calculateMean(self):
        numSamples = len(self.trainList)
        # OpenCV loads image as BGR order
        B, G, R = 0, 0, 0
        for idx in xrange(numSamples):
            frameID = self.trainList[idx]
            prev_img = frameID[0]
            next_img = frameID[1]
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            B += np.mean(source[:,:,0], axis=None)
            B += np.mean(target[:,:,0], axis=None)
            G += np.mean(source[:,:,1], axis=None)
            G += np.mean(target[:,:,1], axis=None)
            R += np.mean(source[:,:,2], axis=None)
            R += np.mean(target[:,:,2], axis=None)
        B = B / (2*numSamples)
        G = G / (2*numSamples)
        R = R / (2*numSamples)
        return (B,G,R)
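
The per-channel accumulation above can be written more compactly: images come back from cv2.imread(..., cv2.IMREAD_COLOR) in BGR order, so averaging over the height and width axes gives the (B, G, R) means in one call. A hedged sketch of that alternative for a single image pair:

import numpy as np

def pair_mean_bgr(source, target):
    # source/target: HxWx3 BGR arrays as returned by cv2.imread(..., cv2.IMREAD_COLOR)
    means = (np.mean(source, axis=(0, 1)) + np.mean(target, axis=(0, 1))) / 2.0
    return tuple(means)   # (B, G, R)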

flyingChairsLoader.py (project: deepOF, author: bryanyzhu)
def calculateMean(self):
        numSamples = len(self.trainList)
        # OpenCV loads image as BGR order
        B, G, R = 0, 0, 0
        for idx in xrange(numSamples):
            frameID = self.trainList[idx]
            prev_img = frameID + "_img1.ppm"
            next_img = frameID + "_img2.ppm"
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            B += np.mean(source[:,:,0], axis=None)
            B += np.mean(target[:,:,0], axis=None)
            G += np.mean(source[:,:,1], axis=None)
            G += np.mean(target[:,:,1], axis=None)
            R += np.mean(source[:,:,2], axis=None)
            R += np.mean(target[:,:,2], axis=None)
        B = B / (2*numSamples)
        G = G / (2*numSamples)
        R = R / (2*numSamples)
        return (B,G,R)

ucf101Loader.py (project: deepOF, author: bryanyzhu)
def hookTrainData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, label_list = [], [], []
        for idx in sampleIdxs:
            classList = self.trainDict[idx]
            img_pair = classList[np.random.choice(self.trainLenClass[idx], 1)]
            prev_img = img_pair[0]
            next_img = img_pair[1]
            label = idx
            # print prev_img, next_img, label
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            if self.is_crop:
                source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
                target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
            source_list.append(np.expand_dims(source, 0))
            target_list.append(np.expand_dims(target, 0))
            label_list.append(np.expand_dims(label, 0))
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(label_list, axis=0)

ucf101Loader.py (project: deepOF, author: bryanyzhu)
def hookValData(self, sampleIdxs, classID):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, label_list = [], [], []
        for idx in sampleIdxs:
            img_pair = self.testDict[classID][idx]
            prev_img = img_pair[0]
            next_img = img_pair[1]
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            if self.is_crop:
                source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
                target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
            source_list.append(np.expand_dims(source, 0))
            target_list.append(np.expand_dims(target, 0))
            label_list.append(np.expand_dims(classID, 0))
            # print prev_img, next_img, classID
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(label_list, axis=0)

sintelLoader.py (project: deepOF, author: bryanyzhu)
def hookTrainData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, flow_gt = [], [], []
        for idx in sampleIdxs:
            img_pair = self.trainList[idx]
            prev_img = img_pair[0]
            next_img = img_pair[1]
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            flow = utils.readFlow(os.path.join(self.data_path, 'training', "flow", (prev_img[:-4] + ".flo")))
            if self.is_crop:
                source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
                target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
            source_list.append(np.expand_dims(source, 0))
            target_list.append(np.expand_dims(target, 0))
            flow_gt.append(np.expand_dims(flow, 0))
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(flow_gt, axis=0)

sintelLoader.py (project: deepOF, author: bryanyzhu)
def calculateMean(self):
        numSamples = len(self.trainList)
        # OpenCV loads image as BGR order
        B, G, R = 0, 0, 0
        for idx in xrange(numSamples):
            frameID = self.trainList[idx]
            prev_img = frameID[0]
            next_img = frameID[1]
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            B += np.mean(source[:,:,0], axis=None)
            B += np.mean(target[:,:,0], axis=None)
            G += np.mean(source[:,:,1], axis=None)
            G += np.mean(target[:,:,1], axis=None)
            R += np.mean(source[:,:,2], axis=None)
            R += np.mean(target[:,:,2], axis=None)
        B = B / (2*numSamples)
        G = G / (2*numSamples)
        R = R / (2*numSamples)
        return (B,G,R)

flyingChairsLoader.py (project: deepOF, author: bryanyzhu)
def hookTrainData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, flow_gt = [], [], []
        for idx in sampleIdxs:
            frameID = self.trainList[idx]
            prev_img = frameID + "_img1.ppm"
            next_img = frameID + "_img2.ppm"
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            # print source.shape
            flow = utils.readFlow(os.path.join(self.img_path, (frameID + "_flow.flo")))
            # print flow.shape
            if self.is_crop:
                source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
                target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
            source_list.append(np.expand_dims(source, 0))
            target_list.append(np.expand_dims(target ,0))
            flow_gt.append(np.expand_dims(flow, 0))
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(flow_gt, axis=0)

flyingChairsLoader.py (project: deepOF, author: bryanyzhu)
def hookValData(self, sampleIdxs):
        assert len(sampleIdxs) > 0, 'we need a non-empty batch list'
        source_list, target_list, flow_gt = [], [], []
        for idx in sampleIdxs:
            frameID = self.valList[idx]
            prev_img = frameID + "_img1.ppm"
            next_img = frameID + "_img2.ppm"
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            flow = utils.readFlow(os.path.join(self.img_path, (frameID + "_flow.flo")))
            if self.is_crop:
                source = cv2.resize(source, (self.crop_size[1], self.crop_size[0]))
                target = cv2.resize(target, (self.crop_size[1], self.crop_size[0]))
            source_list.append(np.expand_dims(source, 0))
            target_list.append(np.expand_dims(target ,0))
            flow_gt.append(np.expand_dims(flow, 0))
        return np.concatenate(source_list, axis=0), np.concatenate(target_list, axis=0), np.concatenate(flow_gt, axis=0)

flyingChairsLoader.py (project: deepOF, author: bryanyzhu)
def calculateMean(self):
        numSamples = self.trainNum
        # OpenCV loads image as BGR order
        B, G, R = 0, 0, 0
        for idx in xrange(numSamples):
            frameID = self.trainList[idx]
            prev_img = frameID + "_img1.ppm"
            next_img = frameID + "_img2.ppm"
            source = cv2.imread(os.path.join(self.img_path, prev_img), cv2.IMREAD_COLOR)
            target = cv2.imread(os.path.join(self.img_path, next_img), cv2.IMREAD_COLOR)
            B += np.mean(source[:,:,0], axis=None)
            B += np.mean(target[:,:,0], axis=None)
            G += np.mean(source[:,:,1], axis=None)
            G += np.mean(target[:,:,1], axis=None)
            R += np.mean(source[:,:,2], axis=None)
            R += np.mean(target[:,:,2], axis=None)
        B = B / (2*numSamples)
        G = G / (2*numSamples)
        R = R / (2*numSamples)
        return (B,G,R)

main.py (project: Deep_Learning_In_Action, author: iFighting)
def __iter__(self):
        for k in range(self.count // self.batch_size):  # integer division so range() gets an int
            data = []
            label = []
            for i in range(self.batch_size):
                num = gen_rand()
                img = self.captcha.generate(num)
                img = np.fromstring(img.getvalue(), dtype='uint8')
                img = cv2.imdecode(img, cv2.IMREAD_COLOR)
                img = cv2.resize(img, (self.width, self.height))
                cv2.imwrite("./tmp" + str(i % 10) + ".png", img)
                img = np.multiply(img, 1/255.0)
                img = img.transpose(2, 0, 1)
                data.append(img)
                label.append(get_label(num))

            data_all = [mx.nd.array(data)]
            label_all = [mx.nd.array(label)]
            data_names = ['data']
            label_names = ['softmax_label']

            data_batch = OCRBatch(data_names, data_all, label_names, label_all)
            yield data_batch

utils.py (project: histonets-cv, author: sul-cidr)
def __init__(self, content=None, image=None):
        self.image = None
        self.format = None
        if isinstance(image, Image):
            self.image = image.image
            self.format = image.format
        elif image is not None:
            self.image = image
        elif content:
            image_format = imghdr.what(file='', h=content)
            if image_format is not None:
                image_array = np.fromstring(content, np.uint8)
                self.image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
                self.format = image_format
        if self.image is None:
            raise click.BadParameter('Image format not supported')

views.py (project: opencv-api, author: last-stand)
def _grab_image(path=None, stream=None, url=None):
    # if the path is not None, then load the image from disk
    if path is not None:
        image = cv2.imread(path)

    # otherwise, the image does not reside on disk
    else:
        # if the URL is not None, then download the image
        if url is not None:
            resp = urllib.urlopen(url)
            data = resp.read()

        # if the stream is not None, then the image has been uploaded
        elif stream is not None:
            data = stream.read()

        # convert the image to a NumPy array and then read it into
        # OpenCV format
        image = np.asarray(bytearray(data), dtype="uint8")
        image = cv2.imdecode(image, cv2.IMREAD_COLOR)

    # return the image
    return image
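
A brief usage sketch of the helper above. The path and URL are placeholders, and note that urllib.urlopen in the body is Python 2 syntax (Python 3 moved it to urllib.request.urlopen):

# load from disk
img = _grab_image(path='uploads/example.jpg')             # placeholder path
# or decode an uploaded stream / a downloaded URL instead:
# img = _grab_image(stream=request.files['file'])         # e.g. a Flask upload
# img = _grab_image(url='http://example.com/image.jpg')   # placeholder URL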

cnn_db_loader.py (project: thesis_scripts, author: PhilippKopp)
def get_mean_image_path(self):
        if not os.path.exists(self.outputfolder+'/'+self.mean_image_filename):
            print('no mean image found. Creating...')
            isomap_size = cv2.imread(self.examples_train[0].images[0], cv2.IMREAD_COLOR).shape[0]
            mean = np.zeros([isomap_size, isomap_size, 3], dtype='float32')

            for example in self.examples_train:
                try:
                    mean+=cv2.imread(example.images[0],cv2.IMREAD_COLOR).astype(dtype='float32')/len(self.examples_train)
                except:
                    e = sys.exc_info()[0]
                    print (str(e))
                    print ('image', example.images[0])
                    exit(0)
            #mean/=len(self.images_train)
            mean_uint8 = mean.astype(dtype='uint8')
            cv2.imwrite(self.outputfolder+'/'+self.mean_image_filename, mean_uint8)
        return self.outputfolder+'/'+self.mean_image_filename

augmentImages.py (project: cancer_nn, author: tanmoyopenroot)
def augmentImages(train_or_valid, image_dir, img_save_dir, save_file):
    if train_or_valid == "train":
        # Training
        print("Augment Training Data")
    else:

        # Validation
        print("Augment Validation Data")
    image_set = glob.glob(image_dir + "*.jpg")
    aug_no = 16
    image_len = len(image_set)

    for index, img in enumerate(image_set):
        img_name = img.split("/")[-1] 
        x = cv2.imread(img, cv2.IMREAD_COLOR)
        # print x.shape
        print("Augmenting Image : {0} / {1} - {2}".format(index, image_len, img_name))
        augment(x, aug_no, img_save_dir, img_name)
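
One caveat in the snippet above: image_dir + "*.jpg" only matches files if image_dir already ends with a path separator. os.path.join avoids that dependency; a small sketch of the safer form:

import glob
import os

image_dir = 'data/train'                                 # placeholder directory
image_set = glob.glob(os.path.join(image_dir, '*.jpg'))  # works with or without a trailing slash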

