Python imread() example source code
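
The snippets below come from open-source projects and mostly call scipy.ndimage.imread or scipy.misc.imread, which are deprecated and have been removed from recent SciPy releases. As a minimal sketch of the basic pattern (not taken from any of the projects below), imageio can be used instead; "example.png" is a placeholder, and the grayscale line is only a rough stand-in for the old flatten=True option:

# Minimal sketch; assumes imageio (v2) and numpy are installed, "example.png" is a placeholder.
import imageio
import numpy as np

rgb = np.asarray(imageio.imread("example.png"))   # (H, W, 3) uint8 array for an RGB image
gray = rgb[..., :3].astype(float).mean(axis=-1)   # simple average; flatten=True used luma weights

print(rgb.shape, gray.shape)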

color.py (project: BilibiliDraw, author: TotoriKira)
def main():
    '''
        Collect the set of colors used in the image and print it as a dict skeleton.
    '''

    im_array = ndimage.imread("rwby.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))  # height, width

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
minibatch.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def _generate_batch(self, meta):
    image = ndimage.imread(meta.image_path)
    height, width, _ = meta.shape
    if height > width:
      scale = self._image_scale_size / width
    else:
      scale = self._image_scale_size / height

    # TODO: the dimension order in Caffe is (batch, channel, height, width)
    resized_image = ndimage.zoom(image, (scale, scale, 1))
    bboxes = np.empty((len(meta.objects), 5))
    for i, obj in enumerate(meta.objects):
      bboxes[i][:4] = obj['bbox']
      bboxes[i][4] = obj['class_index']

    return np.expand_dims(resized_image, 0), scale, bboxes
image_reader.py (project: deeplab_v1_tf1.0, author: automan000)
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth masks.

    Args:
      data_dir: path to the directory with images and masks.
      data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.

    Returns:
      Three lists: image file names, mask file names, and the (height, width) shape of each image.
    """
    images = []
    masks = []
    shape = []

    with open(data_list, 'r') as f:
        for line in f:
            image, mask = line.strip("\n").split(' ')
            images.append(data_dir + image)
            shape.append(ndimage.imread(data_dir + image).shape[:2])
            masks.append(data_dir + mask)
    return images, masks, shape
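
A hedged usage sketch for read_labeled_image_list(); the paths are placeholders and the list file is assumed to contain lines such as '/images/0001.png /masks/0001.png':

# Hypothetical usage; the directory and list-file paths are placeholders.
images, masks, shapes = read_labeled_image_list('/data/voc', '/data/voc/train.txt')
print(len(images), 'image/mask pairs; first image (height, width):', shapes[0])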
HornSchunck.py (project: pyoptflow, author: scivision)
def demo(stem):
    flist = getimgfiles(stem)
    ext = flist[0].suffix

    for i in range(len(flist)-1):
        fn1 = f'{stem}.{i}{ext}'
        im1 = imread(fn1,flatten=True).astype(float)  #flatten=True is rgb2gray
 #       Iold = gaussian_filter(Iold,FILTER)

        fn2 = f'{stem}.{i+1}{ext}'
        im2 = imread(fn2,flatten=True).astype(float)
#        Inew = gaussian_filter(Inew,FILTER)

        U,V = HornSchunck(im1, im2, 1., 100)
        compareGraphs(U,V, im2)

    return U,V
LucasKanade.py (project: pyoptflow, author: scivision)
def demo(stem, kernel=5,Nfilter=7):
    flist = getimgfiles(stem)
    ext = flist[0].suffix
#%% priming read
    im1 = imread(f'{stem}.0{ext}', flatten=True)
    Y,X = im1.shape
#%% evaluate the first frame's POI
    POI = getPOI(X,Y,kernel)
#% get the weights
    W = gaussianWeight(kernel)
#%% loop over all images in directory
    for i in range(1,len(flist)):
        im2 = imread(f'{stem}.{i}{ext}', flatten=True)
        im2 = gaussian_filter(im2, Nfilter)

        V = LucasKanade(im1, im2, POI, W, kernel)

        compareGraphsLK(im1, im2, POI, V)

        im1 = im2
download_data.py (project: arc-pytorch, author: sanyam5)
def omniglot_folder_to_NDarray(path_im):
    alphbts = os.listdir(path_im)
    ALL_IMGS = []

    for alphbt in alphbts:
        chars = os.listdir(os.path.join(path_im, alphbt))
        for char in chars:
            img_filenames = os.listdir(os.path.join(path_im, alphbt, char))
            char_imgs = []
            for img_fn in img_filenames:
                fn = os.path.join(path_im, alphbt, char, img_fn)
                I = imread(fn)
                I = np.invert(I)
                char_imgs.append(I)
            ALL_IMGS.append(char_imgs)

    return np.array(ALL_IMGS)
demo_classification.py (project: One-Shot-Learning-Demo, author: llSourcell)
def load_img_as_points(filename):
    # Load image file and return coordinates of black pixels in the binary image
    #
    # Input
    #  filename : string, absolute path to image
    #
    # Output:
    #  D : [n x 2] rows are coordinates
    #
    I = imread(filename, flatten=True)
    # Convert to boolean array and invert the pixel values
    I = ~np.array(I, dtype=bool)  # boolean mask (np.bool is removed in newer NumPy)
    # Create a new array of all the non-zero element coordinates
    D = np.array(I.nonzero()).T
    return D - D.mean(axis=0)
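
A hedged usage sketch for load_img_as_points(); the path is a placeholder and the image is assumed to be a binary, black-on-white character image:

# Hypothetical call; the path is a placeholder.
D = load_img_as_points('/data/omniglot/char01.png')
print(D.shape)   # (n, 2): mean-centred (row, col) coordinates of the dark pixels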


model_input.py (project: SLAM, author: sanjeevkumar42)
def get_rgbd_file(self, dirname, offset):
        associations = self.seq_dir_map[dirname]['associations']

        if associations[offset, 1].startswith('depth'):
            rgb_filename = os.path.join(dirname, associations[offset, 3])
            depth_filename = os.path.join(dirname, associations[offset, 1])
        else:
            rgb_filename = os.path.join(dirname, associations[offset, 1])
            depth_filename = os.path.join(dirname, associations[offset, 3])

        rgb_img = ndimage.imread(rgb_filename)
        depth_img = ndimage.imread(depth_filename)
        width = height = 224

        # Reshape
        depth_img = np.reshape(depth_img, list(depth_img.shape) + [1])
        depth_img = 255 * depth_img / np.max(depth_img)

        rgbd_img = np.concatenate((rgb_img, depth_img), 2)

        # Resize
        rgbd_img = transform.resize(rgbd_img, [width, height], preserve_range=True)

        return rgb_filename, depth_filename, rgbd_img.astype(np.float32)
model_input.py (project: SLAM, author: sanjeevkumar42)
def read_rgb_image(filepath):
    rgb_img = ndimage.imread(filepath)
    width = height = 224
    img_width = rgb_img.shape[1]
    img_height = rgb_img.shape[0]

    # scale such that smaller dimension is 256
    if img_width < img_height:
        factor = 256.0 / img_width
    else:
        factor = 256.0 / img_height
    rgb_img = transform.rescale(rgb_img, factor, preserve_range=True)

    # crop randomly
    width_start = np.random.randint(0, rgb_img.shape[1] - width)
    height_start = np.random.randint(0, rgb_img.shape[0] - height)

    rgb_img = rgb_img[height_start:height_start + height, width_start:width_start + width]
    return rgb_img
inputdata.py (project: cnn-bnn, author: jpdz)
def rotate_save(path):
  filepath = os.path.join(os.getcwd(),path)
  images_files = os.listdir(filepath)
  num_img = 0
  for i,image in enumerate(images_files):
    image_file =  os.path.join(filepath,image)
    try:
      img = ndimage.imread(image_file)
      img = img[:,:,0]
      new_im = Image.fromarray(img)  # PIL's Image.fromarray; assumes `from PIL import Image`
      for j in range(0,360,72):
        im = new_im.rotate(j)
        filename = "%s%05d.png"%(path,num_img)
        im.save(filename)
        num_img+=1
    except Exception as e:
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
image_handler.py (project: harpreif, author: harpribot)
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        sys.stderr.write('Loaded Image #' + str(self.image_ptr) + ' ...\n')
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.__break_into_jigzaw_pieces()
        self.image_ptr += 1
        self.tries = 1

        return True
image_loader.py (project: harpreif, author: harpribot)
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        print('Loaded New Image')
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        self.image_name = self.image_list[self.image_ptr]

        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.image_ptr += 1

        return True
kitti.py (project: superpixelDepth, author: slundqui)
def getMeanVar(self):
        depths = None
        for f in self.depthFiles:
            depthImg = imread(f).astype(np.float32)/256
            validDepths = depthImg[np.nonzero(depthImg != 0)]
            if depths is None:
                depths = validDepths
            else:
                depths = np.concatenate((depths, validDepths))
        self.mean = np.mean(depths)
        self.std = np.std(depths)
        print("depth mean: ", self.mean)
        print("depth std: ", self.std)

    #Function to return new image and depth file
    #TODO generate random ranking and randomize images
data_utils.py (project: LSH_Memory, author: RUSH-LAB)
def crawl_directory(directory, augment_with_rotations=False, first_label=0):
  """Crawls data directory and returns stuff."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)

    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for idx, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and idx > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + idx)
        info.append(full_file_name)

    if len(files) == 20:
      label_idx += 4 if augment_with_rotations else 1
  return images, labels, info
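
A hedged usage sketch for crawl_directory(); the directory is a placeholder and the layout is assumed to follow Omniglot, with 20 images per character folder:

# Hypothetical call; the directory path is a placeholder.
images, labels, info = crawl_directory('/data/omniglot/images_background',
                                       augment_with_rotations=True)
print(len(images), 'images,', len(set(labels)), 'distinct labels')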
readImg.py (project: SerialPhotoMerge, author: simon-r)
def read(self, file_name=None):

        if file_name is not None:
            self.file_name = file_name
        elif self.file_name is not None:
            pass
        else:
            raise Exception("%s: undefined file name" %
                            sys._getframe().f_code.co_name)

        img_rgb = Image(color_depth=8)

        self.raw = ndimage.imread(self.file_name)
        img_rgb.image = np.array(self.raw, dtype=img_rgb.dtype)

        return img_rgb
readImg.py (project: SerialPhotoMerge, author: simon-r)
def read(self, file_name=None):

        if file_name is not None:
            self.file_name = file_name
        elif self.file_name is not None:
            pass
        else:
            raise Exception("%s: undefined file name" %
                            sys._getframe().f_code.co_name)

        rgb = Image(color_depth=16)
        raw = None

        with rawpy.imread(self.file_name) as raw:
            self.raw = raw.postprocess(output_bps=16)
            rgb.image = np.array(self.raw, dtype=rgb.dtype)

        return rgb
image_handler.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def read_data(self, train_split=0.80, dev_split=0.10, test_split=0.10):
        """
        Class function to read images from `self.image_dir` and split them into three groups: train/dev/test.
        """
        assert (train_split + dev_split + test_split == 1.0)

        all_images = glob.glob(self.image_dir + "*.png")
        data = []

        for image_path in all_images:
            image = imread(image_path, flatten=True)
            image = image.reshape(IMAGE_WIDTH*IMAGE_HEIGHT)
            # image = np.multiply(image, 1.0 / 255.0)  # no scaling here

            data.append(image)

        data = np.array(data)
        data = data.astype(np.uint8)

        total_images = data.shape[0]

        train_limit = int(total_images * train_split)
        dev_limit = train_limit + int(total_images * dev_split)

        self.train = data[:train_limit]
        self.dev = data[train_limit:dev_limit]
        self.test = data[dev_limit:]

        # Only shuffling training data.
        random.shuffle(self.train)

        self.data_dict = {
            'train':    self.train,
            'dev':      self.dev,
            'test':     self.test}
export.py (project: BilibiliDraw, author: TotoriKira)
def main():
    '''
        Collect the set of colors used in the image and print it as a dict skeleton.
    '''

    im_array = ndimage.imread("greytech.png", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
export.py (project: BilibiliDraw, author: TotoriKira)
def main():
    '''
        Collect the set of colors used in the image and print it as a dict skeleton.
    '''

    im_array = ndimage.imread("ustc.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))  # height, width

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
export.py (project: BilibiliDraw, author: TotoriKira)
def main():
    '''
        Collect the set of colors used in the image and print it as a dict skeleton.
    '''

    im_array = ndimage.imread("ms.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))  # height, width

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
data_serialization.py (project: logodetect, author: munibasad)
def load_logo(data_dir):
    image_files = os.listdir(data_dir)
    dataset = np.ndarray(
        shape=(len(image_files), CNN_IN_HEIGHT, CNN_IN_WIDTH, CNN_IN_CH),
        dtype=np.float32)
    print(data_dir)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(data_dir, image)
        try:
            image_data = (ndimage.imread(image_file).astype(float) -
                          PIXEL_DEPTH / 2) / PIXEL_DEPTH
            if image_data.shape != (CNN_IN_HEIGHT, CNN_IN_WIDTH, CNN_IN_CH):
                raise Exception('Unexpected image shape: %s' %
                                str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e,
                  '- it\'s ok, skipping.')

    dataset = dataset[0:num_images, :, :]
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def reshape_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and reshape them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Resizing '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.reshape_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    image_resized = misc.imresize(image, (height, width))
                    misc.imsave(target_folder + "/" + filename, image_resized)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            image_resized = misc.imresize(image, (height, width))
                            misc.imsave(target_folder + "/" + filename, image_resized)
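
A hedged usage sketch; DatasetBuilder is assumed to be the class that defines reshape_images() above, and the folder paths are placeholders:

# Hypothetical call; the class name and paths are assumptions, not the project's exact API.
DatasetBuilder.reshape_images('/data/raw_images', '/data/resized_images',
                              height=64, width=64)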
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def crop_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and center crop them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and crop:
        print("Cropping '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.crop_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    [height_original, width_original, _] = image.shape
                    offset_h = (height_original - height) // 2
                    offset_w = (width_original - width) // 2
                    image_cropped = image[offset_h : height + offset_h, offset_w : width + offset_w, :]
                    misc.imsave(target_folder + "/" + filename, image_cropped)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            [height_original, width_original, _] = image.shape
                            offset_h = (height_original - height) // 2
                            offset_w = (width_original - width) // 2
                            image_cropped = image[offset_h : height + offset_h, offset_w : width + offset_w, :]
                            misc.imsave(target_folder + "/" + filename, image_cropped)
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def convert_to_grayscale(cls, source_folder, target_folder,
                             extensions=('.jpg', '.jpeg', '.png')):
        """ convert images from RGB to Grayscale"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Convert '", source_folder, "' images to grayscale...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_grayscale(source_folder + '/' + filename,
                                         target_folder + '/' + filename,
                                         extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                    misc.imsave(target_folder + "/" + filename, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                            misc.imsave(target_folder + "/" + filename, image)
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def convert_format(cls, source_folder, target_folder,
                       extensions=('.jpg', '.jpeg', '.png'), new_extension='.jpg'):
        """ change images from one format to another (eg. change png files to jpeg) """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Change format of '", source_folder, "' files...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_format(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   extensions=extensions, new_extension=new_extension)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename + new_extension)
                    image = ndimage.imread(target_folder + "/" + filename + new_extension)
                    misc.imsave(target_folder + "/" + filename + new_extension, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            new_filename = os.path.splitext(filename)[0] + new_extension
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + new_filename)
                            image = ndimage.imread(target_folder + "/" + new_filename)
                            misc.imsave(target_folder + "/" + new_filename, image)
dataset_builder.py (project: images-web-crawler, author: amineHorseman)
def convert_to_array(cls, source_folder, target_folder, create_labels_file=False,
                       flatten=False, extensions=('.jpg', '.jpeg', '.png')):
        """ Read all images in subfolders and convert them to a single array """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and concatenate:
        print("Converting '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_array(source_folder + '/' + filename, target_folder,
                    create_labels_file=create_labels_file, flatten=flatten,
                    extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                    if (flatten):
                        cls.data.append(image.flatten())
                    else:
                        cls.data.append(image)
                    if create_labels_file:
                        cls.labels.append(source_folder.replace('/', '_'))
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                            if (flatten):
                                cls.data.append(image.flatten())
                            else:
                                cls.data.append(image)
                            if create_labels_file:
                                cls.labels.append(source_folder.replace('/', '_'))
KITTI.py (project: FlowNetPytorch, author: ClementPinard)
def load_flow_from_png(png_path):
    return(imread(png_path)[:,:,0:2].astype(float) - 128)
KITTI.py (project: FlowNetPytorch, author: ClementPinard)
def KITTI_loader(root,path_imgs, path_flo):
    imgs = [os.path.join(root,path) for path in path_imgs]
    flo = os.path.join(root,path_flo)
    return [imread(img) for img in imgs],load_flow_from_png(flo)
listdataset.py (project: FlowNetPytorch, author: ClementPinard)
def default_loader(root, path_imgs, path_flo):
    imgs = [os.path.join(root,path) for path in path_imgs]
    flo = os.path.join(root,path_flo)
    return [imread(img).astype(np.float32) for img in imgs],load_flo(flo)

