Python imread() example source code

prepro.py (project: auckland-ai-meetup-x-triage, author: a-i-joe)
def png_to_array(path, res=(256, 256)):
    img = imread(path, flatten=True)
    if (img.max() <= 0.0):
        raise ValueError("empty image. imgname: " + path)
    return preprocess_image(img, res)
vdsr.py (project: VDSR-Keras, author: GeorgeSeif)
def load_images(directory):
    images = []
    for root, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if re.search("\.(jpg|jpeg|png|bmp|tiff)$", filename):
                filepath = os.path.join(root, filename)
                image = ndimage.imread(filepath, mode="L")
                images.append(image)

    images = np.array(images)
    array_shape = np.append(images.shape[0:3], 1)
    images = np.reshape(images, (array_shape))

    return images
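
A minimal usage sketch for load_images (the directory name is hypothetical; it assumes the imports the function relies on, i.e. os, re, numpy as np and scipy's ndimage, are already in scope):

images = load_images('./train_images')   # hypothetical training-data directory
# each file is read in grayscale ('L') mode, so the stacked array has
# shape (num_images, height, width, 1), ready to feed to a Keras model
print(images.shape)
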
image_processing.py (project: ML-Project, author: Shiam-Chowdhury)
def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""

  image_files = os.listdir(folder)
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
  print(folder)

  num_images = 0
  for image_index, image in enumerate(image_files):
    image_file = os.path.join(folder, image)
    try:
      image_data = (ndimage.imread(image_file).astype(float) -      # normalize data
                    pixel_depth / 2) / pixel_depth
      if image_data.shape != (image_size, image_size):
        raise Exception('Unexpected image shape: %s' % str(image_data.shape))
      dataset[num_images, :, :] = image_data
      num_images = num_images + 1
    except IOError as e:
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') # skip unreadable files

  dataset = dataset[0:num_images, :, :]
  if num_images < min_num_images:                                   # check if a given min. no. of images
    raise Exception('Many fewer images than expected: %d < %d' %    # has been loaded
                    (num_images, min_num_images))

  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  return dataset


# function to store the normalized tensors obtained from the load_letter function in
# .pickle files for later use
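
A sketch of that pickling step in the same style (the helper name maybe_pickle and the one-.pickle-file-per-folder layout are assumptions, not part of the excerpt above):

import os
import pickle

def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    # sketch of a possible pickling helper (name and file layout assumed)
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # a pickle already exists; skip unless force is set
            print('%s already present - skipping pickling.' % set_filename)
        else:
            print('Pickling %s.' % set_filename)
            dataset = load_letter(folder, min_num_images_per_class)
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
    return dataset_names
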
proc_images.py (project: neon_segnet, author: NervanaSystems)
def main():
    assert os.path.isdir(args.image_path), '%s directory not found' % args.image_path

    for dataset in ['train', 'test', 'val']:
        out_dir_im = os.path.join(args.output_path, dataset)
        if not os.path.isdir(out_dir_im):
            os.makedirs(out_dir_im)

        out_dir_an = os.path.join(args.output_path, dataset + 'annot')
        if not os.path.isdir(out_dir_an):
            os.makedirs(out_dir_an)

        fid = open(os.path.join(args.output_path, '%s_images.csv' % dataset), 'w')
        # print header
        fid.write('image,labels\n')
        fns = glob(os.path.join(args.image_path, dataset, '*.png'))

        for fn in fns:

            fn_image = os.path.abspath(fn)
            fn_annot = os.path.split(fn_image)
            fn_annot = os.path.join(fn_annot[0] + 'annot', fn_annot[1])

            im = imread(fn_image)
            annot = imread(fn_annot)
            out_size = (256, 512)
            im = imresize(im, out_size)
            annot = imresize(annot, out_size, interp='nearest')


            fn_image_out = os.path.abspath(os.path.join(out_dir_im,
                                                        os.path.basename(fn_image)))
            fn_annot_out = os.path.abspath(os.path.join(out_dir_an,
                                                        os.path.basename(fn_image)))
            imsave(fn_image_out, im)
            imsave(fn_annot_out, annot)

            fid.write('%s,%s\n' %(fn_image_out, fn_annot_out))
        fid.close()
constants.py (project: Adversarial_Video_Generation, author: dyelax)
def get_test_frame_dims():
    img_path = glob(os.path.join(TEST_DIR, '*/*'))[0]
    img = imread(img_path, mode='RGB')
    shape = np.shape(img)

    return shape[0], shape[1]
constants.py (project: Adversarial_Video_Generation, author: dyelax)
def get_train_frame_dims():
    img_path = glob(os.path.join(TRAIN_DIR, '*/*'))[0]
    img = imread(img_path, mode='RGB')
    shape = np.shape(img)

    return shape[0], shape[1]
utils.py (project: Adversarial_Video_Generation, author: dyelax)
def get_full_clips(data_dir, num_clips, num_rec_out=1):
    """
    Loads a batch of random clips from the unprocessed train or test data.

    @param data_dir: The directory of the data to read. Should be either c.TRAIN_DIR or c.TEST_DIR.
    @param num_clips: The number of clips to read.
    @param num_rec_out: The number of outputs to predict. Outputs > 1 are computed recursively,
                        using the previously-generated frames as input. Default = 1.

    @return: An array of shape
             [num_clips, c.FULL_HEIGHT, c.FULL_WIDTH, (3 * (c.HIST_LEN + num_rec_out))].
             A batch of frame sequences with values normalized in range [-1, 1].
    """
    clips = np.empty([num_clips,
                      c.FULL_HEIGHT,
                      c.FULL_WIDTH,
                      (3 * (c.HIST_LEN + num_rec_out))])

    # get num_clips random episodes
    ep_dirs = np.random.choice(glob(os.path.join(data_dir, '*')), num_clips)

    # get a random clip of length HIST_LEN + num_rec_out from each episode
    for clip_num, ep_dir in enumerate(ep_dirs):
        ep_frame_paths = sorted(glob(os.path.join(ep_dir, '*')))
        start_index = np.random.choice(len(ep_frame_paths) - (c.HIST_LEN + num_rec_out - 1))
        clip_frame_paths = ep_frame_paths[start_index:start_index + (c.HIST_LEN + num_rec_out)]

        # read in frames
        for frame_num, frame_path in enumerate(clip_frame_paths):
            frame = imread(frame_path, mode='RGB')
            norm_frame = normalize_frames(frame)

            clips[clip_num, :, :, frame_num * 3:(frame_num + 1) * 3] = norm_frame

    return clips
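
A minimal sketch of how a returned batch could be split into history frames and prediction targets, following the channel layout described in the docstring (the call arguments are hypothetical):

clips = get_full_clips(c.TRAIN_DIR, num_clips=8, num_rec_out=1)   # hypothetical arguments
# channels are packed frame by frame: 3 * HIST_LEN history channels,
# followed by 3 * num_rec_out channels for the frames to predict
history_frames = clips[:, :, :, :3 * c.HIST_LEN]
target_frames = clips[:, :, :, 3 * c.HIST_LEN:]
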
visualsearch_train.py (project: ml-deepranking, author: urakozz)
def img(image_file):
    rgb = ndimage.imread(image_file).astype(float)
    rgb = (rgb - 255.0 / 2) / 255.0
    return rgb
visualsearch_serve.py (project: ml-deepranking, author: urakozz)
def img(image_file):
    rgb = ndimage.imread(image_file).astype(float)
    rgb = (rgb - 255.0/2) / 255.0
    return rgb
nyu_dataset_loader.py (project: depth-semantic-fully-conv, author: iapatil)
def __getitem__(self, index):
        img_name = self.listing[index]

        input_dir,target_depth_dir,target_label_dir = self.data_dir

        input_im, target_depth_im,target_label_im = imread(os.path.join(input_dir,img_name)),\
                                                    imread(os.path.join(target_depth_dir,img_name[:-3]+'png')),\
                                                    imread(os.path.join(target_label_dir,img_name[:-3]+'png'))


        if self.co_transform is not None:
            input_im, target_depth_im,target_label_im = self.co_transform(input_im,target_depth_im,target_label_im)

        if self.input_transform is not None:
            input_im = self.input_transform(input_im)

        if self.target_depth_transform is not None :
            target_depth_im = self.target_depth_transform(target_depth_im)

        if self.target_labels_transform is not None :
            target_label_im = self.target_labels_transform(target_label_im)

        input_rgb_im = input_im
        input_depth_im  = torch.cat((target_depth_im,target_depth_im,target_depth_im),dim = 0)
        target_im = target_label_im

        return input_rgb_im,input_depth_im,target_im
data_manager.py (project: deepmodels, author: learningsociety)
def load_cv_imgs(img_fns, img_sz=(256, 256), use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1

  imgs = [
  ]  #np.ndarray((len(img_fns), img_sz[0], img_sz[1], nb_channels), np.float32)
  for i in range(len(img_fns)):
    try:
      im = cv2.imread(img_fns[i])
      if im is None:
        print('cannot read image {}'.format(img_fns[i]))
        continue
      if img_sz is not None:
        im = cv2.resize(im, img_sz)
      if use_bgr:
        imgs.append(im)
      else:
        # keep same dim
        curimg = np.ndarray((im.shape[0], im.shape[1], 1), np.uint8)
        curimg[:, :, 0] = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        imgs.append(curimg)
    except cv2.error as e:
      print('img error: {}, {}'.format(img_fns[i], e))

  # print('loaded {} cv images'.format(len(imgs)))
  if len(imgs) == 0:
    print(img_fns)
  return np.asarray(imgs)


# img_fns is a numpy array with strings
data_manager.py (project: deepmodels, author: learningsociety)
def load_scipy_imgs(img_fns, img_sz=(256, 256), use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1

  imgs = [
  ]  #np.ndarray((len(img_fns), img_sz[0], img_sz[1], nb_channels), np.float32)
  for i in range(len(img_fns)):
    try:
      #im = cv2.imread(img_fns[i])
      import scipy.ndimage as sni
      im = sni.imread(img_fns[i])
      if im is None:
        continue
      if img_sz is not None:
        im = cv2.resize(im, img_sz)
      if use_bgr:
        imgs.append(im)
      else:
        # keep same dim
        curimg = np.ndarray((im.shape[0], im.shape[1], 1), np.uint8)
        curimg[:, :, 0] = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        imgs.append(curimg)
    except cv2.error as e:
      print('img error: {}, {}'.format(img_fns[i], e))
  # print('loaded {} cv images'.format(len(imgs)))
  return np.asarray(imgs)
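
A minimal usage sketch (the file names are hypothetical; as the comment above notes, img_fns is expected to be a numpy array of strings):

import numpy as np

img_fns = np.array(['cat_001.jpg', 'cat_002.jpg'])   # hypothetical file names
batch = load_scipy_imgs(img_fns, img_sz=(256, 256), use_bgr=True)
print(batch.shape)   # (num_loaded_images, 256, 256, 3) for colour inputs
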


# load images into a numpy array
data_manager.py (project: deepmodels, author: learningsociety)
def load_crop_imgs(img_fns, img_bboxes, img_sz, use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1
  imgs = np.ndarray((len(img_fns), nb_channels, img_sz[0], img_sz[1]),
                    np.float32)
  print(imgs.shape)
  for i in range(len(img_fns)):
    im = cv2.imread(img_fns[i])
    imcrop = np.ndarray((img_sz[0], img_sz[1], nb_channels), np.float32)
    xs, ys, xe, ye = img_bboxes[i][0], img_bboxes[i][1], img_bboxes[i][
        0] + img_bboxes[i][2], img_bboxes[i][1] + img_bboxes[i][3]
    # Check if im is bgr or grayscale here?
    if use_bgr:
      imcrop = im[xs:xe, ys:ye, :]
    else:
      imcrop = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
      im = imcrop[xs:xe, ys:ye]
    im = cv2.resize(imcrop, img_sz)
    if use_bgr:
      imgs[i, :, :, :] = im
    else:
      imgs[i, 0, :, :] = im
  return imgs


# load images into a numpy array
data_manager.py (project: deepmodels, author: learningsociety)
def compute_mean_img(img_fns, img_sz):
  mean = None
  count = len(img_fns)
  for i in range(len(img_fns)):
    cv_img = cv2.imread(img_fns[i])
    if cv_img is None:
      raise ValueError(img_fns[i] + ' image read error')
    new_img = cv2.resize(cv_img, img_sz)
    if mean is None:
      mean = new_img.astype(np.float32)
    else:
      mean += new_img
  mean = mean / count
  return mean.astype(np.uint8)
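
A minimal usage sketch (hypothetical file names), e.g. to build a dataset mean image for later subtraction:

mean_img = compute_mean_img(['img_0001.jpg', 'img_0002.jpg'], (256, 256))   # hypothetical files
cv2.imwrite('mean_image.png', mean_img)
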
queries.py (project: blcf, author: willard-yuan)
def get_mask_frame( self, frame, dim=None  ):
        '''
        return mask and name of the query frame. OUTDATED FUNCTION
        '''
        filename = frame.replace(".src.", ".mask.")

        # read image
        ima = cv2.imread(filename)

        # make sure is a mask
        if len(ima.shape)>2:
            ima = ima[:,:,0]

        # binarise
        ima[ima >0]=1.0

        # check dims
        if dim is not None:
            if ima.shape[0]>ima.shape[1]:
                dim_ = dim
            else:
                dim_ = (dim[1], dim[0])
        else:
            dim_ = ima.shape[:2]

        mask_r = reshape_maps_zoom( np.expand_dims(ima, axis=0 ) , dim_).squeeze()
        mask_r[mask_r >0]=1.0

        return mask_r
queries.py (project: blcf, author: willard-yuan)
def get_image_frame( self, topic, id_frame ):
        return imread( self.get_src_path_fromID(topic, id_frame) )




###########################################################
# Additional functions
###########################################################
preprocess.py (project: notmnist, author: aidiary)
def load_letter(letter_dir, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(letter_dir)
    # (num image, image width, image height)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    image_index = 0
    print(letter_dir)
    for image in image_files:
        image_file = os.path.join(letter_dir, image)
        try:
            # normalize image to [-0.5, 0.5]
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[image_index, :, :] = image_data
            image_index += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, "- it's ok, skipping.")

    num_images = image_index
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d'
                        % (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
preprocess.py (project: notmnist, author: aidiary)
def draw_images(root_dir):
    """Draw sample images for each class"""
    assert len(root_dir) == num_classes  # A to J
    num_cols = 10
    pos = 1
    for i in range(num_classes):
        target_dir = root_dir[i]
        for j in range(num_cols):
            plt.subplot(num_classes, num_cols, pos)
            random_file = random.choice(os.listdir(target_dir))
            image = misc.imread(os.path.join(target_dir, random_file))
            plt.imshow(image, cmap=plt.get_cmap('gray'))
            plt.axis('off')
            pos += 1
    plt.show()
data_extractor.py (project: tensorflow_image_tutorial, author: ybenoit)
def load_letter(folder, min_num_images, image_size, pixel_depth):
        """Load the data for a single letter label."""
        image_files = os.listdir(folder)
        dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                             dtype=np.float32)
        image_index = 0
        print(folder)
        for image in os.listdir(folder):
            image_file = os.path.join(folder, image)
            try:
                image_data = (ndimage.imread(image_file).astype(float) -
                              pixel_depth / 2) / pixel_depth
                if image_data.shape != (image_size, image_size):
                    raise Exception('Unexpected image shape: %s' % str(image_data.shape))
                dataset[image_index, :, :] = image_data
                image_index += 1
            except IOError as e:
                print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

        num_images = image_index
        dataset = dataset[0:num_images, :, :]
        if num_images < min_num_images:
            raise Exception('Many fewer images than expected: %d < %d' %
                            (num_images, min_num_images))

        print('Full dataset tensor:', dataset.shape)
        print('Mean:', np.mean(dataset))
        print('Standard deviation:', np.std(dataset))
        return dataset
utils.py (project: ccvt, author: inconvergent)
def get_dens_from_img(fn):

  # dark pixels map to high density: invert the image and scale to [0, 1]
  return 1.0 - imread(fn)/255.

