Python imread() usage examples (source code)
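Note: most snippets below call scipy.misc.imread (aliased variously as m, misc, scm, or im) or scipy.ndimage.imread. Both were deprecated in SciPy 1.0.0 and removed in SciPy 1.3.0; note also that scipy's second positional argument was flatten, not mode. A minimal compatibility sketch using imageio (assuming the imageio v2 API; the flatten handling is only an approximation of SciPy's behavior):

import numpy as np
import imageio

def imread(path, flatten=False, mode=None):
    # Rough drop-in for scipy.misc.imread: mode mirrors PIL modes ('L', 'RGB', ...).
    img = imageio.imread(path, pilmode=mode) if mode else imageio.imread(path)
    img = np.asarray(img)
    if flatten and img.ndim == 3:
        img = img.mean(axis=2)  # crude grayscale; SciPy used an ITU-R 601 luma transform
    return img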

analyse.py (project: lyricswordcloud, author: qwertyyb)
def showData(self):
    print('Loading lyrics data, generating word cloud...')
    mask = imread(self.picfile)
    imgcolor = ImageColorGenerator(mask)
    wcc = WordCloud(font_path='./msyhl.ttc',
                    mask=mask,
                    background_color='white',
                    max_font_size=200,
                    max_words=300,
                    color_func=imgcolor)
    wc = wcc.generate_from_frequencies(self.data)
    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    print('Word cloud generated.')
    plt.show()
gallery.py (project: visual-search, author: GYXie)
def main():
    args.input_data_dir = os.path.abspath(args.input_data_dir)
    if not os.path.exists(args.output_data_dir):
        os.mkdir(args.output_data_dir)
    for dir_path, dir_names, file_names in os.walk(args.input_data_dir):
        if len(file_names) > 0:
            print(dir_path)
            rows = int(math.ceil(len(file_names) / 6.0))
            print(rows)
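            # NOTE: rows is computed from the file count but unused; the grid below
            # is fixed at 4 x 12, and zip() stops at whichever sequence is shorter.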
            fig, axes = plt.subplots(4, 12, subplot_kw={'xticks': [], 'yticks': []})
            fig.subplots_adjust(hspace=0.01, wspace=0.01)
            for ax, file_name in zip(axes.flat, file_names):
                print(file_name)
                img = imread(dir_path + '/' + file_name)
                ax.imshow(img)
                # ax.set_title(os.path.splitext(file_name)[0].replace('.227x227', ''))
            plt.savefig(args.output_data_dir + dir_path.replace(args.input_data_dir, '') + '.pdf')
mit_sceneparsing_benchmark_loader.py (project: pytorch-semseg, author: meetshah1995)
def __getitem__(self, index):
        """__getitem__

        :param index:
        """
        img_path = self.files[self.split][index].rstrip()
        lbl_path = os.path.join(self.annotations_base, os.path.basename(img_path)[:-4] + '.png')

        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)

        lbl = m.imread(lbl_path)
        lbl = np.array(lbl, dtype=np.uint8)

        if self.is_transform:
            img, lbl = self.transform(img, lbl)

        return img, lbl
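For context, a dataset class like this is normally consumed through a torch.utils.data.DataLoader. A minimal usage sketch (the class name and constructor arguments below are assumptions for illustration, not the loader's verified signature):

from torch.utils.data import DataLoader

# Hypothetical construction; check the loader's actual __init__ for its arguments.
dataset = MITSceneParsingBenchmarkLoader(root='data/', split='training', is_transform=True)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
for img, lbl in loader:
    pass  # img and lbl arrive batched; transform() typically converts them to tensors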
camvid_loader.py (project: pytorch-semseg, author: meetshah1995)
def __getitem__(self, index):
        img_name = self.files[self.split][index]
        img_path = self.root + '/' + self.split + '/' + img_name
        lbl_path = self.root + '/' + self.split + 'annot/' + img_name

        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)

        lbl = m.imread(lbl_path)
        lbl = np.array(lbl, dtype=np.int8)

        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)

        if self.is_transform:
            img, lbl = self.transform(img, lbl)

        return img, lbl
cityscapes_loader.py (project: pytorch-semseg, author: meetshah1995)
def __getitem__(self, index):
        """__getitem__

        :param index:
        """
        img_path = self.files[self.split][index].rstrip()
        lbl_path = os.path.join(self.annotations_base,
                                img_path.split(os.sep)[-2], 
                                os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')

        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)

        lbl = m.imread(lbl_path)
        lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))

        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)

        if self.is_transform:
            img, lbl = self.transform(img, lbl)

        return img, lbl
keras_dataAug.py (project: FCN_train, author: 315386775)
def data_augmentation(image_files, dir):
    image_list = []
    new_file_name = dir
    save_dir = "xxx" + new_file_name

    for image_file in image_files:
        image_list.append(misc.imread(image_file))

    for image in image_list:
        x = img_to_array(image)  # a Numpy array with shape (height, width, channels)
        x = x.reshape((1,) + x.shape)  # add a batch axis: (1, height, width, channels)
        i = 0
        for batch in datagen.flow(x, batch_size=1, save_to_dir=save_dir,
                                  save_prefix=dir, save_format='jpg'):
            i += 1
            if i > 99:
                break
    return image_list

# List all the files
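The snippet above assumes a module-level Keras datagen; one plausible definition (an assumption, since the original file's configuration is not shown):

from keras.preprocessing.image import ImageDataGenerator

# Typical augmentation settings; the original project may use different values.
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')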
utils.py (project: DeepWorks, author: daigo0927)
def get_image(filepath, image_target, image_size):

    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target)/2)    
    w_drop = int((w_origin - image_target)/2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1,1,3))

    img_crop = img[h_drop:h_drop+image_target, w_drop:w_drop+image_target, :]

    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize)/127.5 - 1.
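get_image scales pixels to [-1, 1] via img/127.5 - 1. To display or save such an image, invert the mapping; a minimal sketch:

import numpy as np

def to_uint8(img):
    # Inverse of img / 127.5 - 1: map [-1, 1] back to [0, 255].
    return ((img + 1.0) * 127.5).clip(0, 255).astype(np.uint8)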
utils.py (project: WGAN_GP, author: daigo0927)
(Body identical to the get_image function from DeepWorks/utils.py above.)
Routines.py (project: structured-output-ae, author: sbelharbi)
def test():

    path_text_for = 'D171.png'
    path_text_back = 'D771.png'
    # foreground/background texture images
    im_for = misc.imread(path_text_for)
    im_back = misc.imread(path_text_back)
    size = im_for.shape
    s = size[0]    # size of the image (square matrix)
    # number of images
    nbr_ims = 10
    train = True
    # generating the images
    data,data_labels = generate_brodatz_texture(nbr_ims, s, im_back, im_for)
    if train: # train
        sio.savemat('../data/train.mat', dict([('x_train', data), ('y_train', data_labels)]))    
    else:     # test
        sio.savemat('../data/test.mat', dict([('x_test', data), ('y_test', data_labels)]) )
Train.py (project: BirdProject, author: ZlodeiBaal)
def PrepareDataList(BASE, length):
    List = []
    for M in range(0,min(length,len(BASE))):
        img, text = BASE[M]
        image = misc.imread(img,mode='RGB')
        #image = misc.imresize(image, [227, 227])
        r1 = []
        if isfile(text):
            f = open(text, 'r')
            s = f.readline()
            st = s.split(' ')
            for i in range(0,2):
                r1.append(int(st[i]))
            f.close()
        else:  # If there is no txt file: the "no bird" situation
            r1.append(0)
            r1.append(0)
        List.append([image,r1])
    return List

# Random test and train list
kitti_new.py (project: learning-to-see-by-moving, author: pulkitag)
def resize_images(prms):
    seqNum = range(11)
    rawStr = ['rawLeftImFile', 'rawRightImFile']
    imStr  = ['leftImFile', 'rightImFile']
    num    = ku.get_num_images()
    for raw, new in zip(rawStr, imStr):
        for seq in seqNum:
            N = num[seq]
            print(seq, N, raw, new)
            rawNames = [prms['paths'][raw] % (seq,i) for i in range(N)]          
            newNames = [prms['paths'][new] % (seq,i) for i in range(N)]
            dirName = os.path.dirname(newNames[0])
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            for rawIm, newIm in zip(rawNames, newNames):
                im = scm.imread(rawIm)
                im = scm.imresize(im, [256, 256])   
                scm.imsave(newIm, im)

##
# Save images as jpgs.
kitti_new.py (project: learning-to-see-by-moving, author: pulkitag)
def save_as_jpg(prms):
    seqNum = range(11)
    rawStr = ['rawLeftImFile', 'rawRightImFile']
    imStr  = ['leftImFile', 'rightImFile']
    num    = ku.get_num_images()
    for raw, new in zip(rawStr, imStr):
        for seq in seqNum:
            N = num[seq]
            print(seq, N, raw, new)
            rawNames = [prms['paths'][raw] % (seq,i) for i in range(N)]          
            newNames = [prms['paths'][new] % (seq,i) for i in range(N)]
            dirName = os.path.dirname(newNames[0])
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            for rawIm, newIm in zip(rawNames, newNames):
                im = scm.imread(rawIm)
                scm.imsave(newIm, im)

##
# Get the names of images
preprocessing.py (project: metaqnn, author: bowenbaker)
def preprocess(image_dir, new_image_dir, preprocess_fn):

    image_paths = []
    labels = []

    if os.path.isdir(new_image_dir):
        rmtree(new_image_dir)
    os.makedirs(new_image_dir)

    classes = os.listdir(image_dir)

    for clas in classes:
        class_dir = os.path.join(image_dir, str(clas))
        new_class_dir = os.path.join(new_image_dir, str(clas))
        os.makedirs(new_class_dir)

        for image_name in os.listdir(class_dir):
            image = misc.imread(os.path.join(class_dir, image_name))
            image = preprocess_fn(image)
            misc.imsave(os.path.join(new_class_dir, image_name), image)
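preprocess_fn here is any image-to-image callable. A hedged example (a hypothetical helper, not the project's own) that center-crops to a square and resizes, using scipy.misc.imresize to match the snippet's era:

import numpy as np
from scipy import misc

def center_crop_resize(image, size=224):
    # Hypothetical preprocess_fn: square center crop, then resize.
    h, w = image.shape[:2]
    s = min(h, w)
    top, left = (h - s) // 2, (w - s) // 2
    return misc.imresize(image[top:top + s, left:left + s], (size, size))

# e.g. preprocess('raw_images', 'processed_images', center_crop_resize)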
layout_generate_dataset.py (project: DocumentSegmentation, author: SeguinBe)
def process_one(image_dir, page_dir, output_dir, basename, colormap, color_labels):
    image_filename = os.path.join(image_dir, "{}.jpg".format(basename))
    page_filename = os.path.join(page_dir, "{}.xml".format(basename))

    page = PAGE.parse_file(page_filename)
    text_lines = [tl for tr in page.text_regions for tl in tr.text_lines]
    graphic_regions = page.graphic_regions
    img = imread(image_filename, mode='RGB')

    gt = np.zeros_like(img[:, :, 0])
    mask1 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if 'comment' in tl.id], 1)
    mask2 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in text_lines if 'comment' not in tl.id], 1)
    mask3 = cv2.fillPoly(gt.copy(), [PAGE.Point.list_to_cv2poly(tl.coords)
                                     for tl in graphic_regions], 1)
    arr = np.dstack([mask1, mask2, mask3])

    gt_img = convert_array_masks(arr, colormap, color_labels)
    save_and_resize(img, os.path.join(output_dir, 'images', '{}.jpg'.format(basename)))
    save_and_resize(gt_img, os.path.join(output_dir, 'labels', '{}.png'.format(basename)), nearest=True)
matting.py (project: Deep-Image-Matting, author: Joker316701882)
def load_data(batch_alpha_paths,batch_eps_paths,batch_BG_paths):

    batch_size = batch_alpha_paths.shape[0]
    train_batch = []
    images_without_mean_reduction = []
    for i in range(batch_size):

        alpha = misc.imread(batch_alpha_paths[i], mode='L').astype(np.float32)

        eps = misc.imread(batch_eps_paths[i]).astype(np.float32)

        BG = misc.imread(batch_BG_paths[i]).astype(np.float32)

        batch_i,raw_RGB = preprocessing_single(alpha, BG, eps,batch_alpha_paths[i]) 
        train_batch.append(batch_i)
        images_without_mean_reduction.append(raw_RGB)
    train_batch = np.stack(train_batch)
    return (train_batch[:, :, :, :3],
            np.expand_dims(train_batch[:, :, :, 3], 3),
            np.expand_dims(train_batch[:, :, :, 4], 3),
            train_batch[:, :, :, 5:8],
            train_batch[:, :, :, 8:],
            images_without_mean_reduction)
matting.py (project: Deep-Image-Matting, author: Joker316701882)
def load_alphamatting_data(test_alpha):
    rgb_path = os.path.join(test_alpha,'rgb')
    trimap_path = os.path.join(test_alpha,'trimap')
    alpha_path = os.path.join(test_alpha,'alpha')   
    images = os.listdir(trimap_path)
    test_num = len(images)
    all_shape = []
    rgb_batch = []
    tri_batch = []
    alp_batch = []
    for i in range(test_num):
        rgb = misc.imread(os.path.join(rgb_path,images[i]))
        trimap = misc.imread(os.path.join(trimap_path, images[i]), mode='L')
        alpha = misc.imread(os.path.join(alpha_path, images[i]), mode='L') / 255.0
        all_shape.append(trimap.shape)
        rgb_batch.append(misc.imresize(rgb,[320,320,3])-g_mean)
        trimap = misc.imresize(trimap,[320,320],interp = 'nearest').astype(np.float32)
        tri_batch.append(np.expand_dims(trimap,2))
        alp_batch.append(alpha)
    return np.array(rgb_batch),np.array(tri_batch),np.array(alp_batch),all_shape,images
matting.py (project: Deep-Image-Matting, author: Joker316701882)
def load_validation_data(vali_root):
    alpha_dir = os.path.join(vali_root,'alpha')
    RGB_dir = os.path.join(vali_root,'RGB')
    images = os.listdir(alpha_dir)
    test_num = len(images)

    all_shape = []
    rgb_batch = []
    tri_batch = []
    alp_batch = []

    for i in range(test_num):
        rgb = misc.imread(os.path.join(RGB_dir,images[i]))
        alpha = misc.imread(os.path.join(alpha_dir, images[i]), mode='L')
        trimap = generate_trimap(np.expand_dims(np.copy(alpha),2),np.expand_dims(alpha,2))[:,:,0]
        alpha = alpha / 255.0
        all_shape.append(trimap.shape)
        rgb_batch.append(misc.imresize(rgb,[320,320,3])-g_mean)
        trimap = misc.imresize(trimap,[320,320],interp = 'nearest').astype(np.float32)
        tri_batch.append(np.expand_dims(trimap,2))
        alp_batch.append(alpha)
    return np.array(rgb_batch),np.array(tri_batch),np.array(alp_batch),all_shape,images
test.py (project: Deep-Image-Matting, author: Joker316701882)
def main(args):

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = args.gpu_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options = gpu_options)) as sess:
        saver = tf.train.import_meta_graph('./meta_graph/my-model.meta')
        saver.restore(sess,tf.train.latest_checkpoint('./model'))
        image_batch = tf.get_collection('image_batch')[0]
        GT_trimap = tf.get_collection('GT_trimap')[0]
        pred_mattes = tf.get_collection('pred_mattes')[0]

        rgb = misc.imread(args.rgb)
        alpha = misc.imread(args.alpha, mode='L')
        trimap = generate_trimap(np.expand_dims(np.copy(alpha),2),np.expand_dims(alpha,2))[:,:,0]
        origin_shape = alpha.shape
        rgb = np.expand_dims(misc.imresize(rgb.astype(np.uint8),[320,320,3]).astype(np.float32)-g_mean,0)
        trimap = np.expand_dims(np.expand_dims(misc.imresize(trimap.astype(np.uint8),[320,320],interp = 'nearest').astype(np.float32),2),0)

        feed_dict = {image_batch:rgb,GT_trimap:trimap}
        pred_alpha = sess.run(pred_mattes,feed_dict = feed_dict)
        final_alpha = misc.imresize(np.squeeze(pred_alpha),origin_shape)
        # misc.imshow(final_alpha)
        misc.imsave('./alpha.png',final_alpha)
ColourNet.py (project: ColourNet, author: raghavgupta0296)
def chooose_ims(batch_size):
    global index
    I1 = np.ndarray(shape=[1,256,256,1])
    U1 = np.ndarray(shape=[1,256,256,1])
    V1 = np.ndarray(shape=[1,256,256,1])
    for i in range(batch_size):
        if index>=len(files):
            index=0
        image = im.imread("/dataImages/dataset/"+files[index],mode='RGB')
        image = image / 255.0  # guard against integer division on uint8 input
        I, U, V = imManipulation.rgb2yuv(image)
        I = np.reshape(I, (1,256, 256,1))
        U = np.reshape(U, (1,256, 256,1))
        V = np.reshape(V, (1,256, 256,1))
        I1 = np.concatenate((I1,I),axis=0)
        U1 = np.concatenate((U1,U),axis=0)
        V1 = np.concatenate((V1,V),axis=0)
        index+=1
    I1 = I1[1:,:,:,:]
    U1 = U1[1:,:,:,:]
    V1 = V1[1:,:,:,:]
    return I1,U1,V1
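Preallocating a dummy first row and calling np.concatenate on every iteration is quadratic in batch size; collecting the slices in lists and stacking once is simpler and faster. A restructured sketch under the same assumptions (files, index, im, and imManipulation exist as in the snippet):

def chooose_ims_fast(batch_size):
    global index
    Is, Us, Vs = [], [], []
    for _ in range(batch_size):
        if index >= len(files):
            index = 0
        image = im.imread("/dataImages/dataset/" + files[index], mode='RGB') / 255.0
        I, U, V = imManipulation.rgb2yuv(image)
        Is.append(I.reshape(256, 256, 1))
        Us.append(U.reshape(256, 256, 1))
        Vs.append(V.reshape(256, 256, 1))
        index += 1
    # Stack along a new batch axis: each result has shape (batch_size, 256, 256, 1).
    return np.stack(Is), np.stack(Us), np.stack(Vs)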
BatchDatsetReader.py (project: FCN-GoogLeNet, author: DeepSegment)
def _transform(self, filename, flag = False):
        if flag:
            image = np.array(Image.open(filename), dtype=np.uint8)
            image[image == 255] = 21
        else:
            image = misc.imread(filename)

        if self.__channels and len(image.shape) < 3:  # make sure images are of shape (h, w, 3)
            image = np.stack([image] * 3, axis=-1)

        if self.image_options.get("resize", False):
            resize_size = int(self.image_options["resize_size"])
            resize_image = misc.imresize(image,
                                         [resize_size, resize_size], interp='nearest')
        else:
            resize_image = image

        return np.array(resize_image)
lddmm_theano.py (project: lddmm-ot, author: jeanfeydy)
def level_curves(fname, npoints = 200, smoothing = 10, level = 0.5) :
    "Loads regularly sampled curves from a .PNG image."
    # Find the contour lines
    img = misc.imread(fname, flatten = True) # Grayscale
    img = (img.T[:, ::-1])  / 255.
    img = gaussian_filter(img, smoothing, mode='nearest')
    lines = find_contours(img, level)

    # Compute the sampling ratio for every contour line
    lengths = np.array( [arclength(line) for line in lines] )
    points_per_line = np.ceil( npoints * lengths / np.sum(lengths) )

    # Interpolate accordingly
    points = [] ; connec = [] ; index_offset = 0
    for ppl, line in zip(points_per_line, lines) :
        (p, c) = resample(line, ppl)
        points.append(p)
        connec.append(c + index_offset)
        index_offset += len(p)

    size   = np.maximum(img.shape[0], img.shape[1])
    points = np.vstack(points) / size
    connec = np.vstack(connec)
    return Curve(points, connec)
# Pyplot Output =================================================================================
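level_curves relies on two helpers, arclength and resample, that are not shown here. Hedged sketches consistent with how they are called (assumptions, not the project's verified code):

import numpy as np

def arclength(line):
    # Total length of an (N, 2) polyline.
    return np.sum(np.sqrt(np.sum(np.diff(line, axis=0) ** 2, axis=1)))

def resample(line, n):
    # Resample an (N, 2) polyline at n points, uniform in arclength.
    # Returns the points and an (n-1, 2) array of segment connectivity.
    n = int(n)
    seg = np.sqrt(np.sum(np.diff(line, axis=0) ** 2, axis=1))
    t = np.concatenate([[0.0], np.cumsum(seg)])
    s = np.linspace(0.0, t[-1], n)
    points = np.stack([np.interp(s, t, line[:, 0]),
                       np.interp(s, t, line[:, 1])], axis=1)
    connec = np.stack([np.arange(n - 1), np.arange(1, n)], axis=1)
    return points, connec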
lddmm_pytorch.py (project: lddmm-ot, author: jeanfeydy)
(Body identical to the level_curves function from lddmm_theano.py above.)
test_faceservice.py (project: icyface_api, author: bupticybee)
def test_recognize(args):
    imdetect = args.detect
    im1 = args.im1
    im2 = args.im2

    payload = {'img':file2base64(imdetect)}
    import numpy as np
    imarr = np.array(misc.imread(imdetect))
    r = requests.get("http://face.icybee.cn/face/face_detect", data=payload)
    print(json.loads(r.text)['boxes'][0])
    box = json.loads(r.text)['boxes'][0]
    box = [int(i) for  i in box]
    misc.imsave('sample.jpg', imarr[box[1]:box[3], box[0]:box[2], :])

    payload = {
            'img1':file2base64(im1),
            'img2':file2base64(im2)
            }
    r = requests.get("http://face.icybee.cn/face/face_recognize", data=payload)
    print(r.text)
    #print(json.loads(r.text)['dist'])
make_tiny_imagenet.py (project: tiny-imagenet, author: jcjohnson)
def write_data_in_synset_folders(part_data, part, out_dir, image_size):
  part_dir = os.path.join(out_dir, part)
  os.mkdir(part_dir)
  num_wnids = len(part_data)
  for i, (wnid, wnid_data) in enumerate(part_data.items()):
    print('Writing images for synset %d / %d of %s' % (i + 1, num_wnids, part))
    wnid_dir = os.path.join(part_dir, wnid)
    os.mkdir(wnid_dir)
    image_dir = os.path.join(wnid_dir, 'images')
    os.mkdir(image_dir)
    boxes_filename = os.path.join(wnid_dir, '%s_boxes.txt' % wnid)
    boxes_file = open(boxes_filename, 'w')
    for j, (img_filename, bbox) in enumerate(wnid_data):
      out_img_filename = '%s_%d.JPEG' % (wnid, j)
      full_out_img_filename = os.path.join(image_dir, out_img_filename)
      img = imread(img_filename)
      img_resized, bbox_resized = resize_image(img, image_size, bbox)
      imsave(full_out_img_filename, img_resized)
      boxes_file.write('%s\t%d\t%d\t%d\t%d\n' % (out_img_filename,
                       bbox_resized[0], bbox_resized[1], bbox_resized[2], bbox_resized[3]))
    boxes_file.close()
ilsvrc_cls_multithread_scipy.py (project: tensorflow_yolo2, author: wenxichen)
def image_read(self, imname):
        image = misc.imread(imname, mode='RGB').astype(np.float)
        r,c,ch = image.shape
        if r < 299 or c < 299:
            # TODO: check too small images
            # print "##too small!!"
            image = misc.imresize(image, (299, 299, 3))
        elif r > 299 or c > 299:
            image = image[(r - 299) // 2 : (r - 299) // 2 + 299, (c - 299) // 2 : (c - 299) // 2 + 299, :]
        # print r, c, image.shape
        assert image.shape == (299, 299, 3)
        image = (image / 255.0) * 2.0 - 1.0
        if self.random_noise:
            add_noise = bool(random.getrandbits(1))
            if add_noise:
                eps = random.choice([4.0, 8.0, 12.0, 16.0]) / 255.0 * 2.0
                noise_image = image + eps * np.random.choice([-1, 1], (299,299,3))
                image = np.clip(noise_image, -1.0, 1.0)
        return image
getImgs.py (project: crawl-dataset, author: e-lab)
def resizeImg(imgPath,img_size):
    try:
        img = imread(imgPath)
        h, w, _ = img.shape
        scale = 1
        if w >= h:
            new_w = img_size
            if w  >= new_w:
                scale = float(new_w) / w
            new_h = int(h * scale)
        else:
            new_h = img_size
            if h >= new_h:
                scale = float(new_h) / h
            new_w = int(w * scale)
        new_img = imresize(img, (new_h, new_w), interp='bilinear')
        imsave(imgPath,new_img)
        print('Img Resized as {}'.format(img_size))
    except Exception as e:
        print(e)
getImages.py (project: crawl-dataset, author: e-lab)
def resizeImg(imgPath,img_size):
    img = imread(imgPath)
    h, w, _ = img.shape
    scale = 1
    if w >= h:
        new_w = img_size
        if w  >= new_w:
            scale = float(new_w) / w
        new_h = int(h * scale)
    else:
        new_h = img_size
        if h >= new_h:
            scale = float(new_h) / h
        new_w = int(w * scale)
    new_img = imresize(img, (new_h, new_w), interp='bilinear')
    imsave(imgPath,new_img)

#Download img
#Later we can do multi thread apply workers to do faster work
getImgs.py (project: crawl-dataset, author: e-lab)
def resizeImg(imgPath,img_size):
    img = imread(imgPath)
    h, w, _ = img.shape
    scale = 1
    if w >= h:
        new_w = img_size
        if w  >= new_w:
            scale = float(new_w) / w
        new_h = int(h * scale)
    else:
        new_h = img_size
        if h >= new_h:
            scale = float(new_h) / h
        new_w = int(w * scale)
    new_img = imresize(img, (new_h, new_w), interp='bilinear')
    imsave(imgPath,new_img)
    print('Img Resized as {}'.format(img_size))
model_input.py (project: SLAM, author: sanjeevkumar42)
def get_rgbd_file(self, dirname, offset):
        associations = self.seq_dir_map[dirname]['associations']

        if associations[offset, 1].startswith('depth'):
            rgb_filename = os.path.join(dirname, associations[offset, 3])
            depth_filename = os.path.join(dirname, associations[offset, 1])
        else:
            rgb_filename = os.path.join(dirname, associations[offset, 1])
            depth_filename = os.path.join(dirname, associations[offset, 3])

        rgb_img = ndimage.imread(rgb_filename)
        depth_img = ndimage.imread(depth_filename)
        width = height = 224

        # Reshape
        depth_img = np.reshape(depth_img, list(depth_img.shape) + [1])
        depth_img = 255 * depth_img / np.max(depth_img)

        rgbd_img = np.concatenate((rgb_img, depth_img), 2)

        # Resize
        rgbd_img = transform.resize(rgbd_img, [width, height], preserve_range=True)

        return rgb_filename, depth_filename, rgbd_img.astype(np.float32)
model_input.py (project: SLAM, author: sanjeevkumar42)
def read_rgb_image(filepath):
    rgb_img = ndimage.imread(filepath)
    width = height = 224
    img_width = rgb_img.shape[1]
    img_height = rgb_img.shape[0]

    # scale such that smaller dimension is 256
    if img_width < img_height:
        factor = 256.0 / img_width
    else:
        factor = 256.0 / img_height
    rgb_img = transform.rescale(rgb_img, factor, preserve_range=True)

    # crop randomly
    width_start = np.random.randint(0, rgb_img.shape[1] - width)
    height_start = np.random.randint(0, rgb_img.shape[0] - height)

    rgb_img = rgb_img[height_start:height_start + height, width_start:width_start + width]
    return rgb_img

