Python imread() examples (source code)

car_notcar.py (project: SelfDrivingCar, author: aguijarro)
def main():

    images = glob.glob('*.jpeg')
    cars = []
    notcars = []

    for image in images:
        if 'image' in image or 'extra' in image:
            notcars.append(image)
        else:
            cars.append(image)

    data_info = data_look(cars, notcars)
    # Just for fun choose random car / not-car indices and plot example images
    car_ind = np.random.randint(0, len(cars))
    notcar_ind = np.random.randint(0, len(notcars))

    # Read in car / not-car images
    car_image = mpimg.imread(cars[car_ind])
    notcar_image = mpimg.imread(notcars[notcar_ind])

    # Plot the examples

    fig = plt.figure()
    plt.subplot(121)
    plt.imshow(car_image)
    plt.title('Example Car Image')
    plt.subplot(122)
    plt.imshow(notcar_image)
    plt.title('Example Not-car Image')
    plt.show()
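
The snippet above calls a data_look() helper defined elsewhere in the project. A minimal sketch of what such a helper might return is shown below; the dictionary keys and exact contents are assumptions, not the project's actual implementation.

import matplotlib.image as mpimg

def data_look(car_list, notcar_list):
    # Hypothetical summary helper: counts both classes and inspects one example image.
    example_img = mpimg.imread(car_list[0])
    return {
        "n_cars": len(car_list),
        "n_notcars": len(notcar_list),
        "image_shape": example_img.shape,
        "data_type": example_img.dtype,
    }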
template_match.py (project: SelfDrivingCar, author: aguijarro)
def find_matches(img, template_list):
    # Make a copy of the image to draw on
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through template list
    # Read in templates one by one
    # Use cv2.matchTemplate() to search the image
    #     using whichever of the OpenCV search methods you prefer
    # Use cv2.minMaxLoc() to extract the location of the best match
    # Determine bounding box corners for the match
    # Return the list of bounding boxes
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img,tmp,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc

        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    return bbox_list
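
A hypothetical usage sketch for find_matches(); the image and template filenames below are placeholders, and the matches are drawn with cv2.rectangle on a copy of the image.

import cv2
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

image = mpimg.imread('bbox-example-image.jpg')   # placeholder filename
templates = ['cutout1.jpg', 'cutout2.jpg']       # placeholder template filenames
draw_img = np.copy(image)
for top_left, bottom_right in find_matches(image, templates):
    cv2.rectangle(draw_img, top_left, bottom_right, (0, 0, 255), 6)
plt.imshow(draw_img)
plt.show()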
ColorSelector.py (project: SelfDrivingCar, author: aguijarro)
def ColorSelector():
    # Read in the image and print out some stats
    image = (mpimg.imread('test.png') * 255).astype('uint8')
    print('This image is: ', type(image),
          'with dimensions:', image.shape)

    # Grab the x and y size and make a copy of the image
    ysize = image.shape[0]
    xsize = image.shape[1]
    color_select = np.copy(image)

    # Define color selection criteria
    # MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
    red_threshold = 200
    green_threshold = 200
    blue_threshold = 200

    rgb_threshold = [red_threshold, green_threshold, blue_threshold]
    print('This is the rgb_threshold variable: ', rgb_threshold)

    # Do a bitwise or with the "|" character to identify
    # pixels below the thresholds
    thresholds = (image[:, :, 0] < rgb_threshold[0]) \
                  | (image[:, :, 1] < rgb_threshold[1]) \
                  | (image[:, :, 2] < rgb_threshold[2])

    print('This is the thresholds variable: ', thresholds)

    color_select[thresholds] = [0, 0, 0]
    # plt.imshow(color_select)

    # Save the result locally (remove this line if you do not need the file)
    mpimg.imsave("test-after.png", color_select)

    # Display the image
    plt.imshow(color_select)
    plt.show()
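
The * 255 scaling above is needed because matplotlib returns PNG data as float32 in [0, 1] (JPEGs come back as uint8, and PNGs may carry an alpha channel). A small hedged helper that normalizes either case for RGB(A) inputs, in case it is useful:

import numpy as np
import matplotlib.image as mpimg

def read_rgb_uint8(path):
    # Return a 3-channel uint8 RGB array regardless of the input format.
    img = mpimg.imread(path)
    if img.dtype != np.uint8:        # PNGs are read as float32 in [0, 1]
        img = (img * 255).astype(np.uint8)
    return img[:, :, :3]             # drop the alpha channel if present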
compare_image_entropy.py (project: NuGridPy, author: NuGrid)
def compare_entropy(name_img1, name_img2, method="rmq"):
     '''Compare two images, either by the Kullback-Leibler divergence of
     their pixel values or by a quartic norm of their pixel-wise difference.

     Parameters
     ----------
     name_img1 : string
       filename of image 1 (png format)

     name_img2 : string
       filename of image 2 (png format)

     method : string, optional
       "KL-div" for the Kullback-Leibler divergence or "rmq" for the
       quartic-norm difference (default "rmq")

     Returns
     -------
     S : float
        log10 of the Kullback-Leibler divergence S = sum(pk * log(pk / qk), axis=0)
        for method="KL-div", otherwise the quartic-norm difference
     fimg1, fimg2 : ndarray
        the flattened pixel arrays of the two images

     Note
     ----
     See http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html
     '''
     img1 = mpimg.imread(name_img1)
     img2 = mpimg.imread(name_img2)
     fimg1 = img1.flatten()
     fimg2 = img2.flatten()
     if method == "KL-div":
          eps = 0.0001
          S = stats.entropy(fimg2+eps,fimg1+eps)
          S = numpy.log10(S)
     elif method == "rmq":
          fdiff=fimg1-fimg2
          fdiff_sqr = fdiff**4
          S = (fdiff_sqr.sum())**(old_div(1.,4))

     return S,fimg1, fimg2
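
A hypothetical call with placeholder filenames; note that old_div used above comes from the python-future package (from past.utils import old_div).

S, flat1, flat2 = compare_entropy('frame_a.png', 'frame_b.png', method='KL-div')
print('log10 of the Kullback-Leibler divergence:', S)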
codalab.py (project: django-corenlp, author: arunchaganty)
def load_img(self, img_path):
        """
        Return an image object that can be immediately plotted with matplotlib
        """
        with open_file(self.uuid, img_path) as f:
            return mpimg.imread(f)
mask_to_submission.py (project: semantic-segmentation, author: albertbuchard)
def mask_to_submission_strings(image_filename):
    """Reads a single image and outputs the strings that should go into the submission file"""
    img_number = int(re.search(r"\d+", image_filename).group(0))
    im = mpimg.imread(image_filename)
    patch_size = 16
    for j in range(0, im.shape[1], patch_size):
        for i in range(0, im.shape[0], patch_size):
            patch = im[i:i + patch_size, j:j + patch_size]
            label = patch_to_label(patch)
            yield("{:03d}_{}_{},{}".format(img_number, j, i, label))
run_FCN.py (project: semantic-segmentation, author: albertbuchard)
def get_images(images_directory, groundtruths_directory, num_images): 
    #
    #   DESCRIPTION 
    #       Loads each training image and its ground truth and creates tensors [numImages, 400, 400, 3]
    #   
    #   INPUTS 
    #       images_directory path to training images directory  
    #       groundtruths_directory path to the groundtruth images directory
    #       num_images number of images to load 
    #       
    #   OUTPUTS
    #       images, ground_truth two tensors 
    #
    images = []
    ground_truth = [] 
    for i in range(1, num_images + 1):
        image_id = "satImage_%.3d" % i
        image_filename = image_id + ".png"
        image_path = images_directory + image_filename
        groundtruth_image_path = groundtruths_directory + image_filename

        if os.path.isfile(image_path) and os.path.isfile(groundtruth_image_path):
            print('Loading ' + image_filename)
            loaded_image = mpimg.imread(image_path)
            loaded_gt_image = mpimg.imread(groundtruth_image_path)

            # ordering is a module-level setting; "th" means channels-first (Theano-style)
            if ordering == "th":
                loaded_image = np.rollaxis(loaded_image, 2)

            images.append(loaded_image)
            ground_truth.append(loaded_gt_image)
        else:
            print('File ' + image_path + ' does not exist')

    return images, ground_truth
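
A hypothetical call that stacks the returned lists into the tensors mentioned in the description; the directory paths are placeholders, and all images are assumed to share the same 400x400 size with single-channel ground-truth masks.

images, ground_truth = get_images('training/images/', 'training/groundtruth/', 100)
X = np.asarray(images)        # (n_loaded, 400, 400, 3) with the default ordering
Y = np.asarray(ground_truth)  # (n_loaded, 400, 400) for grayscale masks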
run_FCN.py (project: semantic-segmentation, author: albertbuchard)
def predict_batch_test_images (model, batch_size = 1, max_image = 50):
    #
    #   DESCRIPTION
    #       Generator function batching the 32 mapped images derived from one test image
    #       Once the images have been loaded, they are predicted using the specified Keras model
    #       The generator then yields the batch of predictions to be consumed in a for loop in predict_and_rebuild
    #
    #   INPUTS
    #       model keras model
    #       batch_size set to 1
    #       max_image the max number of test images to load (50 in the test set)
    #
    #   OUTPUTS
    #       yield predictions a np.array of [:, 400, 400, 1]
    #
    images = np.zeros(shape=[8*4, 400, 400, 3], dtype=float) 

    for i in range(1,max_image+1):
        count = 0
        for rota_count in range(8):
            for patch_count in range(4):
                images[count, :,:,:] = mpimg.imread('test_set_images/test_'+str(i)+'/Test_'+str(i)+'_rota'+str(rota_count)+'_patch'+str(patch_count)+'.png')
                count += 1

        if (count == 32):
            preds = model.predict(images, batch_size = batch_size, verbose=1)
            yield preds
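
According to the description, the generator is consumed inside predict_and_rebuild. A hedged sketch of that consumption loop is shown below; the output directory and file naming are assumptions for illustration only.

import os
import numpy as np

os.makedirs('predictions', exist_ok=True)
for image_index, preds in enumerate(predict_batch_test_images(model), start=1):
    # preds has shape (32, 400, 400, 1): 8 rotations x 4 patches of one test image
    np.save('predictions/test_{}_preds.npy'.format(image_index), preds)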
Training_run.py (project: semantic-segmentation, author: albertbuchard)
def rotate_training():
    ## Saves rotated versions of each training image and its ground truth
    for i in range(1, 101):

        truth = mpimg.imread('training/groundtruth/satImage_' + '%.3d' % i + '.png')
        image = mpimg.imread('training/images/satImage_' + '%.3d' % i + '.png')

        imgs = mk_rotations(image)
        truths = mk_rotations(truth)

        count = 0
        for im in imgs:
            im = format_image(im)
            Image.fromarray(im).save('training_big/Images/satImage_' + '%.3d' % i + '_rota' + str(count) + '.png')
            count += 1
        count = 0
        for im in truths:
            im = format_image(im)
            Image.fromarray(im).save('training_big/Truth/satImage_' + '%.3d' % i + '_rota' + str(count) + '.png')
            count += 1

        print('Writing image ', i)
    return 0

##Generation (or not) of the extended training set
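
mk_rotations() and format_image() are defined elsewhere in the project. Judging by the _rota0 ... _rota7 filenames, mk_rotations() produces the eight symmetries of the square; the following is a plausible stand-in, not the project's actual code.

import numpy as np

def mk_rotations(img):
    # Eight dihedral symmetries: four 90-degree rotations, each also flipped.
    out = []
    for k in range(4):
        rotated = np.rot90(img, k)
        out.append(rotated)
        out.append(np.fliplr(rotated))
    return out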
Training_run.py (project: semantic-segmentation, author: albertbuchard)
def extract_data(filename, num_images):
    imgs = []
    stars = []
    ridges = []
    print("loading, please wait")
    for i in range(1, num_images+1):
        imageid = 'satImage_'+ '%.3d' % i
        ##Load images
        for j in range(8):
            image_filename = 'training_big/Images/' + imageid + "_rota"+str(np.int(j))+".png"
            if os.path.isfile(image_filename):

                img = mpimg.imread(image_filename)
                n1,n2,n = img.shape

                imgs.append(img.astype(np.float32, copy=False))

            else:
                print ('File ' + image_filename + ' does not exist')

    ##Format images
    num_images = len(imgs)
    IMG_WIDTH = imgs[0].shape[0]
    IMG_HEIGHT = imgs[0].shape[1]
    N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)
    img_patches = [img_crop(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE) for i in range(num_images)]
    data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]
    return np.asarray(data)



# Assign a label to a patch v
VideoTools.py (project: SlidingWindowVideoTDA, author: ctralie)
def loadVideoFolder(foldername):
    N = len(os.listdir(foldername))
    #Assume numbering starts at zero
    f0 = scipy.misc.imread("%s/%i.png"%(foldername, 0))
    IDims = f0.shape
    dim = len(f0.flatten())
    I = np.zeros((N, dim))
    I[0, :] = np.array(f0.flatten(), dtype=np.float32)/255.0
    for i in range(1, N):
        f = scipy.misc.imread("%s/%i.png"%(foldername, i))
        I[i, :] = np.array(f.flatten(), dtype=np.float32)/255.0
    return (I, IDims)

#Output video
#I: PxN video array, IDims: Dimensions of each frame
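
scipy.misc.imread was deprecated in SciPy 1.0 and removed in later releases, so the calls above fail on recent environments. A drop-in replacement using imageio is one option; this is an assumption about your environment, not part of the original project.

import imageio

def read_frame(path):
    # Equivalent of the old scipy.misc.imread for plain image files.
    return imageio.imread(path)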
io_object.py (project: MV3D-Pytorch, author: dongwoohhh)
def load_rgb(data_path):
    im_path = os.path.join(data_path,'image_2','*.png')
    im_files = sorted(glob.glob(im_path))

    im_all = []

    #tic = time.time()
    for iter_files in im_files:            
        if len(im_all)<100 :
            im = np.uint8(mpimg.imread(iter_files) * 255)
            im_all.append(im) 
            print(im.shape)
    return im_all
mnist_a2j_2pickle.py (project: Deep-Learning-Experiments, author: roatienza)
def readfile(path):
    try:
        data = img.imread(path)
        return data
    except Exception:
        print("Error reading: ", path)
        return np.array([])
mnist_library.py (project: Deep-Learning-Experiments, author: roatienza)
def readfile(path):
    try:
        data = img.imread(path)
        return data
    except Exception:
        return np.array([])
mnist_library.py (project: Deep-Learning-Experiments, author: roatienza)
def displayimage(path):
    data = img.imread(path)
    plt.imshow(data)
    plt.show()
    return
tests.py (project: ssd_tensorflow, author: railsnoob)
def test_pre_process_image():
    matched, debug_gt, debug_matched_default_box, dbg_def_boxes ,dbg_cells, dbg_imgs = pre_process_images("tiny/data.pkl",["img00090526.jpg"])
    img = mpimg.imread("tiny/img00090526.jpg")
    debug_draw_boxes(img, dbg_cells["img00090526.jpg"], (255,255,255),1)
    debug_draw_boxes(img, debug_gt["img00090526.jpg"], (255,0,0),1)
    debug_draw_boxes(img, debug_matched_default_box["img00090526.jpg"], (1,255,1),2)
    debug_draw_boxes(img, dbg_def_boxes["img00090526.jpg"], (1,0,255),1)
    imgplot = plt.imshow(img)
    plt.show()
tests.py (project: ssd_tensorflow, author: railsnoob)
def test_process_image_set(dirname):
    matched, debug_gt, debug_matched_default_box, dbg_def_boxes ,dbg_cells,dbg_imgs = pre_process_images(dirname+"/data.pkl")

    for img_name in dbg_imgs:
        print("loading ... ",img_name)
        img = mpimg.imread(img_name)
        # debug_draw_boxes(img, dbg_cells[img_name], (255,255,255),1)
        debug_draw_boxes(img, debug_gt[img_name], (255,0,0),1)
        debug_draw_boxes(img, debug_matched_default_box[img_name], (1,255,1),1)
        # debug_draw_boxes(img, dbg_def_boxes[img_name], (1,0,255),1)
        imgplot = plt.imshow(img)
        plt.show()
train.py (project: ssd_tensorflow, author: railsnoob)
def _get_image(self, fname):
        """
        Args fname : filename of image

        Returns the image as array with dim (w,h,channels) """
        img = imgs.get(fname)

        if img is None:
            img = mpimg.imread(fname)
            img = (img-128)/128
            imgs[fname] = img

        return img
inference.py (project: ssd_tensorflow, author: railsnoob)
def predict_boxes(self,model_name="trained-model"):
        """ Given a directory containing the dataset and images_path show objects detected for
        a random image.

        Args    - dirname: Name of directory containing meta data of training run.
        Returns - Nothing. Shows the pic with detections.

        """ 
        images_path            = self.cfg.g("images_path")

        train_imgs             = pickle.load(open(self.dirname+"/train.pkl","rb"))

        image_info             = random.choice(list(train_imgs))
        image_name             = image_info['img_name']
        p_conf, p_loc, p_probs = self.run_inference(image_name,model_name)
        non_zero_indices       = np.where(p_conf > 0)[1]

        # DEBUGGING
        print("p_conf={} p_loc={} p_probs={}".format(p_conf.shape,p_loc.shape,p_probs.shape))
        print("Non zero indices",non_zero_indices)
        for i in non_zero_indices:
            print(i,") location",p_loc[0][i*4:i*4+4],"probs", p_probs[0][i],"conf",p_conf[0][i])

        boxes, confs = self.convert_coordinates_to_boxes(p_loc,p_conf,p_probs)
        print("Boxes BEFORE NMS")
        for i,a in enumerate(zip(boxes,confs)):
            print(i,a)

        boxes = non_max_suppression_fast(boxes,0.3)

        print("Boxes AFTER NMS")
        print(boxes)

        img   = mpimg.imread(images_path+"/"+image_name)

        self.debug_draw_boxes(img,boxes,(0,255,0),2)

        plt.figure(figsize=(8,8))
        plt.imshow(img)
        plt.show()
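
non_max_suppression_fast() is imported from elsewhere in the project. Below is a hedged stand-in based on the widely used Malisiewicz-style NMS, assuming boxes are rows of [x1, y1, x2, y2]; the project's actual implementation and box format may differ.

import numpy as np

def non_max_suppression_fast(boxes, overlap_thresh):
    # Greedy non-maximum suppression on [x1, y1, x2, y2] boxes.
    if len(boxes) == 0:
        return []
    boxes = np.asarray(boxes, dtype=float)
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # Overlap of the current box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        # Drop the picked box and every box overlapping it too much
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlap_thresh)[0])))
    return boxes[pick].astype(int)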

