Python examples using cv2.INTER_AREA
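A minimal, self-contained sketch of the pattern shared by all the snippets below: shrinking an image with cv2.resize and the INTER_AREA flag, the interpolation OpenCV recommends for downscaling. The synthetic input stands in for a real image.

import cv2
import numpy as np

# A synthetic 512x512 BGR image stands in for a real photo.
img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)

# INTER_AREA resamples using pixel-area relation, the method OpenCV
# recommends for shrinking images.
small = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
print(small.shape)  # (128, 128, 3)
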

HOG.py (project: PaintingToArtists, author: achintyagopal)
def createTrainingInstances(self, images):
        start = time.time()
        hog = cv2.HOGDescriptor()
        instances = []
        for img, label in images:
            # print img
            img = read_color_image(img)
            img = cv2.resize(img, (128, 128), interpolation = cv2.INTER_AREA)
            descriptor = hog.compute(img)
            if descriptor is None:
                descriptor = []
            else:
                descriptor = descriptor.ravel()
            pairing = Instance(descriptor, label)
            instances.append(pairing)
        end = time.time() - start
        self.training_instances = instances
        print("HOG TRAIN SERIAL: %d images -> %f" % (len(images), end))
HOG.py (project: PaintingToArtists, author: achintyagopal)
def createTestingInstances(self, images):
        start = time.time()
        hog = cv2.HOGDescriptor()
        instances = []
        for img, label in images:
            # print img
            img = read_color_image(img)
            img = cv2.resize(img, (128, 128), interpolation = cv2.INTER_AREA)
            descriptor = hog.compute(img)
            if descriptor is None:
                descriptor = []
            else:
                descriptor = descriptor.ravel()
            pairing = Instance(descriptor, label)
            instances.append(pairing)
        end = time.time() - start
        self.testing_instances = instances
        print("HOG TEST SERIAL: %d images -> %f" % (len(images), end))
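Both methods above perform the same per-image step; a hedged, standalone sketch of that step (the image path is a placeholder, and cv2.imread stands in for the project's read_color_image helper):

import cv2

hog = cv2.HOGDescriptor()
img = cv2.imread("example.jpg")  # placeholder path
img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
descriptor = hog.compute(img).ravel()  # flattened HOG feature vector
print(descriptor.shape)
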
10-PiStorms_icontracker.py (project: PiStorms, author: mindsensors)
def findSquare(self, frame):
        image = frame  # the rest of the method refers to the frame as `image`
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        # (OpenCV 2.x/4.x return (contours, hierarchy); OpenCV 3.x returns three values)
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find hexagon
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has at least four points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x,y,w,h = cv2.boundingRect(c)
                cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", image)
                # create the mask and remove the rest of the background
                mask = np.zeros(image.shape[:2], dtype = "uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(image, image, mask = mask)
                #cv2.imshow("Masked",masked  )
                # crop the masked image so it can be compared to the reference image
                cropped = masked[y:y+h,x:x+w]
                # scale the image to the same fixed size as the reference image
                cropped = cv2.resize(cropped, (200,200), interpolation =cv2.INTER_AREA)

                return cropped
caffe_image.py (project: sail, author: GemHunt)
def get_whole_rotated_image(crop, mask, angle, crop_size, before_rotate_size, scale):
    #Better for larger:
    #pixels_to_jitter = 35 * scale
    #For Dates:
    pixels_to_jitter = 4 #Old Way

    center_x = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter
    center_y = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter

    rot_image = crop.copy()
    rot_image = rotate(rot_image, angle, center_x, center_y, before_rotate_size, before_rotate_size)
    # This is hard coded for 28x28.
    rot_image = cv2.resize(rot_image, (41, 41), interpolation=cv2.INTER_AREA)
    rot_image = rot_image[6:34, 6:34]

    # rot_image = rot_image * mask
    return rot_image
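The rotate helper called above is not part of this excerpt. A plausible sketch of such a helper, assuming it rotates about the supplied centre with an affine warp (the signature and behaviour are assumptions, not the project's actual code):

import cv2

def rotate(image, angle, center_x, center_y, width, height):
    # Rotation matrix about (center_x, center_y); positive angles rotate counter-clockwise.
    matrix = cv2.getRotationMatrix2D((center_x, center_y), angle, 1.0)
    return cv2.warpAffine(image, matrix, (width, height))
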
soja_resize_image.py (project: soja_box, author: iTaa)
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    Resize an image so that its shorter side equals mini_size whenever the longer side exceeds it.
    :param img_path: path to the image file
    :param mini_size: target size of the shorter side
    :param jpeg_quality: JPEG compression quality of the output
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)  # params already holds [IMWRITE_JPEG_QUALITY, jpeg_quality]
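A hedged usage example (the path is a placeholder); the resized copy is written next to the original with a "_New.jpg" suffix:

resize_image("photos/holiday.jpg", mini_size=480, jpeg_quality=80)  # placeholder path
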
save_images.py (project: chainer-cyclegan, author: Aixile)
def resize_to_nearest_aspect_ratio(img, divide_base=4, resize_base=256):
    w, h = img.shape[0], img.shape[1]
    #print(w,h)
    if w < h:
        if resize_base == 0:
            resize_base = w - w % divide_base
        s0 = resize_base
        s1 = int(h * resize_base / w)
        s1 = s1 - s1 % divide_base
    else:
        if resize_base == 0:
            resize_base = h - h % divide_base
        s1 = resize_base
        s0 = int(w * resize_base / h)
        s0 = s0 - s0 % divide_base
    #print(s1,s0)
    return cv2.resize(img, (s1, s0), interpolation=cv2.INTER_AREA)


# Input imgs format: (batch, channels, width, height)
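A hedged usage sketch, assuming the function above and its cv2/numpy imports are in scope: the shorter side is set to resize_base and the longer side is scaled to preserve the aspect ratio, then rounded down to a multiple of divide_base.

import numpy as np

img = np.zeros((300, 500, 3), dtype=np.uint8)  # height 300, width 500
out = resize_to_nearest_aspect_ratio(img, divide_base=4, resize_base=256)
print(out.shape)  # (256, 424, 3): shorter side 256, 424 is the nearest lower multiple of 4
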
gym_wrapper.py (project: ngraph, author: NervanaSystems)
def _modify_observation(self, observation):
        # convert color to grayscale using luma component
        observation = (
            observation[:, :, 0] * 0.299 + observation[:, :, 1] * 0.587 +
            observation[:, :, 2] * 0.114
        )

        observation = cv2.resize(
            observation, (84, 110), interpolation=cv2.INTER_AREA
        )
        observation = observation[18:102, :]
        assert observation.shape == (84, 84)

        # store as 8-bit integer values (0-255)
        observation = np.array(observation, dtype=np.uint8)

        return observation
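This mirrors the common DQN-style Atari preprocessing: luma grayscale, an INTER_AREA resize to 84x110, then cropping rows 18..101 to obtain an 84x84 frame. A standalone sketch of the same steps on a synthetic frame (the frame size is a placeholder for a typical Atari observation):

import cv2
import numpy as np

frame = np.random.randint(0, 256, (210, 160, 3), dtype=np.uint8)
gray = (frame[:, :, 0] * 0.299 + frame[:, :, 1] * 0.587 + frame[:, :, 2] * 0.114).astype(np.float32)
gray = cv2.resize(gray, (84, 110), interpolation=cv2.INTER_AREA)  # dsize is (width, height)
obs = gray[18:102, :].astype(np.uint8)
assert obs.shape == (84, 84)
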
alg.py (project: image-segmentation, author: alexlouden)
def scale(self):

        self.original_image = self.image.copy()

        self.image_height, self.image_width = self.image.shape[:2]

        if max(self.image_width, self.image_height) > MAX_DIMENSION:
            # Need to shrink

            if self.image_width > self.image_height:
                new_width = MAX_DIMENSION
                new_height = int(self.image_height * new_width / self.image_width)
            else:
                new_height = MAX_DIMENSION
                new_width = int(self.image_width * new_height / self.image_height)

            print('Resizing to {}x{}'.format(new_width, new_height))

            self.image = cv2.resize(self.image, (new_width, new_height), interpolation=cv2.INTER_AREA)
            self.image_height, self.image_width = self.image.shape[:2]
detect_util.py (project: tensorflow_face, author: ZhihengCV)
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
    return im_data

    # This method is kept for debugging purposes
#     h=img.shape[0]
#     w=img.shape[1]
#     hs, ws = sz
#     dx = float(w) / ws
#     dy = float(h) / hs
#     im_data = np.zeros((hs,ws,3))
#     for a1 in range(0,hs):
#         for a2 in range(0,ws):
#             for a3 in range(0,3):
#                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
#     return im_data
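Note the argument order: cv2.resize takes dsize as (width, height), while sz here follows the NumPy (rows, cols) convention, hence the (sz[1], sz[0]) swap. A hedged usage example on a synthetic image:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
out = imresample(img, (120, 160))  # sz given as (rows, cols)
print(out.shape)  # (120, 160, 3)
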
ImageSlicer.py (project: Pedestrian-Recognition, author: yugrocks)
def __init__(self,img):

        #making two copies of the same image
        original_img = np.array(img)
        new_img = np.array(img)

        #resizing keeping the aspect ratio constant
        a_ratio = new_img.shape[0]/new_img.shape[1]
        #new_row=int(new_img.shape[0])
        new_row = 128
        new_colm = int(new_row/a_ratio)
        new_img = cv2.resize(new_img, (new_colm,new_row), interpolation = cv2.INTER_AREA)
        original_img = cv2.resize(original_img, (new_colm,new_row), interpolation = cv2.INTER_AREA)
        # convert the new image to grayscale
        new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)


        self.original_img = original_img
        self.new_img = new_img
detect_face.py (project: faceNet_RealTime, author: jack55436001)
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
    return im_data

    # This method is kept for debugging purposes
#     h=img.shape[0]
#     w=img.shape[1]
#     hs, ws = sz
#     dx = float(w) / ws
#     dy = float(h) / hs
#     im_data = np.zeros((hs,ws,3))
#     for a1 in range(0,hs):
#         for a2 in range(0,ws):
#             for a3 in range(0,3):
#                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
#     return im_data
img_utils.py (project: kaggle-carvana, author: ematvey)
def output_resized_mask():
    import pandas as pd
    os.makedirs('output/resize', exist_ok=True)
    imgs = [0, 10, 20, 30]
    df = pd.read_csv(TRAIN_INDEX)

    for i in imgs:
        fns = df.iloc[i]
        img_fn = fns['img']
        mask_fn = fns['mask']
        print('mask_fn', mask_fn)
        mask = carvana_pad_to_std(np.load(mask_fn))

        for downsample in [1.0, 1.5, 2.0, 4.0]:
            h = int(1280 / downsample)
            w = int(1920 / downsample)
            out_fn = os.path.join('output/resize', '{}_{}x{}.png'.format(i, w, h))

            print(mask.shape)
            print((h, w))
            m = cv2.resize(mask, dsize=(w, h), interpolation=cv2.INTER_AREA)
            print(m.shape)

            draw_mask(out_fn, img_fn, mask_fn, m)
detect_face.py (project: icyface_api, author: bupticybee)
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
    return im_data

    # This method is kept for debugging purposes
#     h=img.shape[0]
#     w=img.shape[1]
#     hs, ws = sz
#     dx = float(w) / ws
#     dy = float(h) / hs
#     im_data = np.zeros((hs,ws,3))
#     for a1 in range(0,hs):
#         for a2 in range(0,ws):
#             for a3 in range(0,3):
#                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
#     return im_data
data_feeder.py (project: tf-lcnn, author: ildoonet)
def get_mnist_data(is_train, image_size, batchsize):
    ds = MNISTCh('train' if is_train else 'test', shuffle=True)

    if is_train:
        augs = [
            imgaug.RandomApplyAug(imgaug.RandomResize((0.8, 1.2), (0.8, 1.2)), 0.3),
            imgaug.RandomApplyAug(imgaug.RotationAndCropValid(15), 0.5),
            imgaug.RandomApplyAug(imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01), 0.25),
            imgaug.Resize((224, 224), cv2.INTER_AREA)
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 128*10, multiprocessing.cpu_count())
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 256, 4)
    else:
        # no augmentation, only resizing
        augs = [
            imgaug.Resize((image_size, image_size), cv2.INTER_CUBIC),
        ]
        ds = AugmentImageComponent(ds, augs)
        ds = BatchData(ds, batchsize)
        ds = PrefetchData(ds, 20, 2)
    return ds
pose_dataset.py (project: tf-openpose, author: ildoonet)
def get_heatmap(self, target_size):
        heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width))

        for joints in self.joint_list:
            for idx, point in enumerate(joints):
                if point[0] < 0 or point[1] < 0:
                    continue
                CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)

        heatmap = heatmap.transpose((1, 2, 0))

        # background
        heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)

        if target_size:
            heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)

        return heatmap
polarTransform.py (project: imgProcessor, author: radjkarl)
def linearToPolar(img, center=None,
                  final_radius=None,
                  initial_radius=None,
                  phase_width=None,
                  interpolation=cv2.INTER_AREA, maps=None,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d (x,y) Cartesian array to a polar (r, phi) array
    using opencv.remap
    '''
    if maps is None:
        mapY, mapX = linearToPolarMaps(img.shape[:2], center, final_radius,
                                       initial_radius, phase_width)
    else:
        mapY, mapX = maps

    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)

    return cv2.remap(img, mapY, mapX, **o)
polarTransform.py (project: imgProcessor, author: radjkarl)
def polarToLinear(img, shape=None, center=None, maps=None,
                  interpolation=cv2.INTER_AREA,
                  borderValue=0, borderMode=cv2.BORDER_REFLECT, **opts):
    '''
    map a 2d polar (r, phi) polar array to a  Cartesian (x,y) array
    using opencv.remap
    '''

    if maps is None:
        mapY, mapX = polarToLinearMaps(img.shape[:2], shape, center)
    else:
        mapY, mapX = maps

    o = {'interpolation': interpolation,
         'borderValue': borderValue,
         'borderMode': borderMode}
    o.update(opts)

    return cv2.remap(img, mapY, mapX, **o)
canny_edge_histogram.py (project: opencv_edge_detection, author: tasdikrahman)
def resize_to_screen(src, maxw=1380, maxh=600, copy=False):

    height, width = src.shape[:2]

    scl_x = float(width)/maxw
    scl_y = float(height)/maxh

    scl = int(np.ceil(max(scl_x, scl_y)))

    if scl > 1.0:
        inv_scl = 1.0/scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src

    return img
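Here cv2.resize is called with dsize=(0, 0) so the fx/fy scale factors determine the output size; because the shrink factor is rounded up to an integer, the result is guaranteed to fit within maxw x maxh. A hedged usage example on a synthetic image, assuming the function above and its cv2/numpy imports are in scope:

import numpy as np

src = np.zeros((2400, 3000, 3), dtype=np.uint8)  # 3000x2400, larger than 1380x600
small = resize_to_screen(src)
print(small.shape)  # (600, 750, 3): shrunk by an integer factor of 4
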
laplace_and_sobel.py (project: opencv_edge_detection, author: tasdikrahman)
def resize_to_screen(src, maxw=1280, maxh=700, copy=False):

    height, width = src.shape[:2]

    scl_x = float(width)/maxw
    scl_y = float(height)/maxh

    scl = int(np.ceil(max(scl_x, scl_y)))

    if scl > 1.0:
        inv_scl = 1.0/scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src

    return img
canny_edge.py (project: opencv_edge_detection, author: tasdikrahman)
def resize_to_screen(src, maxw=1380, maxh=600, copy=False):

    height, width = src.shape[:2]

    scl_x = float(width)/maxw
    scl_y = float(height)/maxh

    scl = int(np.ceil(max(scl_x, scl_y)))

    if scl > 1.0:
        inv_scl = 1.0/scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src

    return img
laplace_and_sobel_histogram.py (project: opencv_edge_detection, author: tasdikrahman)
def resize_to_screen(src, maxw=1280, maxh=700, copy=False):

    height, width = src.shape[:2]

    scl_x = float(width)/maxw
    scl_y = float(height)/maxh

    scl = int(np.ceil(max(scl_x, scl_y)))

    if scl > 1.0:
        inv_scl = 1.0/scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src

    return img
robot_recorder.py (project: visual_mpc, author: febert)
def crop_lowres(self, cv_image):
        self.ltob.d_img_raw_npy = np.asarray(cv_image)
        if self.instance_type == 'main':
            img = cv2.resize(cv_image, (0, 0), fx=1 / 16., fy=1 / 16., interpolation=cv2.INTER_AREA)
            startrow = 3
            startcol = 27

            img = imutils.rotate_bound(img, 180)
        else:
            img = cv2.resize(cv_image, (0, 0), fx=1 / 15., fy=1 / 15., interpolation=cv2.INTER_AREA)
            startrow = 2
            startcol = 27
        endcol = startcol + 64
        endrow = startrow + 64

        # crop image:
        img = img[startrow:endrow, startcol:endcol]
        assert img.shape == (64,64,3)
        return img
detect_face.py (project: real_time_face_recognition, author: shanren7)
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #pylint: disable=no-member
    return im_data

    # This method is kept for debugging purposes
#     h=img.shape[0]
#     w=img.shape[1]
#     hs, ws = sz
#     dx = float(w) / ws
#     dy = float(h) / hs
#     im_data = np.zeros((hs,ws,3))
#     for a1 in range(0,hs):
#         for a2 in range(0,ws):
#             for a3 in range(0,3):
#                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
#     return im_data
utils.py (project: faststyle, author: ghwatson)
def imresize(img, scale):
    """Depending on if we scale the image up or down, we use an interpolation
    technique as per OpenCV recommendation.

    :param img:
        3D numpy array of image.
    :param scale:
        float to scale image by in both axes.
    """
    if scale > 1.0:  # use cubic interpolation for upscale.
        img = cv2.resize(img, None, interpolation=cv2.INTER_CUBIC,
                         fx=scale, fy=scale)
    elif scale < 1.0:  # area relation sampling for downscale.
        img = cv2.resize(img, None, interpolation=cv2.INTER_AREA,
                         fx=scale, fy=scale)
    return img
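A hedged usage example, assuming the function above and numpy are in scope: upscaling selects INTER_CUBIC, downscaling selects INTER_AREA, and a scale of exactly 1.0 returns the image unchanged.

import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)
print(imresize(img, 0.5).shape)  # (50, 100, 3), downscaled with INTER_AREA
print(imresize(img, 2.0).shape)  # (200, 400, 3), upscaled with INTER_CUBIC
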
detect_face.py (project: face, author: xpzouying)
def imresample(img, sz):
    im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
    return im_data

    # This method is kept for debugging purposes
#     h=img.shape[0]
#     w=img.shape[1]
#     hs, ws = sz
#     dx = float(w) / ws
#     dy = float(h) / hs
#     im_data = np.zeros((hs,ws,3))
#     for a1 in range(0,hs):
#         for a2 in range(0,ws):
#             for a3 in range(0,3):
#                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
#     return im_data
utils_tests.py (project: object-detector, author: penny4860)
def test_crop_bb():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"bb" : (0, 10, 10, 20),
        "pad" : 2,
        "desired_size" : (6,6),
    }

    # When perform crop_bb()
    patch = utils.crop_bb(image, bb=parameters["bb"], padding=parameters["pad"], dst_size=parameters["desired_size"])

    # Then it should be the same as a manually cropped patch.
    bb = parameters["bb"]
    pad = parameters["pad"]
    desired_size = parameters["desired_size"]
    crop_manual = image[max(bb[0],bb[0]-pad) : min(image.shape[0],bb[1]+pad), max(bb[2],bb[2]-pad) : min(image.shape[1],bb[3]+pad)]
    crop_manual = cv2.resize(crop_manual, desired_size, interpolation=cv2.INTER_AREA)
    assert (patch == crop_manual).all(), "utils.crop_bb() unit test failed!!"
page_dewarp.py (project: page_dewarp, author: mzucker)
def resize_to_screen(src, maxw=1280, maxh=700, copy=False):

    height, width = src.shape[:2]

    scl_x = float(width)/maxw
    scl_y = float(height)/maxh

    scl = int(np.ceil(max(scl_x, scl_y)))

    if scl > 1.0:
        inv_scl = 1.0/scl
        img = cv2.resize(src, (0, 0), None, inv_scl, inv_scl, cv2.INTER_AREA)
    elif copy:
        img = src.copy()
    else:
        img = src

    return img
Capture.py (project: GidroGraf-Sirius, author: alf3r)
def CalculateDim(data, V, c, screen_width, screen_height, datarate):
    n_points = data.shape[1]
    m_lines  = data.shape[0]

    time0 = n_points / datarate
    Ltotal = points2range(n_points, datarate, c)

    Lpx   = Ltotal / n_points
    Hpx   = V * time0 / 2

    Htotal = Hpx * m_lines

    scale = Hpx / Lpx

    if screen_width == -1:
        screen_width = round(screen_height / scale)
    elif screen_height == -1:
        screen_height = round(screen_width * scale)

    dim = (screen_width,screen_height)
    # data = cv2.resize(data, dim, interpolation=cv2.INTER_AREA)
    return dim
simplifier2.py (project: SketchSimplification, author: La4La)
def read_img(path, s_size):
    image1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    if image1.shape[0] < image1.shape[1]:
        s0 = s_size
        s1 = int(image1.shape[1] * (s_size / image1.shape[0]))
        s1 = s1 - s1 % 16
    else:
        s1 = s_size
        s0 = int(image1.shape[0] * (s_size / image1.shape[1]))
        s0 = s0 - s0 % 16

    image1 = np.asarray(image1, np.float32)
    image1 = cv2.resize(image1, (s1, s0), interpolation=cv2.INTER_AREA)

    if image1.ndim == 2:
        image1 = image1[:, :, np.newaxis]

    return image1.transpose(2, 0, 1), False
simplifier1.py (project: SketchSimplification, author: La4La)
def read_img(path, s_size):
    image1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    if image1.shape[0] < image1.shape[1]:
        s0 = s_size
        s1 = int(image1.shape[1] * (s_size / image1.shape[0]))
        s1 = s1 - s1 % 16
    else:
        s1 = s_size
        s0 = int(image1.shape[0] * (s_size / image1.shape[1]))
        s0 = s0 - s0 % 16

    image1 = np.asarray(image1, np.float32)
    image1 = cv2.resize(image1, (s1, s0), interpolation=cv2.INTER_AREA)

    if image1.ndim == 2:
        image1 = image1[:, :, np.newaxis]

    return image1.transpose(2, 0, 1), False
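A hedged usage example (the path is a placeholder): the shorter side is scaled to s_size, the longer side is rounded down to a multiple of 16, and the image comes back as a float32 array in (channels, height, width) order together with a constant False flag.

arr, flag = read_img("sketch.png", s_size=384)  # placeholder path
print(arr.shape)  # (1, H, W) for a grayscale input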

