Python examples using cv2.BORDER_CONSTANT
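cv2.BORDER_CONSTANT tells OpenCV's border-aware functions (copyMakeBorder, warpAffine, warpPerspective, remap) to fill pixels that fall outside the source image with a fixed color passed through the value / borderValue argument. The snippets below are collected from open-source projects; as a minimal self-contained sketch (the image and sizes here are synthetic and illustrative, not taken from any of the projects):

import cv2
import numpy as np

# build a small synthetic image instead of reading one from disk
img = np.full((100, 150, 3), 128, dtype=np.uint8)

# pad 10 px on every side; BORDER_CONSTANT fills the new pixels with `value`
padded = cv2.copyMakeBorder(img, 10, 10, 10, 10,
                            cv2.BORDER_CONSTANT, value=(0, 0, 0))
print(padded.shape)  # (120, 170, 3)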

Source file: page.py (project: doc2text, author: jlsutherland)
def rotate(image, theta):
    (h, w) = image.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, theta, 1)
    rotated = cv2.warpAffine(image, M, (int(w), int(h)), flags=cv2.INTER_LINEAR,
                             borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
    return rotated
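A hedged usage sketch for rotate() above; the synthetic page image and the 30-degree angle are illustrative, not taken from the doc2text project:

import cv2
import numpy as np

page = np.zeros((200, 300), dtype=np.uint8)   # dark synthetic "page"
rotated = rotate(page, 30)
# same size as the input; the corners exposed by the rotation are filled
# with the white borderValue because of BORDER_CONSTANT
print(rotated.shape)  # (200, 300)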
Source file: pose_augment.py (project: tf-openpose, author: ildoonet)
def pose_rotation(meta):
    deg = random.uniform(-40.0, 40.0)
    img = meta.img

    center = (img.shape[1] * 0.5, img.shape[0] * 0.5)
    rot_m = cv2.getRotationMatrix2D((center[0] - 0.5, center[1] - 0.5), deg, 1)
    ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
    if img.ndim == 3 and ret.ndim == 2:
        ret = ret[:, :, np.newaxis]
    neww, newh = RotationAndCropValid.largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
    neww = min(neww, ret.shape[1])
    newh = min(newh, ret.shape[0])
    newx = int(center[0] - neww * 0.5)
    newy = int(center[1] - newh * 0.5)
    # print(ret.shape, deg, newx, newy, neww, newh)
    img = ret[newy:newy + newh, newx:newx + neww]

    # adjust meta data
    adjust_joint_list = []
    for joint in meta.joint_list:
        adjust_joint = []
        for point in joint:
            if point[0] < -100 or point[1] < -100:
                adjust_joint.append((-1000, -1000))
                continue
            # if point[0] <= 0 or point[1] <= 0:
            #     adjust_joint.append((-1, -1))
            #     continue
            x, y = _rotate_coord((meta.width, meta.height), (newx, newy), point, deg)
            adjust_joint.append((x, y))
        adjust_joint_list.append(adjust_joint)

    meta.joint_list = adjust_joint_list
    meta.width, meta.height = neww, newh
    meta.img = img

    return meta
Source file: LensDistortion.py (project: imgProcessor, author: radjkarl)
def correct(self, image, keepSize=False, borderValue=0):
        '''
        Remove lens distortion from the given image.
        '''
        image = imread(image)
        (h, w) = image.shape[:2]
        mapx, mapy = self.getUndistortRectifyMap(w, h)
        self.img = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR,
                             borderMode=cv2.BORDER_CONSTANT,
                             borderValue=borderValue
                             )
        if not keepSize:
            xx, yy, ww, hh = self.roi
            self.img = self.img[yy: yy + hh, xx: xx + ww]
        return self.img
Source file: datasets.py (project: Pytorch-Deeplab, author: speedinghzl)
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label

        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1]  # change to BGR
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name
Source file: datasets.py (project: Pytorch-Deeplab, author: speedinghzl)
def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean

        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size
Source file: __init__.py (project: Pytorch-Deeplab, author: speedinghzl)
def __getitem__(self, index):
        datafiles = self.files[index]

        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]

        if self.scale:
            image, label = self.generate_scale_label(image, label)

        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT, 
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, 
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape

        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)

        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1]  # change to BGR
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name
Source file: image_processor.py (project: 2017-robot, author: frc1418)
def threshold(self, img):
        cv2.cvtColor(img, cv2.COLOR_BGR2HSV, dst=self.hsv)
        cv2.inRange(self.hsv, self.thresh_low, self.thresh_high, dst=self.bin)

        cv2.morphologyEx(self.bin, cv2.MORPH_CLOSE, self.morphKernel, dst=self.bin2, iterations=1)

        if self.draw_thresh:
            b = (self.bin2 != 0)
            cv2.copyMakeBorder(self.black, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=self.RED, dst=self.out)
            self.out[np.dstack((b, b, b))] = 255

        return self.bin2
Source file: generator.py (project: DeepPoseComparison, author: ynaka81)
def _pad_image(self, image, joint):
        height, width, _ = image.shape
        shape = np.array((width, height))
        residual = (self.image_size - shape).clip(0, self.image_size)
        left, top = residual // 2
        right, bottom = residual - residual // 2
        padded_image = cv2.copyMakeBorder(
            image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)
        moved_joint = joint + (left, top, 0)
        return padded_image, moved_joint
Source file: transforms.py (project: GulpIO, author: TwentyBN)
def __init__(self, size, padding=0, pad_method=cv2.BORDER_CONSTANT):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding
        self.pad_method = pad_method
Source file: transforms.py (project: GulpIO, author: TwentyBN)
def __init__(self, size, padding=0, pad_method=cv2.BORDER_CONSTANT):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding
        self.pad_method = pad_method
Source file: train.py (project: unet-tensorflow, author: timctho)
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.cos(angle / 180 * np.pi) * sx
        ss = np.sin(angle / 180 * np.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                    borderMode=borderMode, borderValue=(0, 0, 0))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR,
                                   borderMode=borderMode, borderValue=(0, 0, 0))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR,
                                     borderMode=borderMode, borderValue=(0, 0, 0))

    return image, mask, mask_w
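A minimal sketch of calling randomShiftScaleRotate() with synthetic arrays; the 256x256 shapes are arbitrary and u=1.0 is used only to force the augmentation branch to run:

import cv2
import numpy as np

image = np.zeros((256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256), dtype=np.uint8)
mask_w = np.ones((256, 256), dtype=np.uint8)

# u=1.0 makes np.random.random() < u always true, so the warp is always applied
aug_img, aug_mask, aug_mask_w = randomShiftScaleRotate(image, mask, mask_w, u=1.0)
print(aug_img.shape, aug_mask.shape, aug_mask_w.shape)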
Source file: psuedo_label_dataset_generator.py (project: unet-tensorflow, author: timctho)
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.cos(angle / 180 * np.pi) * sx
        ss = np.sin(angle / 180 * np.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                    borderMode=borderMode, borderValue=(0, 0, 0))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR,
                                   borderMode=borderMode, borderValue=(0, 0, 0))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR,
                                     borderMode=borderMode, borderValue=(0, 0, 0))

    return image, mask, mask_w
Source file: dehaze_pro.py (project: DeHaze, author: XierHacker)
def getDarkMap(img,pitchSize=9):
    darkMap=np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
    # pad the image so that darkMap keeps the same height and width as img
    # print("shape of darkMap", darkMap.shape[0], darkMap.shape[1])
    padSize=(pitchSize-1)//2
    # print("type of padSize", type(padSize))
    pad=cv2.copyMakeBorder(img,padSize,padSize,padSize,padSize,cv2.BORDER_CONSTANT,value=255)
    for i in range(darkMap.shape[0]):
        for j in range(darkMap.shape[1]):
            darkMap[i,j]=pad[i:i+pitchSize,j:j+pitchSize].min()

    return pad,darkMap
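A hedged usage sketch for getDarkMap() above, using a synthetic BGR image and the default 9x9 patch size. One caveat: for a 3-channel input, a scalar value=255 in copyMakeBorder typically expands to (255, 0, 0, 0), so the padded pixels can still drag the patch minimum down; passing value=(255, 255, 255) would avoid that if it matters.

import numpy as np

img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # synthetic BGR image
pad, dark = getDarkMap(img)      # default 9x9 patches
print(pad.shape, dark.shape)     # (72, 72, 3) padded image, (64, 64) dark channel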
Source file: dehaze.py (project: DeHaze, author: XierHacker)
def getDarkMap(img,pitchSize=9):
    darkMap=np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
    # pad the image so that darkMap keeps the same height and width as img
    # print("shape of darkMap", darkMap.shape[0], darkMap.shape[1])
    padSize=(pitchSize-1)//2
    # print("type of padSize", type(padSize))
    pad=cv2.copyMakeBorder(img,padSize,padSize,padSize,padSize,cv2.BORDER_CONSTANT,value=255)
    for i in range(darkMap.shape[0]):
        for j in range(darkMap.shape[1]):
            darkMap[i,j]=pad[i:i+pitchSize,j:j+pitchSize].min()

    return pad,darkMap
Source file: handdetector.py (project: deep-prior-pp, author: moberweger)
def rotateHand(self, dpt, cube, com, rot, joints3D, pad_value=0):
        """
        Rotate hand virtually in the image plane by a given angle
        :param dpt: cropped depth image with different CoM
        :param cube: metric cube of size (sx,sy,sz)
        :param com: original center of mass, in image coordinates (x,y,z)
        :param rot: rotation angle in deg
        :param joints3D: original joint coordinates, in 3D coordinates (x,y,z)
        :param pad_value: value of padding
        :return: adjusted image, new 3D joint coordinates, rotation angle in XXX
        """

        # if rot is 0, nothing to do
        if numpy.allclose(rot, 0.):
            return dpt, joints3D, rot

        rot = numpy.mod(rot, 360)

        M = cv2.getRotationMatrix2D((dpt.shape[1]//2, dpt.shape[0]//2), -rot, 1)
        new_dpt = cv2.warpAffine(dpt, M, (dpt.shape[1], dpt.shape[0]), flags=cv2.INTER_NEAREST,
                                 borderMode=cv2.BORDER_CONSTANT, borderValue=pad_value)

        com3D = self.importer.jointImgTo3D(com)
        joint_2D = self.importer.joints3DToImg(joints3D + com3D)
        data_2D = numpy.zeros_like(joint_2D)
        for k in range(data_2D.shape[0]):
            data_2D[k] = rotatePoint2D(joint_2D[k], com[0:2], rot)
        new_joints3D = (self.importer.jointsImgTo3D(data_2D) - com3D)

        return new_dpt, new_joints3D, rot
Source file: rescale_photos.py (project: eclipse2017, author: google)
def pad_height(image):
    height, width, _ = image.shape
    border_x = 0
    border_y = int(round((1080 - height) / 2.))
    image = cv2.copyMakeBorder(image, border_y, border_y,
                               border_x, border_x,
                               cv2.BORDER_CONSTANT, value=[0,0,0,0])
    return image
Source file: rescale_photos.py (project: eclipse2017, author: google)
def pad_width(image):
    height, width, _ = image.shape
    border_x = int(round((1920 - width) / 2.))
    border_y = 0
    image = cv2.copyMakeBorder(image, border_y, border_y,
                               border_x, border_x,
                               cv2.BORDER_CONSTANT, value=[0,0,0,0])
    return image
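A hedged sketch that chains the two helpers above on a synthetic 720p frame; the 1080x1920 targets are hard-coded in the functions, and the 4-element border value suggests BGRA frames, though copyMakeBorder also accepts it for 3-channel input:

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
tall = pad_height(frame)        # pads rows up toward 1080
wide = pad_width(tall)          # pads columns up toward 1920
print(tall.shape, wide.shape)   # (1080, 1280, 3) (1080, 1920, 3)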
Source file: helpers.py (project: opencv-helpers, author: abarrak)
def frame(image, top=2, bottom=2, left=2, right=2, borderType=cv.BORDER_CONSTANT, color=[255, 0, 0]):
  '''
  add borders around :image:
  :param image: has to be in RGB color scheme. Use `convert_to_rgb` if it's in OpenCV BGR scheme.
  :param color: array representing an RGB color.
  :param borderType: Other options are:
                                    cv.BORDER_REFLECT,
                                    cv.BORDER_REFLECT_101,
                                    cv.BORDER_DEFAULT,
                                    cv.BORDER_REPLICATE,
                                    cv.BORDER_WRAP
  '''
  return cv.copyMakeBorder(image, top, bottom, left, right, borderType, value=color)
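A hedged usage sketch for frame(); it assumes the helper's module imports OpenCV as `cv` (as the body suggests), and the sizes and color are arbitrary:

import cv2 as cv
import numpy as np

img = np.zeros((50, 50, 3), dtype=np.uint8)
framed = frame(img, top=5, bottom=5, left=5, right=5, color=[255, 0, 0])
print(framed.shape)   # (60, 60, 3): 5 px of solid border on every side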
Source file: motion_blur.py (project: CVtools, author: Tyler-D)
def MotionBlur(img, steps):
    '''
    Parameters:
    -----------
    img: Ndarray. CV loaded image
    steps: tuple(step_x, step_y). Motion Velocity

    Return:
    -------
    img: Ndarray. Blurred image
    '''
    BLACK=[0,0,0]
    img_height, img_width, channel= img.shape
    step_x , step_y = steps

    hori = abs(step_x)
    vert = abs(step_y)
    img_mb = cv.copyMakeBorder(img, vert, vert, hori, hori, cv.BORDER_CONSTANT, value=BLACK)

    img_mask = np.zeros((img_height,img_width, channel))
    if step_x != 0:
        sign_x = step_x // hori    # +1 or -1; floor division keeps the index an int under Python 3
        for x in range(0, hori):
            img_mask += img_mb[vert : vert+img_height, hori-x*sign_x : hori+img_width-x*sign_x]

    if step_y != 0:
        sign_y = step_y // vert
        for y in range(0, vert):
            img_mask += img_mb[vert-y*sign_y : vert+img_height-y*sign_y, hori:hori+img_width]

    img_mask /= (hori+vert)
    return img_mask
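A short sketch of calling MotionBlur() with a synthetic image (the shape and the (5, 3) step are arbitrary). Note that the function returns a float array accumulated from shifted copies, not the input's uint8 dtype:

import cv2 as cv
import numpy as np

img = np.random.randint(0, 256, size=(80, 80, 3), dtype=np.uint8)
blurred = MotionBlur(img, (5, 3))      # 5 px horizontal + 3 px vertical smear
print(blurred.shape, blurred.dtype)    # (80, 80, 3) float64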
Source file: bbox_labeling.py (project: dlcv_for_beginners, author: frombeijingwithlove)
def _draw_bbox(self, img):

        h, w = img.shape[:2]
        canvas = cv2.copyMakeBorder(img, 0, BAR_HEIGHT, 0, 0, cv2.BORDER_CONSTANT, value=COLOR_GRAY)

        label_msg = '{}: {}, {}'.format(self._cur_label, self._pt0, self._pt1) \
            if self._drawing \
            else 'Current label: {}'.format(self._cur_label)
        msg = '{}/{}: {} | {}'.format(self._index + 1, len(self._filelist), self._filelist[self._index], label_msg)

        cv2.putText(canvas, msg, (1, h+12),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 0, 0), 1)
        for label, (bpt0, bpt1) in self._bboxes:
            label_color = self.label_colors[label] if label in self.label_colors else COLOR_GRAY
            cv2.rectangle(canvas, bpt0, bpt1, label_color, thickness=2)
            cv2.putText(canvas, label, (bpt0[0]+3, bpt0[1]+15),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, label_color, 2)
        if self._drawing:
            label_color = self.label_colors[self._cur_label] if self._cur_label in self.label_colors else COLOR_GRAY
            if self._pt1[0] >= self._pt0[0] and self._pt1[1] >= self._pt0[1]:
                cv2.rectangle(canvas, self._pt0, self._pt1, label_color, thickness=2)
            cv2.putText(canvas, self._cur_label, (self._pt0[0] + 3, self._pt0[1] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, label_color, 2)
        return canvas

