Example source code using Python's cv2.BORDER_TRANSPARENT
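cv2.BORDER_TRANSPARENT tells OpenCV's warping functions not to touch destination pixels whose mapped location falls outside the source image, so whatever is already in the dst buffer shows through. Most of the snippets below combine it with a preallocated dst= argument and cv2.WARP_INVERSE_MAP to paste a warped face onto a pre-filled canvas. A minimal sketch of that pattern (the shapes and the shift matrix are made up for illustration):

import cv2
import numpy as np

src = np.full((100, 100, 3), 255, dtype=np.uint8)   # white source image
canvas = np.zeros((120, 120, 3), dtype=np.uint8)    # black destination canvas
# With WARP_INVERSE_MAP the matrix maps destination coords to source coords.
M = np.float32([[1, 0, 20], [0, 1, 10]])            # 2x3 affine matrix

# Destination pixels that map outside src keep their current value (black)
# instead of being filled with a border color.
cv2.warpAffine(src, M, (canvas.shape[1], canvas.shape[0]), dst=canvas,
               borderMode=cv2.BORDER_TRANSPARENT,
               flags=cv2.WARP_INVERSE_MAP)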

main.py (project: FaceSwap, author: Aravind-Suresh)
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
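Unlike the warp_im variants below, warp_image above keeps the full 3x3 matrix and calls cv2.warpPerspective, so tM is expected to be a homography rather than the 2x3 affine slice used elsewhere; because of WARP_INVERSE_MAP it is interpreted as the destination-to-source mapping. A hypothetical call (the image and landmark points here are made up, not taken from the project):

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
src_pts = np.float32([[10, 10], [600, 20], [620, 460], [30, 440]])
dst_pts = np.float32([[0, 0], [639, 0], [639, 479], [0, 479]])
# findHomography(dst_pts, src_pts) gives the destination-to-source mapping,
# which is the direction warpPerspective expects when WARP_INVERSE_MAP is set.
tM, _ = cv2.findHomography(dst_pts, src_pts)
warped = warp_image(img, tM, (480, 640, 3))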
faceswapper.py (project: FaceSwapper, author: QuantumLiu)
def warp_im(self, im, M, dshape):
        '''
        Warp image im with transformation matrix M into an image of shape dshape.
        '''
        output_im = np.zeros(dshape, dtype=im.dtype)
        cv2.warpAffine(im,
                       M[:2],
                       (dshape[1], dshape[0]),
                       dst=output_im,
                       borderMode=cv2.BORDER_TRANSPARENT,
                       flags=cv2.WARP_INVERSE_MAP)
        return output_im
aligner.py (project: masks-and-hats, author: leoneckert)
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
apply_mask.py (project: masks-and-hats, author: leoneckert)
def warp_im(im, M, dshape):
    output_im = np.ones(dshape, dtype=im.dtype)*0
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
face_align2.py (project: Kutils, author: ishank26)
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im, M[:2], (dshape[1], dshape[0]), dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
faceswap.py (project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou)
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
align.py (project: photo-a-day-aligner, author: matthewearl)
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
aligner.py (project: mask-generator, author: antiboredom)
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
face_swap.py (project: faceSwapPython, author: arijitx)
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
basics2.py (project: facemash-workshop, author: leoneckert)
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
facemash.py (project: facemash-workshop, author: leoneckert)
def warp_im(im, M, dshape):
    output_im = np.ones(dshape, dtype=im.dtype)*255
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
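Note the design choice in this facemash.py variant: the destination is pre-filled with 255 rather than zeros, so with BORDER_TRANSPARENT every destination pixel whose inverse-mapped location falls outside im stays white instead of black. The other warp_im variants above differ only in that prefill value.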
FaceDetect.py (project: SwitchFace, author: messcode)
def warp_im(im, M, dshape):
    """
    Affine transformation with matrix M to dshape.
    """
    output_im = numpy.zeros(dshape, dtype=im.dtype)  # zero matrix
    cv2.warpAffine(im,
                   M[:2],  # first two rows of M: the 2x3 affine part
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
faceswap.py (project: deezer-album-face-swap, author: xbenji)
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
vignettingFromRandomSteps.py (project: imgProcessor, author: radjkarl)
def __next__(self):
        # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
        # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
        obj = MaskedMovingAverage(shape=self.obj_shape)

        with np.errstate(divide='ignore', invalid='ignore'):
            for f, h in zip(self.fits, self.Hinvs):
                warpedflatField = cv2.warpPerspective(self.flatField,
                                                      h, (f.shape[1], f.shape[0]))
                obj.update(f / warpedflatField, warpedflatField != 0)

        self.object = obj.avg

        # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
        # BETWEEN ALL IMAGES AND THE ESTIMATED IMAGE OBJECT
        sh = self.flatField.shape
        s = MaskedMovingAverage(shape=sh)

        for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
            div = f / self.object
            # ->do not interpolate between background and image border
            div[mask] = np.nan
            div = cv2.warpPerspective(div, h, (sh[1], sh[0]),  # borderMode=cv2.BORDER_TRANSPARENT
                                      )
            div = np.nan_to_num(div)
            s.update(div, div != 0)

        new_flatField = s.avg

        # STOP ITERATION?
        # RMSE excluding NaNs:
        dev = np.nanmean((new_flatField[::10, ::10] -
                          self.flatField[::10, ::10])**2)**0.5
        print('residuum: %s' % dev)
        if self.n >= self.maxIter or (self._last_dev and (
                (self.n > 4 and dev > self._last_dev) or
                dev < self.maxDev)):
            raise StopIteration

        # remove erroneous values:
        self.flatField = np.clip(new_flatField, 0, 1)

        self.n += 1
        self._last_dev = dev
        return self.n
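MaskedMovingAverage comes from the same imgProcessor package and is not shown here; for readers following the iteration above, here is a minimal sketch of what such a masked running average plausibly does (an assumption based on how .update() and .avg are used, not the package's actual implementation):

import numpy as np

class MaskedMovingAverageSketch:
    # Assumed behaviour: a per-pixel running mean that only counts updates
    # where the mask is True, matching obj.update(values, mask) and obj.avg above.
    def __init__(self, shape):
        self._sum = np.zeros(shape, dtype=float)
        self._count = np.zeros(shape, dtype=float)

    def update(self, values, mask):
        m = np.asarray(mask, dtype=bool)
        self._sum[m] += values[m]
        self._count[m] += 1

    @property
    def avg(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            out = self._sum / self._count
        return np.nan_to_num(out)  # pixels never updated average to 0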

