def warp_image(img, tM, shape):
    """Warp *img* through the inverse of the 3x3 perspective transform *tM*
    into a new image of shape *shape*.

    Pixels the warp does not cover keep the destination's initial value
    (zeros), because BORDER_TRANSPARENT leaves ``dst`` untouched outside
    the mapped source region.

    Args:
        img: source image (numpy array).
        tM: 3x3 perspective transformation matrix.
        shape: output shape tuple (rows, cols[, channels]).

    Returns:
        The warped image, dtype ``img.dtype``.
    """
    out = np.zeros(shape, dtype=img.dtype)
    # (shape[1], shape[0]) converts (rows, cols) to OpenCV's (width, height).
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out
# TODO: Modify this method to get a better face contour mask
Python usage examples of cv2.BORDER_TRANSPARENT (collected from open-source projects)
def warp_im(self, im, M, dshape):
    '''
    Warp *im* with the inverse of the affine transform given by the first
    two rows of *M*, into a zero-initialized image of shape *dshape*.
    Pixels not covered by the warped source stay zero (BORDER_TRANSPARENT
    leaves ``dst`` unmodified there).
    '''
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],  # warpAffine takes a 2x3 matrix: drop the last row
                   (dshape[1], dshape[0]),  # OpenCV size is (width, height)
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
def warp_im(im, M, dshape):
    """Warp *im* by the inverse of affine matrix *M* onto a zeroed canvas
    of shape *dshape* and return the result."""
    canvas = np.zeros(dshape, dtype=im.dtype)
    # OpenCV's size argument is (width, height), i.e. (cols, rows).
    cv2.warpAffine(im, M[:2], (dshape[1], dshape[0]), dst=canvas,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return canvas
def warp_im(im, M, dshape):
    """Warp *im* with the inverse affine transform *M* into a zero-filled
    image of shape *dshape*; uncovered pixels remain zero."""
    # np.zeros is clearer and cheaper than the original np.ones(...)*0,
    # and matches the sibling warp_im definitions in this file.
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],  # 2x3 affine part
                   (dshape[1], dshape[0]),  # (width, height)
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
def warp_im(im, M, dshape):
    """Inverse-warp *im* with the 2x3 part of affine matrix *M* into a
    zero image of shape *dshape*."""
    result = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=result,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return result
Source file: faceswap.py
Project: Automatic_Group_Photography_Enhancement
Author: Yuliang-Zou
Project source code
File source code
Views: 25
Favorites: 0
Likes: 0
Comments: 0
def warp_im(im, M, dshape):
    """Return *im* warped by the inverse of affine matrix *M*; the output
    has shape *dshape* and uncovered pixels stay zero."""
    dst_img = numpy.zeros(dshape, dtype=im.dtype)
    warp_size = (dshape[1], dshape[0])  # (width, height) for OpenCV
    cv2.warpAffine(
        im,
        M[:2],
        warp_size,
        dst=dst_img,
        borderMode=cv2.BORDER_TRANSPARENT,
        flags=cv2.WARP_INVERSE_MAP,
    )
    return dst_img
def warp_im(im, M, dshape):
    """Apply the inverse of affine transform *M* to *im*, writing into a
    fresh zero array of shape *dshape*."""
    out = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im, M[:2], (dshape[1], dshape[0]),
                   dst=out,
                   # keep dst pixels where the source does not map
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return out
def warp_im(im, M, dshape):
    """Inverse-warp *im* by affine matrix *M* into an image of shape
    *dshape*; pixels outside the warped source remain zero."""
    target = np.zeros(dshape, dtype=im.dtype)
    rows, cols = dshape[0], dshape[1]
    cv2.warpAffine(im, M[:2], (cols, rows), dst=target,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return target
def warp_im(im, M, dshape):
    """Warp *im* using the inverse of the affine map given by the first
    two rows of *M*; the result has shape *dshape*."""
    buf = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(
        im, M[:2], (dshape[1], dshape[0]),
        dst=buf, borderMode=cv2.BORDER_TRANSPARENT,
        flags=cv2.WARP_INVERSE_MAP)
    return buf
def warp_im(im, M, dshape):
    """Return a *dshape*-sized image holding *im* transformed by the
    inverse of affine matrix *M* (zeros where the source does not map)."""
    warped = np.zeros(dshape, dtype=im.dtype)
    size_wh = (dshape[1], dshape[0])  # OpenCV wants (width, height)
    cv2.warpAffine(im, M[:2], size_wh, dst=warped,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return warped
def warp_im(im, M, dshape):
    """Inverse-warp *im* by affine matrix *M* onto a canvas of shape
    *dshape* pre-filled with 255, so pixels the warp does not cover stay
    at 255 (white for uint8 images)."""
    output_im = np.full(dshape, 255, dtype=im.dtype)
    cv2.warpAffine(im, M[:2], (dshape[1], dshape[0]), dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
def warp_im(im, M, dshape):
    """
    Affine transformation with matrix M to dshape.

    Warps *im* by the inverse of *M* into a zero-initialized image of
    shape *dshape*; BORDER_TRANSPARENT leaves uncovered pixels at zero.
    """
    output_im = numpy.zeros(dshape, dtype=im.dtype)  # zero canvas, same dtype as im
    cv2.warpAffine(im,
                   M[:2],  # first two rows of M: warpAffine needs a 2x3 matrix
                   (dshape[1], dshape[0]),  # OpenCV size is (width, height)
                   dst = output_im,
                   borderMode = cv2.BORDER_TRANSPARENT,
                   flags = cv2.WARP_INVERSE_MAP)
    return output_im
def warp_im(im, M, dshape):
    """Warp *im* into a zero-initialized array of shape *dshape* using the
    inverse of the 2x3 affine transform taken from *M*."""
    result_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),  # (width, height)
                   dst=result_im,
                   flags=cv2.WARP_INVERSE_MAP,
                   borderMode=cv2.BORDER_TRANSPARENT)
    return result_im
def __next__(self):
    """Run one iteration of the alternating object / flat-field estimation.

    Updates ``self.object`` and ``self.flatField`` in place and returns the
    iteration counter ``self.n``.  Raises StopIteration when ``self.maxIter``
    is reached, when the change between successive flat-field estimates
    drops below ``self.maxDev``, or when the error starts growing after a
    few iterations (divergence guard).
    """
    # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
    # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
    obj = MaskedMovingAverage(shape=self.obj_shape)
    # Division by a zero flat field yields inf/NaN; silenced here and
    # excluded via the mask passed to obj.update below.
    with np.errstate(divide='ignore', invalid='ignore'):
        for f, h in zip(self.fits, self.Hinvs):
            warpedflatField = cv2.warpPerspective(self.flatField,
                                                  h, (f.shape[1], f.shape[0]))
            # only average where the warped flat field is non-zero
            obj.update(f / warpedflatField, warpedflatField != 0)
    self.object = obj.avg
    # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
    # BETWEEN ALL IMAGES AND THE ESTIMATED OBJECT
    sh = self.flatField.shape
    s = MaskedMovingAverage(shape=sh)
    for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
        div = f / self.object
        # ->do not interpolate between background and image border
        div[mask] = np.nan
        div = cv2.warpPerspective(div, h, (sh[1], sh[0]),  # borderMode=cv2.BORDER_TRANSPARENT
                                  )
        div = np.nan_to_num(div)  # NaN -> 0, excluded by the div != 0 mask
        s.update(div, div != 0)
    new_flatField = s.avg
    # STOP ITERATION?
    # RMSE between successive estimates, sub-sampled every 10th pixel:
    dev = np.nanmean((new_flatField[::10, ::10] -
                      self.flatField[::10, ::10])**2)**0.5
    print('residuum: %s' % dev)
    if self.n >= self.maxIter or (self._last_dev and (
            (self.n > 4 and dev > self._last_dev) or
            dev < self.maxDev)):
        raise StopIteration
    # remove erroneous values:
    self.flatField = np.clip(new_flatField, 0, 1)
    self.n += 1
    self._last_dev = dev
    return self.n