import logging
import pickle
from pathlib import Path

import cv2
import numpy as np

logger = logging.getLogger(__name__)


def _get_alignment(im_ref, im_to_align, key):
    # Estimate a translation-only ECC warp between im_ref and im_to_align,
    # caching the result on disk under align_cache/<key>.alignment when a key is given.
    if key is not None:
        cached_path = Path('align_cache').joinpath('{}.alignment'.format(key))
        if cached_path.exists():
            with cached_path.open('rb') as f:
                return pickle.load(f)
    logger.info('Getting alignment for {}'.format(key))
    warp_mode = cv2.MOTION_TRANSLATION
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    criteria = (
        cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-8)
    cc, warp_matrix = cv2.findTransformECC(
        im_ref, im_to_align, warp_matrix, warp_mode, criteria)
    if key is not None:
        with cached_path.open('wb') as f:
            pickle.dump((cc, warp_matrix), f)
    logger.info('Got alignment for {} with cc {:.3f}: {}'
                .format(key, cc, str(warp_matrix).replace('\n', '')))
    return cc, warp_matrix
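# A minimal usage sketch (not from the original source): read two frames,
# estimate the translation, and warp the second onto the first. The helper
# name _example_usage and the file names are placeholders.
def _example_usage():
    im_ref = cv2.imread('ref.png', cv2.IMREAD_GRAYSCALE)
    im_to_align = cv2.imread('moving.png', cv2.IMREAD_GRAYSCALE)
    cc, warp = _get_alignment(im_ref, im_to_align, key=None)
    h, w = im_ref.shape[:2]
    # WARP_INVERSE_MAP matches the warp direction returned by findTransformECC
    # (template image first, input image second).
    return cv2.warpAffine(im_to_align, warp, (w, h),
                          flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)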
def affine(self):
    # Continuously estimate the warp between the master camera and each slave
    # camera using ECC on image gradients. Note: despite the method name, the
    # warp model is a full homography. Assumes "import time as ti" at module level.
    warp_mode = cv2.MOTION_HOMOGRAPHY
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10)
    warp_matrix = np.eye(3, 3, dtype=np.float32)
    while True:
        try:
            if self.ret[0] is not None and self.client[0].img is not None:
                master_cam_grey = cv2.cvtColor(self.client[0].img, cv2.COLOR_BGR2GRAY)
            else:
                print("Image was none!")
                ti.sleep(5)
                continue
            for i in range(1, self.cams):
                if self.ret[i] is not None:
                    print("Trying to calibrate")
                    slave_cam = cv2.cvtColor(self.client[i].img, cv2.COLOR_BGR2GRAY)
                    try:
                        (cc, warp_matrix) = cv2.findTransformECC(
                            self.get_gradient(master_cam_grey),
                            self.get_gradient(slave_cam),
                            warp_matrix, warp_mode, criteria)
                    except Exception as e:
                        print(e)
                    print(warp_matrix)
                else:
                    print("Image was none")
            ti.sleep(5)
        except Exception:
            ti.sleep(1)
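# The get_gradient helper called above is not included in this excerpt. A common
# implementation (a sketch, not the original; in the original it would be a method
# on the same class) uses Sobel gradient magnitudes so that ECC aligns edge
# structure rather than raw intensities:
def get_gradient(im):
    # Gradients along x and y, kept as CV_32F so negative slopes survive
    grad_x = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=3)
    # Combine into a single gradient-magnitude image
    return cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)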
def stackImagesECC(file_list):
    # Align every image to the first one with ECC and average the aligned stack
    M = np.eye(3, 3, dtype=np.float32)
    first_image = None
    stacked_image = None
    for file in file_list:
        image = cv2.imread(file, 1).astype(np.float32) / 255
        print(file)
        if first_image is None:
            # convert to gray scale floating point image
            first_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            stacked_image = image
        else:
            # Estimate perspective transform against the first image
            s, M = cv2.findTransformECC(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY),
                                        first_image, M, cv2.MOTION_HOMOGRAPHY)
            h, w, _ = image.shape
            # Align image to first image
            image = cv2.warpPerspective(image, M, (w, h))
            stacked_image += image
    stacked_image /= len(file_list)
    stacked_image = (stacked_image * 255).astype(np.uint8)
    return stacked_image
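# Example driver (a sketch, not from the original source); the helper name
# _stack_example and the glob pattern 'frames/*.jpg' are placeholders.
def _stack_example(pattern='frames/*.jpg'):
    import glob
    files = sorted(glob.glob(pattern))
    return stackImagesECC(files)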
# Align and stack images by matching ORB keypoints
# Faster but less accurate
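# The keypoint-based variant referenced by the comment above is not included in
# this excerpt. A minimal sketch of the idea (not the original code; the function
# name stackImagesKeypointMatching is assumed): align each frame to the first one
# with ORB keypoints and a RANSAC homography, then average the aligned frames.
def stackImagesKeypointMatching(file_list):
    orb = cv2.ORB_create(nfeatures=500)
    stacked_image = None
    first_kp, first_des = None, None
    for file in file_list:
        image = cv2.imread(file, 1).astype(np.float32) / 255
        gray = cv2.cvtColor((image * 255).astype(np.uint8), cv2.COLOR_BGR2GRAY)
        kp, des = orb.detectAndCompute(gray, None)
        if stacked_image is None:
            stacked_image = image
            first_kp, first_des = kp, des
        else:
            matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            matches = sorted(matcher.match(first_des, des), key=lambda m: m.distance)
            src_pts = np.float32([first_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
            # Homography mapping the current frame onto the first frame
            H, _ = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            h, w, _ = image.shape
            stacked_image += cv2.warpPerspective(image, H, (w, h))
    stacked_image /= len(file_list)
    return (stacked_image * 255).astype(np.uint8)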
def translationalThermalReg(im1, im2):
    import cv2
    import numpy
    # get dimensions
    s1 = im1.shape
    s2 = im2.shape
    # check sizes agree as a sanity check for inputs
    if s1 != s2:
        raise TypeError('Array Inputs are of different sizes!')
    # Select the affine motion model in OpenCV (despite the function name,
    # MOTION_AFFINE is used rather than MOTION_TRANSLATION)
    warp_model = cv2.MOTION_AFFINE
    # Define 2x3 warp matrix
    warp_matrix = numpy.eye(2, 3, dtype=numpy.float32)
    # Number of iterations allowed to converge on a solution
    num_it = 10000
    # Termination threshold
    termTh = 1e-9
    # Define stopping criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, num_it, termTh)
    # Ensure images are of dtype float32 (for compatibility with findTransformECC)
    im1 = im1.astype(numpy.float32)
    im2 = im2.astype(numpy.float32)
    # Find the optimal transform given the input parameters
    (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_model, criteria)
    # Apply the transform
    aligned = cv2.warpAffine(im2, warp_matrix, (s1[1], s1[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    print('Calculated Affine Warp Matrix:')
    print(warp_matrix)
    return aligned, warp_matrix
# Test harness for debugging and testing of the functions above
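# The harness itself is not part of this excerpt; a minimal sketch, assuming two
# grayscale test frames on disk (the file names 'frame_ref.png' and
# 'frame_mov.png' are placeholders).
if __name__ == '__main__':
    ref = cv2.imread('frame_ref.png', cv2.IMREAD_GRAYSCALE)
    mov = cv2.imread('frame_mov.png', cv2.IMREAD_GRAYSCALE)
    assert ref is not None and mov is not None, 'test images not found'
    # Affine ECC registration
    aligned, warp = translationalThermalReg(ref, mov)
    cv2.imwrite('aligned.png', aligned.astype(np.uint8))
    # Translation-only ECC estimate (no caching when key is None)
    cc, tmat = _get_alignment(ref, mov, key=None)
    print('ECC translation matrix:', tmat)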