def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff < 20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        # zero out the masked region in both images so it does not
        # contribute to the correlation score
        img1 = img1.copy()
        img1[mask != 0] = 0
        img2 = img2.copy()
        img2[mask != 0] = 0
    ## using matchTemplate: equally sized inputs yield a single 1x1 response
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
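# A minimal usage sketch for get_match_confidence, assuming two equally sized
# BGR screenshots on disk; the file names and the 0.95 threshold are
# placeholders, not taken from the original project.
def demo_get_match_confidence():
    a = cv2.imread('frame_a.png')
    b = cv2.imread('frame_b.png')
    conf = get_match_confidence(a, b)
    if conf is not False and conf > 0.95:
        print('frames match, confidence', conf)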
def locate_img(image, template, method=cv2.TM_CCOEFF_NORMED):
    # `method` was a free variable in the original snippet; it is exposed
    # here as a parameter with a common default
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    # for SQDIFF methods the best match is the minimum, otherwise the maximum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape  # template is expected to be single-channel
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
def multiple_template_match(self, feature, scene, roi=None, scale=None, min_scale=0.5, max_scale=1.0, max_distance=14, min_corr=0.8, debug=False, threshold_min=50, threshold_max=200):
    if roi is not None:
        scene = scene[roi.top:(roi.top + roi.height), roi.left:(roi.left + roi.width)]
    if not scale:
        scale = self.find_best_scale(feature, scene, min_scale=min_scale, max_scale=max_scale, min_corr=min_corr)
    peaks = []
    if scale:
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        # match on Canny edge maps rather than raw pixels
        canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
        canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)
        # threshold the correlation map for peaks
        corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
        _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)
        good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
        if debug:
            print(max_corr, good_points)
        clusters = self.get_clusters(good_points, max_distance=max_distance)
        # keep the strongest point of each cluster
        peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1]) for cluster in clusters]
    return (scale, peaks)
def mkgray(self, msg):
    """
    Convert a message into an 8-bit, 1-channel monochrome OpenCV image.
    """
    # as cv_bridge automatically scales, we need to remove that behavior
    # TODO: get a Python API in cv_bridge to check for the image depth.
    if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
        mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
        mono8 = np.array(np.clip(mono16, 0, 255), dtype=np.uint8)
        return mono8
    elif 'FC1' in msg.encoding:
        # floating point image handling: rescale by the global maximum
        img = self.br.imgmsg_to_cv2(msg, "passthrough")
        _, max_val, _, _ = cv2.minMaxLoc(img)
        if max_val > 0:
            scale = 255.0 / max_val
            mono_img = (img * scale).astype(np.uint8)
        else:
            mono_img = img.astype(np.uint8)
        return mono_img
    else:
        return self.br.imgmsg_to_cv2(msg, "mono8")
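# The 'FC1' branch above rescales a floating-point image into the 8-bit range
# using the global maximum from cv2.minMaxLoc. The same idea as a standalone
# sketch (assumes `import cv2`, `import numpy as np`, and a single-channel
# float input):
def float_to_mono8(img):
    _, max_val, _, _ = cv2.minMaxLoc(img)
    if max_val > 0:
        return (img * (255.0 / max_val)).astype(np.uint8)
    return img.astype(np.uint8)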
def detectmarker(image):
    grayscale = getgrayimage(image)
    mkradius = getapproxmarkerradius(grayscale)  # approximate marker radius
    marker = cv2.resize(MARKER, (mkradius*2, mkradius*2))  # resize the marker
    # template matching
    matched = cv2.matchTemplate(grayscale, marker, cv2.TM_CCORR_NORMED)  # returns float32
    # detect the 4 greatest values
    markerposarray = []
    for i in range(4):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
        markerposarray.append(tuple(map(lambda x: x + mkradius, maxloc)))
        # blank the response map around the current maxloc so the next
        # iteration finds the next-strongest peak
        cv2.circle(matched, maxloc, mkradius, (0.0), -1)
    return markerposarray
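# detectmarker extracts the four strongest matches by repeatedly calling
# cv2.minMaxLoc and blanking the response map around each peak. The same
# suppress-and-repeat idea as a standalone sketch (assumes grayscale inputs
# and `import cv2`; the parameter defaults are illustrative):
def top_k_matches(scene_gray, tmpl_gray, k=4, radius=10):
    res = cv2.matchTemplate(scene_gray, tmpl_gray, cv2.TM_CCORR_NORMED)
    peaks = []
    for _ in range(k):
        _, max_val, _, max_loc = cv2.minMaxLoc(res)
        peaks.append((max_loc, max_val))
        # zero out a disc around the peak so the next minMaxLoc call
        # returns the next-strongest response
        cv2.circle(res, max_loc, radius, 0.0, -1)
    return peaks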
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.03, min_corr=0.8):
    best_corr = 0
    best_scale = 0
    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale
    if best_corr > min_corr:
        return best_scale
    else:
        return None
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0
    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale
    if best_corr > min_corr:
        return best_scale
    else:
        return None
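# A minimal usage sketch for find_best_scale; the file names are placeholders
# and both images are expected to be grayscale, with the scaled feature no
# larger than the scene (a matchTemplate requirement):
def demo_find_best_scale():
    feature = cv2.imread('feature.png', cv2.IMREAD_GRAYSCALE)
    scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
    scale = find_best_scale(feature, scene)
    if scale is not None:
        print('best matching scale:', scale)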
def test_crop_random():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"dst_size": (20, 20),
                  "n_patches": 5}
    # When performing crop_random()
    patches = utils.crop_random(image, parameters["dst_size"], parameters["n_patches"])
    # Then every patch should be contained in the image, i.e. each patch
    # should match somewhere with zero squared difference.
    match_cost = []
    for patch in patches:
        M = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
        min_cost, _, _, _ = cv2.minMaxLoc(M)
        match_cost.append(min_cost)
    assert (np.array(match_cost) == 0).all(), "utils.crop_random() unit test failed!!"
def match_template(screenshot, template):
    # Perform the template match calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)
    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)
    # Load template size
    (template_height, template_width) = template.shape[:2]
    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            # integer division keeps pixel coordinates integral
            "x": max_loc[0] + (template_width // 2),
            "y": max_loc[1] + (template_height // 2)
        },
        "score": max_val
    }
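# A minimal usage sketch for match_template; the file names and the 0.8
# acceptance threshold are placeholders:
def demo_match_template():
    screenshot = cv2.imread('screen.png')
    button = cv2.imread('button.png')
    m = match_template(screenshot, button)
    if m['score'] > 0.8:
        print('found at', (m['center']['x'], m['center']['y']))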
def findTarget(self):
    result = cv2.matchTemplate(self.current_frame, self.root_patch.patch, self.match_method)
    _, _, _, max_loc = cv2.minMaxLoc(result)
    # Select found target
    target_top_left = max_loc
    target_bottom_right = (
        target_top_left[0] + self.patch_w,
        target_top_left[1] + self.patch_h)
    # Update patch with current info
    patch = self.root_patch.copy()
    patch.patch = self.current_frame[
        target_top_left[1]: target_bottom_right[1] + 1,
        target_top_left[0]: target_bottom_right[0] + 1, :]
    patch.p1 = Point(x=target_top_left, y=target_bottom_right)
    self.assignRootPatch(patch)
    self.tracker = KCFTracker(True, True, True)
    self.tracker.init(
        [target_top_left[0], target_top_left[1], self.patch_w, self.patch_h],
        self.current_frame)
    return (target_top_left, target_bottom_right)
def detect(self, z, x):
    k = self.gaussianCorrelation(x, z)
    res = real(fftd(complexMultiplication(self._alphaf, fftd(k)), True))
    _, pv, _, pi = cv2.minMaxLoc(res)  # pv: float peak value, pi: integer peak location
    p = [float(pi[0]), float(pi[1])]  # cv::Point2f, [x, y]
    # refine the peak to sub-pixel accuracy along each axis
    if pi[0] > 0 and pi[0] < res.shape[1] - 1:
        p[0] += self.subPixelPeak(res[pi[1], pi[0] - 1], pv, res[pi[1], pi[0] + 1])
    if pi[1] > 0 and pi[1] < res.shape[0] - 1:
        p[1] += self.subPixelPeak(res[pi[1] - 1, pi[0]], pv, res[pi[1] + 1, pi[0]])
    # express the peak relative to the centre of the response map
    p[0] -= res.shape[1] / 2.
    p[1] -= res.shape[0] / 2.
    return p, pv
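# subPixelPeak is not shown in this snippet; in KCF implementations it is
# usually a one-dimensional parabola fit over the three response values
# around the integer peak. A sketch of that common form, offered as an
# assumption about this code rather than its verbatim helper:
def subPixelPeak(left, center, right):
    divisor = 2 * center - right - left
    if divisor == 0:
        return 0.0
    return 0.5 * (right - left) / divisor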
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
    logger.info("[{0}] Face Similarity: # of faces in current frame - {1}".format(frame_id, len(faces_roi)))
    # First frame: every face is new
    if frame_prev.size == 0:
        return len(faces_roi)
    uniq_faces_curr_frame = 0
    for template_roi in faces_roi:
        # Apply template matching against the previous frame
        res = cv2.matchTemplate(frame_prev, template_roi, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val, min_loc, max_loc))
        # NOTE: the original snippet never increments uniq_faces_curr_frame;
        # presumably a low max_val marks a face absent from the previous
        # frame. The 0.5 threshold below is an assumption, not original code.
        if max_val < 0.5:
            uniq_faces_curr_frame += 1
    logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
    return uniq_faces_curr_frame
def match_one(template, image, options=None):
    """
    Match a template and find exactly one match in the image using the
    specified features.
    :param template: Template image
    :param image: Search image
    :param options: Options include
        - features: List of options for each feature
    :return: (Box, Score) Bounding box of the matched object, heatmap value
    """
    heatmap, scale = multi_feat_match(template, image, options)
    # the combined heatmap is distance-like, so the best match is the minimum
    min_val, _, min_loc, _ = cv.minMaxLoc(heatmap)
    top_left = tuple(scale * x for x in min_loc)
    score = min_val
    h, w = template.shape[:2]
    return Box(top_left[0], top_left[1], w, h), score
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]  # single-channel template
    img = img_full.copy()
    # Apply template matching; `meth` is the string name of an OpenCV
    # constant, e.g. 'cv2.TM_CCOEFF', resolved with eval() as in the
    # OpenCV tutorial
    method = eval(meth)
    res = cv2.matchTemplate(img, img_template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
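# A minimal usage sketch for the matchTemplate wrapper above; the file names
# are placeholders and both images are grayscale:
def demo_matchTemplate():
    page = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)
    word = cv2.imread('word.png', cv2.IMREAD_GRAYSCALE)
    top_left, bottom_right = matchTemplate(page, word, 'cv2.TM_SQDIFF_NORMED')
    cv2.rectangle(page, top_left, bottom_right, 255, 2)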
def __init__(self):
    t = ImageGrab.grab().convert("RGB")
    self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)
    self.ultLoader = ImageLoader('image/ult/')
    if self.have('topleft'):
        tl = self._imageLoader.get('topleft')
        res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x1, y1 = max_loc
        rd = self._imageLoader.get('rightdown')
        res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x2, y2 = max_loc
        # default 989
        GameStatus().y = y2 - y1
        GameStatus().use_Droid4X = True
def find_template(template):
    # `image` is a free variable here (presumably a module-level search image)
    method = 'cv2.TM_CCOEFF'
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(image, template, eval(method))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right, res
def detectTemplateMatching(self, img):
    self.templateMatchingCurrentTime = cv2.getTickCount()
    duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime) / cv2.getTickFrequency()
    if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
        self.foundFace = False
        self.isTemplateMatchingRunning = False
        return
    faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
    roi = self.getSubRect(img, self.trackedFaceROI)
    match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
    cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)
    # TM_SQDIFF_NORMED: the best match is at the minimum location
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
    foundTemplate = (
        minLoc[0] + self.trackedFaceROI[0],
        minLoc[1] + self.trackedFaceROI[1],
        self.trackedFaceTemplate[2],
        self.trackedFaceTemplate[3])
    self.trackedFaceTemplate = foundTemplate
    self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
    self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
def test_templating(self):
    bbox = self.wh.create_boundingbox()
    scaled_bbox = self.wh.bbox_scale(bbox, 0.5)
    sub_image = self.px.grab_window(scaled_bbox)
    sub_image = self.px.img_to_numpy(sub_image)
    h, w = sub_image.shape[0:2]  # numpy shape is (rows, cols) = (height, width)
    main_image = cv2.imread('pytomatic/tests/assets/calc_clean.PNG')
    res = cv2.matchTemplate(main_image, sub_image, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(main_image, top_left, bottom_right, 255, 2)
    assert top_left == (89, 89)
    assert bottom_right == (290, 290)
    # plt.imshow(main_image)
    # plt.show()
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
    Finds the best match for the given digit (RGB or gray color scheme) and
    returns the result and the similarity as a percentage.
    @threshold: minimum percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if len(digit.shape) == 3 and digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    bestMatch = None
    if method == REC_METHOD_TEMPLATE_MATCHING:
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()
            # resize so the digit and the template have equal shape
            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # alternative: cv2.TM_CCOEFF_NORMED
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)
    if bestMatch is not None and (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)
    return (-1, 0)
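# A minimal usage sketch for recognizeDigit; digit_img is a placeholder crop
# containing a single digit, and __readDigitTemplates / __DIGIT_TEMPLATES
# come from the surrounding module:
def demo_recognizeDigit(digit_img):
    value, similarity = recognizeDigit(digit_img)
    if value != -1:
        print('recognized digit', value, 'with', similarity, 'percent match')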
def find_matches(img, template_list):
    # Define an empty list to take bbox coords
    bbox_list = []
    method = cv2.TM_CCOEFF_NORMED
    # Iterate through the template list, reading in templates one by one
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Use cv2.matchTemplate() to search the image
        res = cv2.matchTemplate(img, tmp, method)
        # Use cv2.minMaxLoc() to extract the location of the best match
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        # Determine bounding box corners for the match
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list
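# A minimal usage sketch for find_matches; the paths are placeholders, and
# mpimg.imread (matplotlib.image) is assumed to be the loader for both the
# search image and the templates so that dtypes match:
def demo_find_matches():
    img = mpimg.imread('frame.png')
    boxes = find_matches(img, ['tmpl1.png', 'tmpl2.png'])
    for top_left, bottom_right in boxes:
        print('match box:', top_left, bottom_right)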
def ocr():
    # grab the screen, convert RGB -> BGR, crop the region, keep the red channel
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:, :, 2]
    # img = cv2.equalizeHist(img)
    index = 0
    for tmp in templates:
        res = cv2.matchTemplate(img, tmp, cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # integer cell indices into the text-box grid
        ix, iy = max_loc[0] // pw, max_loc[1] // ph
        strx = txtbox[iy][ix].get()
        index = index + 1
        txtbox[iy][ix].insert(len(strx), str(index))
    return
def getcroppedarea(img, markersize):
    # use template matching to detect the area to be cropped
    grayimg = getgrayimage(img)
    # detect top-left marker using template matching
    marker_tl = cv2.resize(MARKER_TL, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_tl, cv2.TM_CCORR_NORMED)  # returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_tl = (mkrect.x + mkrect.w, mkrect.y + mkrect.h)
    #pos_tl = (maxloc[0]+markersize, maxloc[1]+markersize)
    # detect bottom-right marker using template matching
    marker_br = cv2.resize(MARKER_BR, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_br, cv2.TM_CCORR_NORMED)  # returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_br = (mkrect.x, mkrect.y)
    #pos_br = maxloc
    # detect QR code
    qrarea = img[pos_br[1]:, :img.shape[0]-pos_br[1]]
    typ, val = passzbar.passzbar(qrarea)
    if not typ:
        return None, None
    strval = val.decode('ascii').strip()
    #print(strval)
    #cv2.circle(img, pos_tl, 5, (255, 0, 0), -1)
    #cv2.circle(img, pos_br, 5, (0, 255, 0), -1)
    #print(pos_tl, pos_br)
    #cv2.imshow("hoge", img)
    #cv2.imshow("hoge", img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]])
    # crop and return the detected area
    return strval, img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]]
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s:
            continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i
    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i
    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]
    if mask is not None:
        # zero out masked pixels in both images so they are ignored
        img = img.copy()
        img[mask != 0] = 0
        tmpl = tmpl.copy()
        tmpl[mask != 0] = 0
    s_bgr = cv2.split(tmpl)  # Blue Green Red
    i_bgr = cv2.split(img)
    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3):  # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    # blend the per-channel response maps with the weights above
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
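# A minimal usage sketch for find_match; screen and tmpl are placeholder BGR
# arrays, and the per-channel weights above bias the blended score toward the
# red channel:
def demo_find_match(screen, tmpl):
    confidence, rect = find_match(screen, tmpl)
    if rect is not None and confidence > 0.9:
        x, y, x1, y1 = rect
        print('matched at', (x, y, x1, y1), 'confidence', confidence)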