def test_templating(self):
bbox = self.wh.create_boundingbox()
scaled_bbox = self.wh.bbox_scale(bbox,0.5)
sub_image = self.px.grab_window(scaled_bbox)
sub_image = self.px.img_to_numpy(sub_image)
    h, w = sub_image.shape[0:2]  # numpy shape is (rows, cols), i.e. (height, width)
main_image = cv2.imread('pytomatic/tests/assets/calc_clean.PNG')
res = cv2.matchTemplate(main_image, sub_image, cv2.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(main_image, top_left, bottom_right, 255, 2)
assert top_left == (89,89)
assert bottom_right == (290,290)
#plt.imshow(main_image)
#plt.show()
# Python matchTemplate() example source code
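# All of the snippets below share the same core pattern: matchTemplate()
# produces a response map, minMaxLoc() finds the best match, and the
# template size gives the bounding box. A minimal sketch of that pattern
# ('scene.png' and 'patch.png' are placeholder file names):
import cv2

scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
patch = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)
res = cv2.matchTemplate(scene, patch, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
h, w = patch.shape
top_left = max_loc  # for TM_SQDIFF / TM_SQDIFF_NORMED use min_loc instead
bottom_right = (top_left[0] + w, top_left[1] + h)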
def getMatchingScore(img, digit):
    # Match the cell image against the stored template for this digit;
    # with TM_SQDIFF a lower score means a better match.
    template = cv2.imread('Templates/' + 'T' + str(digit) + '.jpg', 0)
    score = cv2.matchTemplate(img, template, cv2.TM_SQDIFF) / 2000
    return score
def removeInnerGridLines(img):
    template = cv2.imread('cross_template.jpg', 0)
    th, tw = np.shape(template)  # numpy shape is (height, width)
    res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF_NORMED)
    threshold = 0.1
    loc = np.where(res <= threshold)
    for pt in zip(*loc[::-1]):  # reverse (rows, cols) to get (x, y) points
        x, y = pt
        # flood-fill from the centre of each matched cross to erase it
        img, area, dummy = customFloodFill(img, (x + tw // 2, y + th // 2), 0, 0)
    return img
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
    Finds the best template match for the given digit image (RGB or
    grayscale) and returns the matched digit and its similarity as a
    percentage.
    @threshold minimum similarity percentage for a match to be accepted
    """
    __readDigitTemplates()
    digit = digit.copy()
    if len(digit.shape) == 3 and digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
bestDigit = -1
if method == REC_METHOD_TEMPLATE_MATCHING:
bestMatch = None
for i in range(len(__DIGIT_TEMPLATES)):
template = __DIGIT_TEMPLATES[i].copy()
if digit.shape[1] < template.shape[1]:
template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
else:
digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # alternative: cv2.TM_CCOEFF_NORMED
(_, max_val, _, max_loc) = cv2.minMaxLoc(result)
if bestMatch is None or max_val > bestMatch:
bestMatch = max_val
bestDigit = i
print("New Best Match:", bestMatch, bestDigit)
if (bestMatch * 100) >= threshold:
return (bestDigit, bestMatch * 100)
return (-1, 0)
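# A usage sketch for recognizeDigit(), assuming the digit templates are
# available to __readDigitTemplates(); 'cell.png' is a placeholder for a
# cropped digit image:
cell = cv2.imread('cell.png')
digit, confidence = recognizeDigit(cell, threshold=55)
if digit != -1:
    print("Recognized", digit, "with", confidence, "% similarity")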
def find_matches(img, template_list):
    # List to collect bbox corner coordinates
    bbox_list = []
    # Read in templates one by one, search the image with
    # cv2.matchTemplate(), extract the best match location with
    # cv2.minMaxLoc(), then record the bounding box corners
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
tmp = mpimg.imread(temp)
# Apply template Matching
res = cv2.matchTemplate(img,tmp,method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
w, h = (tmp.shape[1], tmp.shape[0])
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
bbox_list.append((top_left, bottom_right))
return bbox_list
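# A usage sketch for find_matches(); the file names are placeholders.
# mpimg is matplotlib.image, so the templates load as RGB uint8 arrays in
# the same colour order as the scene image:
import matplotlib.image as mpimg

scene_img = mpimg.imread('scene.jpg')
for top_left, bottom_right in find_matches(scene_img, ['temp1.jpg', 'temp2.jpg']):
    cv2.rectangle(scene_img, top_left, bottom_right, (0, 0, 255), 6)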
def ocr():
    # Grab the screen, convert RGB to BGR, crop the region of interest and
    # keep the red channel (x, y, w, h, pw, ph, templates and txtbox are
    # module-level globals in the original script)
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:, :, 2]
# img = cv2.equalizeHist(img)
index=0
for tmp in templates:
res = cv2.matchTemplate(img,tmp,cv2.TM_CCORR_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        ix, iy = max_loc[0] // pw, max_loc[1] // ph  # integer division so the result can index txtbox
strx=txtbox[iy][ix].get()
index=index+1
txtbox[iy][ix].insert(len(strx),str(index))
return
def getcroppedarea(img, markersize):
#use template matching to detect area to be cropped
grayimg = getgrayimage(img)
# detect top-left marker using template matching
marker_tl = cv2.resize(MARKER_TL, (markersize, markersize))
matched = cv2.matchTemplate(grayimg, marker_tl, cv2.TM_CCORR_NORMED) #returns float32
(minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
pos_tl = (mkrect.x+mkrect.w, mkrect.y+mkrect.h)
#pos_tl = (maxloc[0]+markersize, maxloc[1]+markersize)
# detect bottom-right marker using template matching
marker_br = cv2.resize(MARKER_BR, (markersize, markersize))
matched = cv2.matchTemplate(grayimg, marker_br, cv2.TM_CCORR_NORMED) #returns float32
(minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
pos_br = (mkrect.x, mkrect.y)
#pos_br = maxloc
#detect QR code
qrarea = img[pos_br[1]:,:img.shape[0]-pos_br[1]]
typ, val = passzbar.passzbar(qrarea)
if not typ:
return None, None
strval = val.decode('ascii').strip()
#print(strval)
#cv2.circle(img, pos_tl, 5, (255, 0, 0), -1)
#cv2.circle(img, pos_br, 5, (0, 255, 0), -1)
    #print(pos_tl, pos_br)
#cv2.imshow("hoge", img)
#cv2.imshow("hoge", img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]])
# crop and return detected area
return strval, img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]]
def test_find_scene():
scenes = {}
for s in os.listdir('txxscene'):
if '-' in s: continue
i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
scenes[s] = i
# names = [os.path.join('scene', c) for c in os.listdir('scene')]
imgs = {}
for n in os.listdir('scene'):
i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
i = cv2.resize(i, (960, 540))
imgs[n] = i
    for name, img in imgs.items():  # dict.iteritems() is Python 2 only
        for scene, tmpl in scenes.items():
res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val < 0.6:
continue
x, y = max_loc
h, w = tmpl.shape
cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
cv2.imshow('found', img)
cv2.waitKey()
def find_match(img, tmpl, rect=None, mask=None):
if rect is not None:
h, w = img.shape[:2]
x, y, x1, y1 = rect
if x1 > w or y1 > h:
return 0, None
img = img[y:y1, x:x1, :]
if mask is not None:
img = img.copy()
img[mask!=0] = 0
tmpl = tmpl.copy()
tmpl[mask!=0] = 0
s_bgr = cv2.split(tmpl) # Blue Green Red
i_bgr = cv2.split(img)
weight = (0.3, 0.3, 0.4)
resbgr = [0, 0, 0]
for i in range(3): # bgr
resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
confidence = max_val
x, y = max_loc
h, w = tmpl.shape[:2]
if rect is None:
rect = (x, y, x+w, y+h)
# cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
# cv2.imshow('test', img)
# cv2.waitKey(20)
return confidence, rect
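# A usage sketch for find_match(); the per-channel weights above bias the
# combined score toward the red channel. File names are placeholders:
screen = cv2.imread('screen.png')  # BGR scene
button = cv2.imread('button.png')  # BGR template
confidence, box = find_match(screen, button)
if box is not None and confidence > 0.8:
    x, y, x1, y1 = box
    print("matched at", (x, y), "confidence", confidence)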
def match(self, templateimage, threshold=0.8):
    image = cv2.imread(self.sourceimage)
    template = cv2.imread(templateimage)
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    similarity = cv2.minMaxLoc(result)[1]  # max_val of the response map
    if similarity < threshold:
        return similarity
    else:
        # np.unravel_index converts the flat argmax into (row, col)
        return np.unravel_index(result.argmax(), result.shape)
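# A usage sketch for match(): below the threshold it returns only the
# similarity score; at or above it, the (row, col) of the best match.
# 'matcher' and the file name are placeholders:
result = matcher.match('button.png', threshold=0.8)
if isinstance(result, tuple):
    row, col = result  # note: (row, col), i.e. (y, x)
else:
    print("best similarity only", result)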
def template_match(img_master, img_slave, method='cv2.TM_CCOEFF_NORMED', mlx=1, mly=1, show=True):
# Apply image oversampling
img_master = cv2.resize(img_master,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)
img_slave = cv2.resize(img_slave,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)
    match_method = eval(method)  # e.g. 'cv2.TM_CCOEFF_NORMED' -> cv2 constant
    res = cv2.matchTemplate(img_slave, img_master, match_method)
    w, h = img_master.shape[::-1]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if match_method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
# Retrieve center coordinates
px = (top_left[0]+bottom_right[0])/(2.0*mlx)
py = (top_left[1]+bottom_right[1])/(2.0*mly)
# Scale images for visualization
img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0/500))
img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0/500))
cv2.rectangle(img_slave_scaled,top_left, bottom_right, 255, 2*mlx)
if show == True:
plt.figure(figsize=(20,10))
plt.subplot(131),plt.imshow(res,cmap = 'gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_master_scaled,cmap = 'gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_slave_scaled, cmap = 'gray')
plt.suptitle(method)
plt.show()
return px, py, max_val
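# A usage sketch for template_match(): with mlx = mly = 2 both images are
# oversampled 2x before matching, so the returned centre coordinates have
# half-pixel resolution in the original frame. File names are placeholders:
img_master = cv2.imread('patch.tif', cv2.IMREAD_GRAYSCALE)
img_slave = cv2.imread('frame.tif', cv2.IMREAD_GRAYSCALE)
px, py, score = template_match(img_master, img_slave,
                               method='cv2.TM_CCOEFF_NORMED',
                               mlx=2, mly=2, show=False)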
def fit(img, templates, start_percent, stop_percent, threshold):
img_width, img_height = img.shape[::-1]
best_location_count = -1
best_locations = []
best_scale = 1
plt.axis([0, 2, 0, 1])
plt.show(block=False)
x = []
y = []
for scale in [i/100.0 for i in range(start_percent, stop_percent + 1, 3)]:
locations = []
location_count = 0
for template in templates:
template = cv2.resize(template, None,
fx = scale, fy = scale, interpolation = cv2.INTER_CUBIC)
result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
result = np.where(result >= threshold)
location_count += len(result[0])
locations += [result]
print("scale: {0}, hits: {1}".format(scale, location_count))
x.append(location_count)
y.append(scale)
plt.plot(y, x)
plt.pause(0.00001)
        if location_count > best_location_count:
            best_location_count = location_count
            best_locations = locations
            best_scale = scale
            plt.axis([0, 2, 0, best_location_count])
    plt.close()
return best_locations, best_scale
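# A usage sketch for fit(): scan template scales from 50% to 150% of the
# original size and keep the scale that produces the most hits above the
# threshold. 'img' and 'templates' are placeholder grayscale inputs:
locations, scale = fit(img, templates, 50, 150, 0.7)
print("best scale:", scale)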
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image, applying mask to the template using
    method. Method can be one of (None, 'laplacian', 'sobel', 'scharr',
    'prewitt', 'roberts', 'canny').
    Returns the response map; look for its maxima to locate matches."""
if mask is not None:
if method:
kernel = np.ones((3, 3), np.uint8)
mask = cv2.erode(mask, kernel)
if method == 'laplacian':
                # use CV_64F so we don't lose edges; convert to uint8 afterwards
edge_image = np.uint8(np.absolute(
cv2.Laplacian(image, cv2.CV_64F)))
edge_template = np.uint8(np.absolute(
cv2.Laplacian(template, cv2.CV_64F)
))
elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
filter_func = getattr(skfilters, method)
edge_image = filter_func(image)
edge_template = filter_func(template)
edge_image = convert(edge_image)
edge_template = convert(edge_template)
else: # method == 'canny'
values = np.hstack([image.ravel(), template.ravel()])
median = np.median(values)
lower = int(max(0, (1.0 - sigma) * median))
upper = int(min(255, (1.0 + sigma) * median))
edge_image = cv2.Canny(image, lower, upper)
edge_template = cv2.Canny(template, lower, upper)
results = cv2.matchTemplate(edge_image, edge_template & mask,
cv2.TM_CCOEFF_NORMED)
else:
            # the fourth positional argument is 'result', so pass the mask by keyword
            results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED,
                                        mask=mask)
else:
results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
return results
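# A usage sketch for match_template_mask() using the 'canny' edge method;
# thresholding the response map gives candidate top-left corners. The
# inputs are placeholder grayscale uint8 arrays:
res = match_template_mask(image, template, mask=mask, method='canny')
ys, xs = np.where(res >= 0.7)
for x, y in zip(xs, ys):
    cv2.rectangle(image, (x, y),
                  (x + template.shape[1], y + template.shape[0]), 255, 1)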
def MatchTemplate(template, target):
"""Returns match score for given template"""
res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
return max_val
def matchTemplate(self, img, template):
res = cv2.matchTemplate(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),
template,
cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + template.shape[1],
top_left[1] + template.shape[0])
return top_left, bottom_right
def getScoreboard(self, img):
template_width = self.TEMPLATE_SCOREBOARD.shape[1]
img_width = img.shape[1]
template = imutils.resize(self.TEMPLATE_SCOREBOARD,
width=int(template_width/1280.0*img_width))
top_left, bottom_right = self.matchTemplate(img, template)
return img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
def getTopBar(self, img):
template_width = self.TEMPLATE_TOP.shape[1]
img_width = img.shape[1]
template = imutils.resize(self.TEMPLATE_TOP,
width=int(template_width/1280.0*img_width))
top_left, bottom_right = self.matchTemplate(img, template)
located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
h, w = located.shape[:2]
return located[:, int(w*0.125):int(w*0.5)]
def getTimeArea(self, img):
template_width = self.TEMPLATE_TIME.shape[1]
img_width = img.shape[1]
template = imutils.resize(self.TEMPLATE_TIME,
width=int(template_width/1280.0*img_width))
top_left, bottom_right = self.matchTemplate(img, template)
located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
h, w = located.shape[:2]
return located[int(h*0.16):int(h*0.84), int(w*0.42):int(w*0.58)]
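# A usage sketch for the helpers above, assuming they are methods of a
# parser object whose TEMPLATE_* images are loaded ('parser' and the file
# name are placeholders):
frame = cv2.imread('frame.png')
scoreboard = parser.getScoreboard(frame)
top_bar = parser.getTopBar(frame)
time_area = parser.getTimeArea(frame)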