def detectmarker(image):
    """Locate the four circular markers in *image*.

    The marker template is scaled to the estimated radius, matched with
    normalized cross-correlation, and the four strongest peaks are picked
    greedily, masking each peak before searching for the next one.

    Returns a list of four (x, y) marker-center positions.
    """
    gray = getgrayimage(image)
    radius = getapproxmarkerradius(gray)  # approximate marker radius in px
    template = cv2.resize(MARKER, (radius * 2, radius * 2))  # scale template to image
    # normalized cross-correlation map (float32)
    score = cv2.matchTemplate(gray, template, cv2.TM_CCORR_NORMED)
    positions = []
    for _ in range(4):
        _, _, _, best = cv2.minMaxLoc(score)
        # shift from the template's top-left corner to its center
        positions.append((best[0] + radius, best[1] + radius))
        # suppress this peak so the next iteration finds a fresh maximum
        cv2.circle(score, best, radius, (0.0), -1)
    return positions
# Example source code using cv2.TM_CCORR_NORMED
def recognizeDigit(digit, method = REC_METHOD_TEMPLATE_MATCHING, threshold= 55):
    """
    Finds the best match for the given digit (RGB or gray color scheme) and
    returns the result and percentage as an integer.

    @digit image of a single digit (RGB or grayscale)
    @method recognition method; only REC_METHOD_TEMPLATE_MATCHING is implemented
    @threshold percentage of similarity required to accept a match
    @return (digit_index, similarity_percent), or (-1, 0) when no match passes
    """
    __readDigitTemplates()
    digit = digit.copy()
    # Accept both RGB and already-gray input: a gray image has ndim == 2, so
    # indexing shape[2] unconditionally would raise IndexError (bug fix).
    if len(digit.shape) == 3 and digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    # Initialize here so an unimplemented method cannot hit a NameError below.
    bestMatch = None
    if method == REC_METHOD_TEMPLATE_MATCHING:
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()
            # shrink whichever image is larger so both share the same size
            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)#cv2.TM_CCOEFF_NORMED)
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)
    # guard against bestMatch never being set (unknown method / no templates)
    if bestMatch is not None and (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)
    return (-1, 0)
def ocr():
    """Screen-grab a region, template-match each digit template in it, and
    append the template's 1-based index into the text box whose grid cell
    contains the best match.

    Relies on module-level globals: x, y, w, h (capture region), pw, ph
    (grid cell size), templates (template images), txtbox (widget grid).
    """
    # RGB screenshot -> BGR (::-1 on the channel axis), crop region, keep red channel
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:, :, 2]
    # img = cv2.equalizeHist(img)
    index = 0
    for tmp in templates:
        res = cv2.matchTemplate(img, tmp, cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # Integer grid coordinates: '/' is true division in Python 3 and would
        # produce float indices, breaking txtbox[iy][ix] (bug fix).
        ix, iy = max_loc[0] // pw, max_loc[1] // ph
        strx = txtbox[iy][ix].get()
        index = index + 1
        txtbox[iy][ix].insert(len(strx), str(index))
    return
def getcroppedarea(img, markersize):
    """Detect the top-left and bottom-right corner markers in *img*, decode
    the QR code near the bottom-right marker, and crop the area between the
    markers.

    @img source image (BGR or grayscale; converted via getgrayimage)
    @markersize side length in pixels to which the marker templates are resized
    @return (qr_text, cropped_image), or (None, None) when zbar decodes nothing
    """
    #use template matching to detect area to be cropped
    grayimg = getgrayimage(img)
    # detect top-left marker using template matching
    marker_tl = cv2.resize(MARKER_TL, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_tl, cv2.TM_CCORR_NORMED) #returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    # inner corner of the TL marker: its bottom-right edge
    pos_tl = (mkrect.x+mkrect.w, mkrect.y+mkrect.h)
    #pos_tl = (maxloc[0]+markersize, maxloc[1]+markersize)
    # detect bottom-right marker using template matching
    marker_br = cv2.resize(MARKER_BR, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_br, cv2.TM_CCORR_NORMED) #returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    # inner corner of the BR marker: its top-left edge
    pos_br = (mkrect.x, mkrect.y)
    #pos_br = maxloc
    #detect QR code
    # NOTE(review): the column bound uses img.shape[0] (height), not shape[1]
    # (width) -- presumably intentional for square layouts; confirm intent.
    qrarea = img[pos_br[1]:,:img.shape[0]-pos_br[1]]
    typ, val = passzbar.passzbar(qrarea)
    if not typ:
        return None, None
    strval = val.decode('ascii').strip()
    #print(strval)
    #cv2.circle(img, pos_tl, 5, (255, 0, 0), -1)
    #cv2.circle(img, pos_br, 5, (0, 255, 0), -1)
    #print(pos_tl, pos_br
    #cv2.imshow("hoge", img)
    #cv2.imshow("hoge", img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]])
    # crop and return detected area
    return strval, img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]]
def _findOverlap(self, img_rgb, overlap, overlapDeviation,
                 rotation, rotationDeviation):
    '''
    Return (offsx, offsy, angle) that best aligns *img_rgb* with
    self.base_img_rgb via template matching within the given overlap
    and rotation search ranges.

    @overlap expected overlap in pixels
    @overlapDeviation allowed deviation of the overlap in pixels
    @rotation expected rotation angle
    @rotationDeviation allowed deviation of the angle (0 = fixed angle)
    '''
    # Both images must have the same number of channels.  The original code
    # compared img_rgb.shape against itself, so the guard never fired (bug fix).
    if len(img_rgb.shape) != len(self.base_img_rgb.shape):
        raise Exception(
            'number of channels(colors) for both images different')
    if overlapDeviation == 0 and rotationDeviation == 0:
        return (0, overlap, rotation)
    s = self.base_img_rgb.shape
    ho = int(round(overlap * 0.5))
    overlap = int(round(overlap))
    # create two image cuts to compare:
    imgcut = self.base_img_rgb[s[0] - overlapDeviation - overlap:, :]
    template = img_rgb[:overlap, ho:s[1] - ho]

    def fn(angle):
        # Score one candidate rotation; the correlation map is stashed on fn
        # so the best map survives the brent() search below.
        rotTempl = self._rotate(template, angle)
        # Apply template Matching
        fn.res = cv2.matchTemplate(rotTempl.astype(np.float32),
                                   imgcut.astype(np.float32),
                                   cv2.TM_CCORR_NORMED)
        # brent() minimizes, so invert the mean correlation
        return 1 / fn.res.mean()

    if rotationDeviation == 0:
        angle = rotation
        fn(rotation)
    else:
        # find best rotation angle:
        angle = brent(fn, brack=(rotation - rotationDeviation,
                                 rotation + rotationDeviation))
    loc = cv2.minMaxLoc(fn.res)[-1]  # location of the maximum correlation
    offsx = int(round(loc[0] - ho))
    offsy = overlapDeviation + overlap - loc[1]
    return offsx, offsy, angle
def match_template_opencv(template, image, options):
    """
    Match template using OpenCV template matching implementation.
    Limited by number of channels as maximum of 3.
    Suitable for direct RGB or Gray-scale matching
    :param options: Other options:
        - distance: Distance measure to use. (euclidean | correlation | ccoeff).
          Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1. Default: True
        - retain_size: Whether to retain the same size as input image. Default: True
    :return: Heatmap
    """
    # arrays with more than 3 dimensions fall back to our own implementation
    if len(image.shape) > 3:
        return match_template(template, image, options)

    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)

    # pick the OpenCV method from (distance, normalize); anything not listed
    # (including normalized correlation) uses TM_CCORR_NORMED
    method_table = {
        ('euclidean', True): cv.TM_SQDIFF_NORMED,
        ('euclidean', False): cv.TM_SQDIFF,
        ('ccoeff', True): cv.TM_CCOEFF_NORMED,
        ('ccoeff', False): cv.TM_CCOEFF,
        ('correlation', False): cv.TM_CCORR,
    }
    method = method_table.get((op['distance'], bool(op['normalize'])),
                              cv.TM_CCORR_NORMED)

    heatmap = cv.matchTemplate(image, template, method)

    # correlation-style scores peak at the match; flip them so that, like
    # SQDIFF, smaller always means better (minimum-peak heatmap)
    if method not in (cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED):
        heatmap = heatmap.max() - heatmap
    if op['normalize']:
        heatmap /= heatmap.max()

    # optionally pad back out to the input image's height/width
    if op['retain_size']:
        padded = np.ones(image.shape[:2]) * heatmap.max()
        hh, ww = heatmap.shape
        padded[:hh, :ww] = heatmap
        heatmap = padded
    return heatmap
def find_subimage_in_array(self, sub_image, main_image, threshold=0.40, value=False, debug=False):
    """
    Find all locations of a template inside a larger image.
    http://docs.opencv.org/3.1.0/d4/dc6/tutorial_py_template_matching.html
    Args:
        sub_image: A numpy matrix containing the template we are trying to match
        main_image: A numpy array containing the main image we are trying to find the template in
        value: If true, similarity is included in each result tuple.
        threshold: A threshold regarding how sensitive the matching should be.
        debug: If true, plot the result heatmap and detected rectangles.
    Returns:
        A list containing tuples:
        If value is true:
            Each tuple has the elements (left, top, right, down, similarity),
            where similarity is a measure toward one.
        Else:
            Each tuple has the elements (left, top, right, down).
    """
    # TODO: Check the test_init_wnd test for how to implement this :)
    logging.debug("Doing a template match with {} as threshold".format(threshold))
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF,
               cv2.TM_SQDIFF_NORMED]
    # NOTE(review): methods[0] is un-normalized TM_CCOEFF, whose scores are
    # not bounded to [0, 1]; a 0.40 threshold presumably assumes a normalized
    # method -- confirm against callers before changing.
    method = methods[0]
    h, w = sub_image.shape[0:2]
    res = cv2.matchTemplate(main_image, sub_image, method)
    # every position whose score passes the threshold counts as a hit
    loc = np.where(res >= threshold)
    locations = []
    # loc is (rows, cols); reverse to iterate (x, y) points
    for pt in zip(*loc[::-1]):
        if value:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h, res[pt[1], pt[0]]))
        else:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h))
    logging.debug("Found {} locations".format(len(locations)))
    if debug:
        plt.subplot(121), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(main_image, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        for pt in zip(*loc[::-1]):
            cv2.rectangle(main_image, pt, (pt[0] + w, pt[1] + h), (255, 0, 255), 2)
        plt.imshow(main_image)
        plt.show()
    # best (most similar) matches first when scores are available
    if value:
        locations.sort(reverse=True, key=operator.itemgetter(4))
    # strip the similarity column so both branches return 4-tuples
    return list(map(operator.itemgetter(0, 1, 2, 3), locations))