def test_templating(self):
    bbox = self.wh.create_boundingbox()
    scaled_bbox = self.wh.bbox_scale(bbox, 0.5)
    sub_image = self.px.grab_window(scaled_bbox)
    sub_image = self.px.img_to_numpy(sub_image)
    # numpy arrays are indexed (rows, cols), i.e. (height, width)
    h, w = sub_image.shape[0:2]
    main_image = cv2.imread('pytomatic/tests/assets/calc_clean.PNG')
    res = cv2.matchTemplate(main_image, sub_image, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # TM_CCOEFF peaks at the best match, so take the maximum location
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(main_image, top_left, bottom_right, 255, 2)
    assert top_left == (89, 89)
    assert bottom_right == (290, 290)
    # plt.imshow(main_image)
    # plt.show()
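For reference, the matchTemplate/minMaxLoc pattern this test exercises can be reproduced without the project's window and pixel helpers. A minimal, standalone sketch with placeholder file names (not part of the original test suite):

import cv2

main_image = cv2.imread('screenshot.png')   # placeholder paths
template = cv2.imread('template.png')
h, w = template.shape[0:2]
res = cv2.matchTemplate(main_image, template, cv2.TM_CCOEFF)
_, _, _, max_loc = cv2.minMaxLoc(res)        # TM_CCOEFF: best match at the maximum
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(main_image, top_left, bottom_right, (0, 0, 255), 2)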
def __init__(self, root_patch=None):
    super(TDLTracker, self).__init__(root_patch)
    self.descriptor = ColorHistogramExtractor()
    self.match_method = cv2.TM_CCOEFF
    self.current_frame = None
    self.patch_h, self.patch_w = 0, 0
    self.tracker = None
    # map each tracker state to the method that handles it
    self.dispatcher = {
        TDLTracker.STATE_UNINITIATED: self.findTarget,
        TDLTracker.STATE_INITIATED: self.updateTarget,
        TDLTracker.STATE_INTERRUPTED: self.restartTarget,
        TDLTracker.STATE_FINISHED: self.updateTarget,
    }
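The dispatcher above is a state-to-handler table. A hypothetical sketch of how such a table is typically consumed; the update() method and the self.state attribute are assumptions and do not appear in this excerpt:

def update(self, frame):
    self.current_frame = frame
    # look up the handler for the current state and let it process the frame
    handler = self.dispatcher[self.state]
    return handler(frame)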
def click_image(image, notify=True):
    if notify:
        _notify("starting to click " + image)
    # Python 2 style check: accept both str and unicode file paths
    if isinstance(image, str) or isinstance(image, unicode):
        template = cv2.imread(image, 0)
    elif isinstance(image, PngImageFile):
        pass  # need to convert to cv2 image type
    sleep(2)
    # GET SCREENSHOT
    call(["gnome-screenshot", "--file=/tmp/beryl.png"])
    sleep(1)
    # FIND LOCATION OF NAME
    source = cv2.imread('/tmp/beryl.png', 0)
    points = []
    # grayscale shape is (height, width); reversed gives (width, height)
    w, h = template.shape[::-1]
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR, cv2.TM_CCORR_NORMED,
               cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
    for method in methods:
        # apply template matching
        result = cv2.matchTemplate(source.copy(), template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
        # if the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        # bottom_right = (top_left[0] + w, top_left[1] + h)
        # (x, y) centre of the matched region
        point = (top_left[0] + (float(w) / 2), top_left[1] + (float(h) / 2))
        points.append(point)
    # keep the candidate that lies closest, on average, to all the others
    best_point = sorted([(point, avg_distance(point, points)) for point in points], key=lambda tup: tup[1])[0][0]
    click_location(best_point)
    if notify:
        _notify("finished clicking image")
def match_template_opencv(template, image, options):
    """
    Match template using the OpenCV template matching implementation.
    Limited to a maximum of 3 channels, so it is suitable for direct RGB or
    gray-scale matching.
    :param template: Template image
    :param image: Image to search in
    :param options: Other options:
        - distance: Distance measure to use. (euclidean | correlation | ccoeff).
          Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1. Default: True
        - retain_size: Whether to retain the same size as input image. Default: True
    :return: Heatmap
    """
    # if the image has more than 3 channels, use our own implementation
    if len(image.shape) == 3 and image.shape[2] > 3:
        return match_template(template, image, options)

    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)

    # map the (distance, normalize) options onto an OpenCV matching method
    method = cv.TM_CCORR_NORMED
    if op['normalize'] and op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF_NORMED
    elif op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF
    elif op['normalize'] and op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF_NORMED
    elif op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF
    elif not op['normalize'] and op['distance'] == 'correlation':
        method = cv.TM_CCORR

    heatmap = cv.matchTemplate(image, template, method)

    # make a minimum-peak heatmap: the best match becomes the lowest value
    if method not in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
        heatmap = heatmap.max() - heatmap
    if op['normalize']:
        heatmap /= heatmap.max()

    # pad the heatmap back to the size of the input image if requested
    if op['retain_size']:
        hmap = np.ones(image.shape[:2]) * heatmap.max()
        h, w = heatmap.shape
        hmap[:h, :w] = heatmap
        heatmap = hmap

    return heatmap
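Because the heatmap is converted to minimum-peak form, callers locate the best match with argmin rather than argmax. A usage sketch with placeholder file names, assuming this function and its module-level defaults are importable:

import cv2 as cv
import numpy as np

image = cv.imread('scene.png')       # placeholder inputs
template = cv.imread('logo.png')
heatmap = match_template_opencv(template, image, {'distance': 'ccoeff', 'normalize': True})
y, x = np.unravel_index(np.argmin(heatmap), heatmap.shape)   # lowest value = best match
print("best match at (x={}, y={}), score={:.3f}".format(x, y, heatmap[y, x]))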
def find_subimage_in_array(self, sub_image, main_image, threshold=0.40, value=False, debug=False):
    """
    http://docs.opencv.org/3.1.0/d4/dc6/tutorial_py_template_matching.html

    Args:
        sub_image: A numpy array containing the template we are trying to match
        main_image: A numpy array containing the main image we are trying to find the template in
        threshold: A threshold for how sensitive the matching should be.
        value: If True, the similarity score is included in each returned tuple.
        debug: If True, plot the match result and the detected locations.

    Returns:
        A list of tuples.
        If value is True:
            Each tuple has the elements (left, top, right, down, similarity),
            where similarity is a measure that approaches one for a perfect match.
        Else:
            Each tuple has the elements (left, top, right, down).
    """
    # TODO: Check the test_init_wnd test for how to implement this :)
    logging.debug("Doing a template match with {} as threshold".format(threshold))
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF,
               cv2.TM_SQDIFF_NORMED]
    # use the normalised coefficient method so the fixed threshold is meaningful
    method = methods[1]
    h, w = sub_image.shape[0:2]
    res = cv2.matchTemplate(main_image, sub_image, method)
    loc = np.where(res >= threshold)
    locations = []
    for pt in zip(*loc[::-1]):
        if value:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h, res[pt[1], pt[0]]))
        else:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h))
    logging.debug("Found {} locations".format(len(locations)))
    if debug:
        plt.subplot(121), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(main_image, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        for pt in zip(*loc[::-1]):
            cv2.rectangle(main_image, pt, (pt[0] + w, pt[1] + h), (255, 0, 255), 2)
        plt.imshow(main_image)
        plt.show()
    if value:
        # best matches first, similarity kept in the returned tuples
        locations.sort(reverse=True, key=operator.itemgetter(4))
        return locations
    return list(map(operator.itemgetter(0, 1, 2, 3), locations))
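A short usage sketch, assuming an object (here called bot) exposing the method and placeholder images; with value=True each returned tuple also carries the similarity score:

matches = bot.find_subimage_in_array(template, screenshot, threshold=0.8, value=True)
for left, top, right, down, similarity in matches:
    print("match at ({}, {}) with similarity {:.2f}".format(left, top, similarity))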