Python matchTemplate() example source code

scene_detector.py (project: AutomatorX, author: xiaoyaojjian)
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
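A minimal usage sketch for the helper above (the file names are placeholders, not part of the original project): both inputs must have identical shapes, so the screenshot is cropped to the reference size before scoring.

# Hedged sketch -- 'reference.png' and 'screenshot.png' are assumed placeholder files.
import cv2

ref = cv2.imread('reference.png')            # reference region, BGR
shot = cv2.imread('screenshot.png')          # current screenshot, BGR
crop = shot[:ref.shape[0], :ref.shape[1]]    # crop to the reference shape before comparing
score = get_match_confidence(ref, crop)
print('confidence:', score)                  # close to 1.0 means a near-perfect match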
pixelmatch.py (project: ATX, author: NetEaseGame)
def locate_img(image, template):
    img = image.copy()
    # `method` is defined elsewhere in the module (a cv2.TM_* matching-method constant)
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
StreamParser.py (project: meleedb-segment, author: sashahashi)
def multiple_template_match(self, feature, scene, roi=None, scale=None, min_scale=0.5, max_scale=1.0, max_distance=14, min_corr=0.8, debug=False, threshold_min=50, threshold_max=200):
        if roi is not None:
            scene = scene[roi.top:(roi.top + roi.height), roi.left:(roi.left + roi.width)]

        if not scale:
            scale = self.find_best_scale(feature, scene, min_scale=min_scale, max_scale=max_scale, min_corr=min_corr)
        peaks = []

        if scale:
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
            canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)

            # Threshold for peaks.
            corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
            _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)

            good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
            if debug:
                print(max_corr, good_points)
            clusters = self.get_clusters(good_points, max_distance=max_distance)
            peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1]) for cluster in clusters]

        return (scale, peaks)
slantcorrection.py (project: handfontgen, author: nixeneko)
def detectmarker(image):
    grayscale = getgrayimage(image)
    mkradius = getapproxmarkerradius(grayscale) # approximate marker radius
    marker = cv2.resize(MARKER, (mkradius*2, mkradius*2)) # resize the marker

    #template matching
    matched = cv2.matchTemplate(grayscale, marker, cv2.TM_CCORR_NORMED) #returns float32

    #detect 4 greatest values
    markerposarray = []
    for i in range(4):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
        markerposarray.append(tuple(map(lambda x: x+mkradius, maxloc))) 
        cv2.circle(matched, maxloc, mkradius, (0.0), -1) # suppress the area around the current maxloc

    return markerposarray
scene_detector.py (project: ATX, author: NetEaseGame)
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
pixelmatch.py (project: AutomatorX, author: xiaoyaojjian)
def locate_img(image, template):
    img = image.copy()
    # `method` is defined elsewhere in the module (a cv2.TM_* matching-method constant)
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
StreamParser.py (project: meleedb-segment, author: sashahashi)
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.03, min_corr=0.8):
        best_corr = 0
        best_scale = 0

        for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, _ = cv2.minMaxLoc(result)

            if max_val > best_corr:
                best_corr = max_val
                best_scale = scale

        if best_corr > min_corr:
            return best_scale
        else:
            return None
MatchParser.py (project: meleedb-segment, author: sashahashi)
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0

    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)

        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale

    if best_corr > min_corr:
        return best_scale
    else:
        return None
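A small usage sketch under assumed inputs ('feature.png' and 'scene.png' are placeholder files): the helper walks scales from min_scale to max_scale in scale_delta steps and returns the scale whose TM_CCOEFF_NORMED peak is strongest, or None when nothing clears min_corr.

# Hedged sketch -- file names are illustrative assumptions.
import cv2

feature = cv2.imread('feature.png', cv2.IMREAD_GRAYSCALE)
scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
scale = find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0)
if scale is not None:
    resized = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
    result = cv2.matchTemplate(scene, resized, cv2.TM_CCOEFF_NORMED)
    _, best_corr, _, best_loc = cv2.minMaxLoc(result)
    print('best scale:', scale, 'corr:', best_corr, 'at', best_loc)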
fishing.py (project: wow-fishipy, author: kioltk)
def find_float(img_name):
    print('Looking for float')
    # todo: maybe make some universal float without background?
    for x in range(0, 7):
        template = cv2.imread('var/fishing_float_' + str(x) + '.png', 0)

        img_rgb = cv2.imread(img_name)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        # print('got images')
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
        threshold = 0.6
        loc = np.where( res >= threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        if loc[0].any():
            print('Found float ' + str(x))
            if dev:
                cv2.imwrite('var/fishing_session_' + str(int(time.time())) + '_success.png', img_rgb)
            return (loc[1][0] + w / 2) / 2, (loc[0][0] + h / 2) / 2
dino_api.py (project: go_dino, author: pauloalves86)
def play_game(get_command_callback: Callable[[int, int, int], str]) -> int:
    with mss() as screenshotter:
        get_game_landscape_and_set_focus_or_die(screenshotter)
        reset_game()
        landscape = get_game_landscape_and_set_focus_or_die(screenshotter, .95)

        start_game()
        gameover_template = cv2.imread(os.path.join('templates', 'dino_gameover.png'), 0)
        start = time.time()
        last_distance = landscape['width']
        x1, x2, y1, y2 = compute_region_of_interest(landscape)
        speed = 0
        last_compute_speed = time.time()
        last_speeds = [3] * 30
        last_command_time = time.time()

        while True:
            buffer = screenshotter.grab(landscape)
            image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
            image = np.array(image)
            image += np.abs(247 - image[0, x2])
            roi = image[y1:y2, x1:x2]
            score = int(time.time() - start)
            distance, size = compute_distance_and_size(roi, x2)
            speed = compute_speed(distance, last_distance, speed, last_speeds, last_compute_speed)
            last_compute_speed = time.time()
            # Check GAME OVER
            if distance == last_distance or distance == 0:
                res = cv2.matchTemplate(image, gameover_template, cv2.TM_CCOEFF_NORMED)
                if np.where(res >= 0.7)[0].size:  # avoid ambiguous array truthiness
                    reset_game()
                    return score
            last_distance = distance
            if time.time() - last_command_time < 0.6:
                continue
            command = get_command_callback(distance, size, speed)
            if command:
                last_command_time = time.time()
                pyautogui.press(command)
dino_api.py (project: go_dino, author: pauloalves86)
def find_game_position(screenshotter, threshold) -> Dict:
    dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
    w, h = dino_template.shape[::-1]
    landscape_template = cv2.imread(os.path.join('templates', 'dino_landscape.png'), 0)
    lw, lh = landscape_template.shape[::-1]
    monitor = screenshotter.monitors[0]
    buffer = screenshotter.grab(monitor)
    image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
    image = np.array(image)
    res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    if len(loc[0]):
        pt = next(zip(*loc[::-1]))
        return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
    return {}
utils_tests.py (project: object-detector, author: penny4860)
def test_crop_random():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"dst_size" : (20, 20),
        "n_patches" : 5,
    }

    # When perform crop_random()
    patches = utils.crop_random(image, parameters["dst_size"], parameters["n_patches"])

    # Then every patch should be included in an image.
    match_cost = []
    for patch in patches:
        M = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
        min_cost, _, _, _ = cv2.minMaxLoc(M)
        match_cost.append(min_cost)
    assert (np.array(match_cost) == 0).all(), "utils.crop_random() unit test failed!!"
myFunctions.py (project: Signal_Identification_Python, author: camigomez35)
def reconocedor(img):
    fil, col = img.shape[:2]
    #cv2.imshow('Origin', img)
    contador = 0
    respuesta = 0
    for filename in glob.glob('seniales/*.jpg'):
        im= cv2.imread(filename)
        im = cv2.resize(im, (col,fil))
        res = cv2.matchTemplate(img,im,cv2.TM_CCORR)
        threshold = 0.9
        while ((res[0])[0] > 10):
            (res[0])[0] = (res[0])[0] / 10
        loc = (res[0])[0]/10 >= threshold
        contador = contador +1
        if(loc):
            respuesta = contador
        #cv2.imshow(filename, im)
    #cv2.waitKey() # keep the image on screen until a key is pressed
    #cv2.destroyAllWindows() # close all open windows
    return respuesta
cv.py (project: endless-lake-player, author: joeydong)
def match_template(screenshot, template):
    # Perform match template calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)

    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)

    # Load template size
    (template_height, template_width) = template.shape[:2]

    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            "x": max_loc[0] + (template_width / 2),
            "y": max_loc[1] + (template_height / 2)
        },
        "score": max_val
    }
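One possible way to consume the returned dictionary (key names come from the function above; the file paths and the 0.8 cutoff are illustrative assumptions): draw the matched rectangle only when the score is high enough.

# Hedged sketch -- 'screenshot.png' and 'button.png' are placeholder files.
import cv2

screenshot = cv2.imread('screenshot.png')
template = cv2.imread('button.png')
m = match_template(screenshot, template)
if m['score'] >= 0.8:
    cv2.rectangle(screenshot, (m['x1'], m['y1']), (m['x2'], m['y2']), (0, 255, 0), 2)
    cv2.imwrite('annotated.png', screenshot)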
mechanics.py (project: flight-stone, author: asmateus)
def findTarget(self):
        result = cv2.matchTemplate(self.current_frame, self.root_patch.patch, self.match_method)
        _, _, _, max_loc = cv2.minMaxLoc(result)

        # Select found target
        target_top_left = max_loc
        target_bottom_right = (
            target_top_left[0] + self.patch_w,
            target_top_left[1] + self.patch_h)

        # Update Patch with current info
        patch = self.root_patch.copy()
        patch.patch = self.current_frame[
            target_top_left[1]: target_bottom_right[1] + 1,
            target_top_left[0]: target_bottom_right[0] + 1, :]
        patch.p1 = Point(x=target_top_left, y=target_bottom_right)
        self.assignRootPatch(patch)

        self.tracker = KCFTracker(True, True, True)
        self.tracker.init(
            [target_top_left[0], target_top_left[1], self.patch_w, self.patch_h],
            self.current_frame)

        return (target_top_left, target_bottom_right)
face_detection_v1.py (project: smart-cam, author: smart-cam)
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
        logger.info("[{0}] Face Similarity: # of faces in current frame - {1}".format(frame_id,
                                                                                len(faces_roi)))
        # First Time
        if frame_prev.size == 0:
            return len(faces_roi)

        uniq_faces_curr_frame = 0

        for template_roi in faces_roi:
            # Apply template Matching
            res = cv2.matchTemplate(frame_prev,
                                    template_roi,
                                    cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val, min_loc, max_loc))

        logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
        return uniq_faces_curr_frame
finger_files_recognize.py (project: CameraTablet, author: dmvlasenk)
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img,img_template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
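Here meth is a string such as 'cv2.TM_SQDIFF_NORMED' that eval() turns into the real constant; passing the cv2.TM_* constant directly would avoid the eval. A hedged usage sketch with placeholder grayscale images:

# Hedged sketch -- 'frame.png' and 'finger.png' are illustrative file names.
import cv2

frame = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
finger = cv2.imread('finger.png', cv2.IMREAD_GRAYSCALE)
top_left, bottom_right = matchTemplate(frame, finger, 'cv2.TM_SQDIFF_NORMED')
cv2.rectangle(frame, top_left, bottom_right, 255, 2)
cv2.imwrite('located.png', frame)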
finger_recongize.py (project: CameraTablet, author: dmvlasenk)
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img,img_template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
Screen.py (project: fatego-auto, author: lishunan246)
def __init__(self):
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True
Screen.py (project: fatego-auto, author: lishunan246)
def find_list(self, name):
        cards = []
        res = cv2.matchTemplate(self.screen, self._imageLoader.get(name), cv2.TM_CCOEFF_NORMED)
        threshold = 0.8
        loc = numpy.where(res >= threshold)
        x = 0
        t = sorted(zip(*loc[::-1]))
        for pt in t:
            if abs(x - pt[0]) > 100 or x == 0:
                x = pt[0]
                cards.append((pt[0], pt[1]))
            else:
                continue
        self.log(name + ': ' + str(len(cards)))

        return cards
secretitem.py (project: ArkwoodAR, author: rdmilligan)
def detect(self, image):

        # convert image to grayscale
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # apply template matching
        result = cv2.matchTemplate(image_gray, self.template, cv2.TM_CCOEFF_NORMED)

        # obtain locations, where threshold met
        locations = np.where(result >= self.THRESHOLD)

        for item in locations:
            if len(item) == 0:
                return None

        return locations
denseMatter.py (project: osrmacro, author: jjvilm)
def check_list(self):
        items_dict = imd.ImageStorage()
        items_dict = items_dict.pickled_dict

        RS.press_button('equipment')
        time.sleep(1)
        for key in items_dict.keys():
            template = items_dict[key]
            #save for DEBUG
            #cv2.imwrite('debug_template_file', template_)
            w, h = template.shape[::-1]
            pattern = RS.get_bag('only','gray')
            res = cv2.matchTemplate(pattern,template,cv2.TM_CCOEFF_NORMED)
            threshold = .8  # default is 0.8
            loc = np.where( res >= threshold)

            for pt in zip(*loc[::-1]):#goes through each found image
                print('{} found'.format(key))
                break
            else:
                print('{} not found'.format(key))
Match.py (project: osrmacro, author: jjvilm)
def this(img_pat, img_temp):
    """pass img_pat as a cv2 image format, img_temp as a file
    Passed Function to do w/e after finding img_temp"""
    cwd  = os.getcwd()
    if cwd not in img_temp:
        img_temp = cwd+img_temp
    if '.png' not in img_temp:
        img_temp = img_temp + '.png'  # the cwd prefix was already added above when needed
    #print for DEBUG
    #print(img_temp)
    #img_temp
    img_temp = cv2.imread(img_temp,0)
    #save for DEBUG
    #cv2.imwrite('img_temp', img_temp)
    w, h = img_temp.shape[::-1]
    res = cv2.matchTemplate(img_pat,img_temp,cv2.TM_CCOEFF_NORMED)
    threshold = .8  # default is 0.8
    loc = np.where( res >= threshold)

    return loc, w, h
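The caller gets back the raw np.where() result plus the template size. A hedged sketch of iterating the hits (the grayscale screenshot array `pattern` and the template path are assumptions, not names from the project):

# Hedged sketch -- `pattern` is an assumed grayscale screenshot array and
# '/images/button' a placeholder template path relative to os.getcwd().
loc, w, h = this(pattern, '/images/button')
for pt in zip(*loc[::-1]):                    # (x, y) of each hit above the threshold
    center = (pt[0] + w // 2, pt[1] + h // 2)
    print('match centered at', center)
    break                                     # often only the first hit is needed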
Match.py (project: osrmacro, author: jjvilm)
def images(img_pat, img_temp,x,y, func):
    w, h = img_temp.shape[::-1]
    try:
        res = cv2.matchTemplate(img_temp,img_pat,cv2.TM_CCOEFF_NORMED)

    except Exception as e:
        print("cannot match")
        print(e)
    threshold = .9  # default is 0.8
    loc = np.where( res >= threshold)

    for pt in zip(*loc[::-1]):#goes through each found image
        func(img_pat, x, y, pt, w, h)
        return 0
    return 1

    # return loc to be iterable outside the function
    # also sometimes the width and height of the image are needed
extract.py (project: SpaceX, author: shahar603)
def exists(image, template, thresh):
    """
    Returns True if template is in Image with probability of at least thresh
    :param image: 
    :param template: 
    :param thresh: 
    :return: 
    """
    digit_res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(digit_res >= thresh)

    if len(loc[-1]) == 0:
        return False

    for pt in zip(*loc[::-1]):
        if digit_res[pt[1]][pt[0]] == 1:
            return False

    return True
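A hedged usage sketch (the file names and the 0.85 threshold are assumptions) for checking whether a digit template appears in a cropped frame; note that the function above also returns False when a location matches with a score of exactly 1.0.

# Hedged sketch -- placeholder grayscale images.
import cv2

frame = cv2.imread('telemetry_crop.png', cv2.IMREAD_GRAYSCALE)
digit = cv2.imread('digit_7.png', cv2.IMREAD_GRAYSCALE)
if exists(frame, digit, 0.85):
    print('digit template present in the frame')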
detect_templates.py (project: awesome-opencv-scripts, author: hs105)
def find_template(template):
    method = 'cv2.TM_CCOEFF'
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(image, template, eval(method))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right, res
roll_handler.py (project: Grand-Order-Reroller, author: chaosking121)
def identify_summons(image_path):
    import cv2
    import numpy as np

    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    summons = []
    points = 0

    for file_name, (point_value, actual_name) in possible_summons.items():
        template = cv2.imread(os.path.join('screenshots', 'summons', file_name + '.png'), cv2.IMREAD_GRAYSCALE)

        res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= CLOSENESS_THRESHOLD)

        for pt in zip(*loc[::-1]):

            # Due to weird behaviour, only add one instance of each summon
            if actual_name in summons:
                continue
            summons.append(actual_name)
            points += point_value

    return (summons, points)
main.py (project: Grand-Order-Reroller, author: chaosking121)
def image_is_on_screen(template_name):
    template = cv2.imread(os.path.join(
                                'screenshots', 
                                template_name + '.png'), 
                    cv2.IMREAD_GRAYSCALE)
    image = cv2.cvtColor(
                np.array(pyautogui.screenshot(
                        region=(0, 0, 1300, 750))), 
                cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= CLOSENESS_THRESHOLD)

    # Not sure why this works but okay
    for pt in zip(*loc[::-1]):
        return True

    return False
face_detector.py (project: key-face, author: gabrielilharco)
def detectTemplateMatching(self, img):
        self.templateMatchingCurrentTime = cv2.getTickCount()
        duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime)/cv2.getTickFrequency()
        if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
            self.foundFace = False
            self.isTemplateMatchingRunning = False
            return

        faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
        roi = self.getSubRect(img, self.trackedFaceROI)
        match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
        cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)

        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
        foundTemplate = (
            minLoc[0] + self.trackedFaceROI[0],
            minLoc[1] + self.trackedFaceROI[1],
            self.trackedFaceTemplate[2],
            self.trackedFaceTemplate[3])

        self.trackedFaceTemplate = foundTemplate
        self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
        self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
hero_detector.py (project: overwatch-counter-picker, author: cheshire137)
def detect(self, template):
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

    if self.is_cards_screen:
      template = self.scale_template_for_cards_screen(template)

    result = cv2.matchTemplate(self.original, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(result >= self.threshold)
    points = list(zip(*loc[::-1]))  # materialize so len() works on Python 3

    if len(points) > 0:
      return HeroDetector.combine_points(points)

    return None

  # Scale template down if we're on the game-over screen since the hero
  # portraits are smaller there than during the game.

