Python source examples using cv2.TM_CCOEFF_NORMED

scene_detector.py (project: AutomatorX, author: xiaoyaojjian)
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
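
A minimal usage sketch for the helper above, assuming two equally sized BGR crops saved under hypothetical file names; TM_CCOEFF_NORMED scores fall in [-1, 1], so values near 1 mean the crops are nearly identical:

import cv2
import numpy as np

# Hypothetical inputs: two screenshots cropped to the same region and size.
img_a = cv2.imread('crop_a.png')
img_b = cv2.imread('crop_b.png')

# Optional mask: non-zero pixels are zeroed out (ignored) in both images.
ignore = np.zeros(img_a.shape[:2], dtype=np.uint8)

confidence = get_match_confidence(img_a, img_b, mask=ignore)
print('confidence:', confidence)  # TM_CCOEFF_NORMED score in [-1, 1]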
StreamParser.py (project: meleedb-segment, author: sashahashi)
def multiple_template_match(self, feature, scene, roi=None, scale=None, min_scale=0.5, max_scale=1.0, max_distance=14, min_corr=0.8, debug=False, threshold_min=50, threshold_max=200):
        if roi is not None:
            scene = scene[roi.top:(roi.top + roi.height), roi.left:(roi.left + roi.width)]

        if not scale:
            scale = self.find_best_scale(feature, scene, min_scale=min_scale, max_scale=max_scale, min_corr=min_corr)
        peaks = []

        if scale:
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
            canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)

            # Threshold for peaks.
            corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
            _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)

            good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
            if debug:
                print(max_corr, good_points)
            clusters = self.get_clusters(good_points, max_distance=max_distance)
            peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1]) for cluster in clusters]

        return (scale, peaks)
scene_detector.py (project: ATX, author: NetEaseGame)
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
StreamParser.py (project: meleedb-segment, author: sashahashi)
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.03, min_corr=0.8):
        best_corr = 0
        best_scale = 0

        for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
            scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

            result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, _ = cv2.minMaxLoc(result)

            if max_val > best_corr:
                best_corr = max_val
                best_scale = scale

        if best_corr > min_corr:
            return best_scale
        else:
            return None
MatchParser.py (project: meleedb-segment, author: sashahashi)
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0, scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0

    scale = min_scale
    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)

        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale

    if best_corr > min_corr:
        return best_scale
    else:
        return None
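
A hedged example of calling the module-level find_best_scale above; the grayscale image paths are placeholders:

import cv2

scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)      # placeholder path
feature = cv2.imread('feature.png', cv2.IMREAD_GRAYSCALE)  # placeholder path

# Returns the scale with the highest TM_CCOEFF_NORMED peak, or None if no
# scale reaches the min_corr floor of 0.8.
best = find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0)
print('best scale:', best)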
fishing.py (project: wow-fishipy, author: kioltk)
def find_float(img_name):
    print('Looking for float')
    # todo: maybe make some universal float without background?
    for x in range(0, 7):
        template = cv2.imread('var/fishing_float_' + str(x) + '.png', 0)

        img_rgb = cv2.imread(img_name)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        # print('got images')
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
        threshold = 0.6
        loc = np.where( res >= threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
        if loc[0].any():
            print('Found ' + str(x) + ' float')
            if dev:
                cv2.imwrite('var/fishing_session_' + str(int(time.time())) + '_success.png', img_rgb)
            return (loc[1][0] + w / 2) / 2, (loc[0][0] + h / 2) / 2
dino_api.py (project: go_dino, author: pauloalves86)
def play_game(get_command_callback: Callable[[int, int, int], str]) -> int:
    with mss() as screenshotter:
        get_game_landscape_and_set_focus_or_die(screenshotter)
        reset_game()
        landscape = get_game_landscape_and_set_focus_or_die(screenshotter, .95)

        start_game()
        gameover_template = cv2.imread(os.path.join('templates', 'dino_gameover.png'), 0)
        start = time.time()
        last_distance = landscape['width']
        x1, x2, y1, y2 = compute_region_of_interest(landscape)
        speed = 0
        last_compute_speed = time.time()
        last_speeds = [3] * 30
        last_command_time = time.time()

        while True:
            buffer = screenshotter.grab(landscape)
            image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
            image = np.array(image)
            image += np.abs(247 - image[0, x2])
            roi = image[y1:y2, x1:x2]
            score = int(time.time() - start)
            distance, size = compute_distance_and_size(roi, x2)
            speed = compute_speed(distance, last_distance, speed, last_speeds, last_compute_speed)
            last_compute_speed = time.time()
            # Check GAME OVER
            if distance == last_distance or distance == 0:
                res = cv2.matchTemplate(image, gameover_template, cv2.TM_CCOEFF_NORMED)
                if np.where(res >= 0.7)[0].size:
                    reset_game()
                    return score
            last_distance = distance
            if time.time() - last_command_time < 0.6:
                continue
            command = get_command_callback(distance, size, speed)
            if command:
                last_command_time = time.time()
                pyautogui.press(command)
dino_api.py (project: go_dino, author: pauloalves86)
def find_game_position(screenshotter, threshold) -> Dict:
    dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
    w, h = dino_template.shape[::-1]
    landscape_template = cv2.imread(os.path.join('templates', 'dino_landscape.png'), 0)
    lw, lh = landscape_template.shape[::-1]
    monitor = screenshotter.monitors[0]
    buffer = screenshotter.grab(monitor)
    image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
    image = np.array(image)
    res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    if len(loc[0]):
        pt = next(zip(*loc[::-1]))
        return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
    return {}
cv.py (project: endless-lake-player, author: joeydong)
def match_template(screenshot, template):
    # Perform match template calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)

    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)

    # Load template size
    (template_height, template_width) = template.shape[:2]

    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            "x": max_loc[0] + (template_width / 2),
            "y": max_loc[1] + (template_height / 2)
        },
        "score": max_val
    }
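
A short sketch of consuming the dictionary returned by match_template; the screenshot and template paths, and the 0.8 acceptance threshold, are assumptions for illustration:

import cv2

screenshot = cv2.imread('screenshot.png')  # placeholder path
template = cv2.imread('button.png')        # placeholder path

m = match_template(screenshot, template)
if m['score'] > 0.8:  # assumed acceptance threshold
    cv2.rectangle(screenshot, (m['x1'], m['y1']), (m['x2'], m['y2']), (0, 255, 0), 2)
    print('match center:', m['center'])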
face_detection_v1.py (project: smart-cam, author: smart-cam)
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
        logger.info("[{0}] Face Similarity: # of faces in current frame - {1}".format(frame_id,
                                                                                len(faces_roi)))
        # First Time
        if frame_prev.size == 0:
            return len(faces_roi)

        uniq_faces_curr_frame = 0

        for template_roi in faces_roi:
            # Apply template Matching
            res = cv2.matchTemplate(frame_prev,
                                    template_roi,
                                    cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val, min_loc, max_loc))



        logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
        return uniq_faces_curr_frame
Screen.py (project: fatego-auto, author: lishunan246)
def __init__(self):
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True
Screen.py (project: fatego-auto, author: lishunan246)
def find_list(self, name):
        cards = []
        res = cv2.matchTemplate(self.screen, self._imageLoader.get(name), cv2.TM_CCOEFF_NORMED)
        threshold = 0.8
        loc = numpy.where(res >= threshold)
        x = 0
        t = sorted(zip(*loc[::-1]))
        for pt in t:
            if abs(x - pt[0]) > 100 or x == 0:
                x = pt[0]
                cards.append((pt[0], pt[1]))
            else:
                continue
        self.log(name + ': ' + str(len(cards)))

        return cards
secretitem.py (project: ArkwoodAR, author: rdmilligan)
def detect(self, image):

        # convert image to grayscale
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # apply template matching
        result = cv2.matchTemplate(image_gray, self.template, cv2.TM_CCOEFF_NORMED)

        # obtain locations, where threshold met
        locations = np.where(result >= self.THRESHOLD)

        for item in locations:
            if len(item) == 0:
                return None

        return locations
denseMatter.py (project: osrmacro, author: jjvilm)
def check_list(self):
        items_dict = imd.ImageStorage()
        items_dict = items_dict.pickled_dict

        RS.press_button('equipment')
        time.sleep(1)
        for key in items_dict.keys():
            template = items_dict[key]
            #save for DEBUG
            #cv2.imwrite('debug_template_file', template_)
            w, h = template.shape[::-1]
            pattern = RS.get_bag('only','gray')
            res = cv2.matchTemplate(pattern,template,cv2.TM_CCOEFF_NORMED)
            threshold = .8  # default is .8
            loc = np.where( res >= threshold)

            for pt in zip(*loc[::-1]):#goes through each found image
                print('{} found'.format(key))
                break
            else:
                print('{} not found'.format(key))
Match.py (project: osrmacro, author: jjvilm)
def this(img_pat, img_temp):
    """pass img_pat as a cv2 image format, img_temp as a file
    Passed Function to do w/e after finding img_temp"""
    cwd  = os.getcwd()
    if cwd not in img_temp:
        img_temp = cwd+img_temp
    if '.png' not in img_temp:
        img_temp = img_temp + '.png'
    #print for DEBUG
    #print(img_temp)
    #img_temp
    img_temp = cv2.imread(img_temp,0)
    #save for DEBUG
    #cv2.imwrite('img_temp', img_temp)
    w, h = img_temp.shape[::-1]
    res = cv2.matchTemplate(img_pat,img_temp,cv2.TM_CCOEFF_NORMED)
    threshold = .8  # default is .8
    loc = np.where( res >= threshold)

    return loc, w, h
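
A hedged example of unpacking the (loc, w, h) tuple returned by this(); the screen capture and the template name relative to the working directory are hypothetical:

import cv2

img_pat = cv2.imread('screen.png', 0)          # placeholder screen capture, grayscale
loc, w, h = this(img_pat, '/templates/rune')   # hypothetical template path fragment

for pt in zip(*loc[::-1]):  # (x, y) of every location scoring >= 0.8
    cv2.rectangle(img_pat, pt, (pt[0] + w, pt[1] + h), 255, 1)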
Match.py (project: osrmacro, author: jjvilm)
def images(img_pat, img_temp,x,y, func):
    w, h = img_temp.shape[::-1]
    try:
        res = cv2.matchTemplate(img_temp,img_pat,cv2.TM_CCOEFF_NORMED)

    except Exception as e:
        print("cannot match")
        print(e)
    threshold = .9  # default is .8
    loc = np.where( res >= threshold)

    for pt in zip(*loc[::-1]):#goes through each found image
        func(img_pat, x, y, pt, w, h)
        return 0
    return 1

    # return loc to be iterable outside the function
    # also sometimes width and height of image is needed
extract.py (project: SpaceX, author: shahar603)
def exists(image, template, thresh):
    """
    Returns True if template appears in image with a match score of at least thresh
    :param image: image to search in
    :param template: template image to look for
    :param thresh: minimum TM_CCOEFF_NORMED score accepted as a match
    :return: bool
    """
    digit_res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(digit_res >= thresh)

    if len(loc[-1]) == 0:
        return False

    for pt in zip(*loc[::-1]):
        if digit_res[pt[1]][pt[0]] == 1:
            return False

    return True
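
A minimal, assumed call of exists() on a grayscale frame; the file names and the 0.85 threshold are placeholders:

import cv2

frame = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)              # placeholder
digit_template = cv2.imread('digit_7.png', cv2.IMREAD_GRAYSCALE)   # placeholder

if exists(frame, digit_template, 0.85):
    print('template found in frame')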
roll_handler.py (project: Grand-Order-Reroller, author: chaosking121)
def identify_summons(image_path):
    import cv2
    import numpy as np

    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    summons = []
    points = 0

    for file_name, (point_value, actual_name) in possible_summons.items():
        template = cv2.imread(os.path.join('screenshots', 'summons', file_name + '.png'), cv2.IMREAD_GRAYSCALE)

        res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= CLOSENESS_THRESHOLD)

        for pt in zip(*loc[::-1]):

            # Due to weird behaviour, only add one instance of each summon
            if actual_name in summons:
                continue
            summons.append(actual_name)
            points += point_value

    return (summons, points)
main.py (project: Grand-Order-Reroller, author: chaosking121)
def image_is_on_screen(template_name):
    template = cv2.imread(os.path.join(
                                'screenshots', 
                                template_name + '.png'), 
                    cv2.IMREAD_GRAYSCALE)
    image = cv2.cvtColor(
                np.array(pyautogui.screenshot(
                        region=(0, 0, 1300, 750))), 
                cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= CLOSENESS_THRESHOLD)

    # Not sure why this works but okay
    for pt in zip(*loc[::-1]):
        return True

    return False
hero_detector.py (project: overwatch-counter-picker, author: cheshire137)
def detect(self, template):
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

    if self.is_cards_screen:
      template = self.scale_template_for_cards_screen(template)

    result = cv2.matchTemplate(self.original, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(result >= self.threshold)
    points = list(zip(*loc[::-1]))

    if len(points) > 0:
      return HeroDetector.combine_points(points)

    return None

  # Scale template down if we're on the game-over screen since the hero
  # portraits are smaller there than during the game.
tslsr.py (project: Speedy-TSLSR, author: talhaHavadar)
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
        Finds the best match for the given digit image (RGB or grayscale) and
        returns the recognized digit and its match percentage as integers.
        @threshold minimum percentage of similarity required for a match
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.ndim == 3 and digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()

            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))

            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # or cv2.TM_CCOEFF_NORMED
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)

    return (-1, 0)
template_match.py (project: SelfDrivingCar, author: aguijarro)
def find_matches(img, template_list):
    # Make a copy of the image to draw on
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through template list
    # Read in templates one by one
    # Use cv2.matchTemplate() to search the image
    #     using whichever of the OpenCV search methods you prefer
    # Use cv2.minMaxLoc() to extract the location of the best match
    # Determine bounding box corners for the match
    # Return the list of bounding boxes
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img,tmp,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc

        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    return bbox_list
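
A sketch of how find_matches might be driven, assuming templates readable by mpimg in the same colour format as the search image; all file names are placeholders:

import matplotlib.image as mpimg

img = mpimg.imread('test_image.jpg')           # placeholder search image
templates = ['cutout1.jpg', 'cutout2.jpg']     # placeholder template paths

for top_left, bottom_right in find_matches(img, templates):
    print('match box:', top_left, bottom_right)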
test_monkey.py (project: ATX, author: NetEaseGame)
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
scene_detector.py (project: ATX, author: NetEaseGame)
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
test_monkey.py (project: AutomatorX, author: xiaoyaojjian)
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s: continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
scene_detector.py (project: AutomatorX, author: xiaoyaojjian)
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
opencv-sample.py (project: cv-sample-python, author: macaca-sample)
def match(self, templateimage, threshold=0.8):
        image = cv2.imread(self.sourceimage)
        template = cv2.imread(templateimage)
        result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        similarity = cv2.minMaxLoc(result)[1]
        if similarity < threshold:
            return similarity
        else:
            return np.unravel_index(result.argmax(), result.shape)
best_fit.py (project: SheetVision, author: cal-pratt)
def fit(img, templates, start_percent, stop_percent, threshold):
    img_width, img_height = img.shape[::-1]
    best_location_count = -1
    best_locations = []
    best_scale = 1

    plt.axis([0, 2, 0, 1])
    plt.show(block=False)

    x = []
    y = []
    for scale in [i/100.0 for i in range(start_percent, stop_percent + 1, 3)]:
        locations = []
        location_count = 0
        for template in templates:
            template = cv2.resize(template, None,
                fx = scale, fy = scale, interpolation = cv2.INTER_CUBIC)
            result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
            result = np.where(result >= threshold)
            location_count += len(result[0])
            locations += [result]
        print("scale: {0}, hits: {1}".format(scale, location_count))
        x.append(location_count)
        y.append(scale)
        plt.plot(y, x)
        plt.pause(0.00001)
        if (location_count > best_location_count):
            best_location_count = location_count
            best_locations = locations
            best_scale = scale
            plt.axis([0, 2, 0, best_location_count])
        elif (location_count < best_location_count):
            pass
    plt.close()

    return best_locations, best_scale
utils.py (project: histonets-cv, author: sul-cidr)
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image applying mask to template using method.
    Method can be either of (None, 'laplacian', 'sobel', 'scharr', 'prewitt',
    'roberts', 'canny').
    Returns locations to look for max values."""
    if mask is not None:
        if method:
            kernel = np.ones((3, 3), np.uint8)
            mask = cv2.erode(mask, kernel)
            if method == 'laplacian':
                # use CV_64F to not loose edges, convert to uint8 afterwards
                edge_image = np.uint8(np.absolute(
                    cv2.Laplacian(image, cv2.CV_64F)))
                edge_template = np.uint8(np.absolute(
                    cv2.Laplacian(template, cv2.CV_64F)
                ))
            elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
                filter_func = getattr(skfilters, method)
                edge_image = filter_func(image)
                edge_template = filter_func(template)
                edge_image = convert(edge_image)
                edge_template = convert(edge_template)
            else:  # method == 'canny'
                values = np.hstack([image.ravel(), template.ravel()])
                median = np.median(values)
                lower = int(max(0, (1.0 - sigma) * median))
                upper = int(min(255, (1.0 + sigma) * median))
                edge_image = cv2.Canny(image, lower, upper)
                edge_template = cv2.Canny(template, lower, upper)
            results = cv2.matchTemplate(edge_image, edge_template & mask,
                                        cv2.TM_CCOEFF_NORMED)
        else:
            results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED,
                                        mask)
    else:
        results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return results
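
A hedged usage sketch for match_template_mask using its 'canny' branch; the grayscale input paths and the all-255 mask are assumptions:

import cv2
import numpy as np

image = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)       # placeholder
template = cv2.imread('stamp.png', cv2.IMREAD_GRAYSCALE)   # placeholder
mask = np.full(template.shape, 255, dtype=np.uint8)        # keep the whole template

results = match_template_mask(image, template, mask=mask, method='canny')
y, x = np.unravel_index(results.argmax(), results.shape)
print('best match at', (x, y), 'score:', results.max())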
solver.py (project: airport, author: cfircohen)
def MatchTemplate(template, target):
  """Returns match score for given template"""
  res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
  min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
  return max_val
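
A minimal, assumed call of the MatchTemplate wrapper above; the template must be no larger than the target, and both image paths are placeholders:

import cv2

target = cv2.imread('board.png', cv2.IMREAD_GRAYSCALE)   # placeholder
piece = cv2.imread('piece.png', cv2.IMREAD_GRAYSCALE)    # placeholder

score = MatchTemplate(piece, target)  # highest TM_CCOEFF_NORMED value found in target
print('match score:', score)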

