python类absdiff()的实例源码

10-PiStorms_icontracker.py 文件源码 项目:PiStorms 作者: mindsensors 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def imgdiff(self, img1, img2):
    """Return a scalar motion score for a pair of frames.

    Both inputs are Gaussian-blurred to suppress noise, absolutely
    differenced, blurred again, and binarized so that only strong
    changes (pixel difference > 200) contribute to the score.

    Returns:
        The sum of the binarized difference image (larger = more motion).
    """
    kernel, sigma = (5, 5), 5
    blurred = [cv2.GaussianBlur(im, kernel, sigma) for im in (img1, img2)]
    delta = cv2.GaussianBlur(cv2.absdiff(*blurred), kernel, sigma)
    _, mask = cv2.threshold(delta, 200, 255, cv2.THRESH_BINARY)
    return np.sum(mask)
detect.py 文件源码 项目:scam 作者: zsims 项目源码 文件源码 阅读 36 收藏 0 点赞 0 评论 0
def run(self, context, next_run):
    """Detect motion between the current frame and the previously seen one.

    Decodes the raw frame bytes carried in ``context``, diffs its blurred
    grayscale version against the stored previous frame, and marks motion
    when any contour of the thresholded difference exceeds
    ``self.minimum_area``.  On a match the (optionally annotated) frame is
    written back into the context and ``next_run()`` is invoked.

    Args:
        context: mutable mapping carrying pipeline state; must contain
            'SOURCE_RAW_CONTENT' (encoded image bytes); 'SOURCE_EXTENSION'
            is read when bounding boxes are re-encoded.
        next_run: zero-argument callable invoked when motion is detected.

    Raises:
        NoFrameContentError: if the context carries no frame content.

    Returns:
        ``next_run()``'s result when motion was detected, otherwise None.
    """
    if 'SOURCE_RAW_CONTENT' not in context:
        raise NoFrameContentError()
    current_array = numpy.frombuffer(context['SOURCE_RAW_CONTENT'], dtype=numpy.uint8)
    current_frame = cv2.imdecode(current_array, flags=cv2.IMREAD_COLOR)
    current_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
    # Blur to suppress sensor noise before frame differencing.
    current_gray = cv2.GaussianBlur(current_gray, (ContourMatcher.BLUR_SIZE, ContourMatcher.BLUR_SIZE), 0)

    # First frame ever seen: just remember it, nothing to diff against yet.
    if ContourMatcher.PREVIOUS_FRAME not in context:
        context[ContourMatcher.PREVIOUS_FRAME] = current_frame
        context[ContourMatcher.PREVIOUS_FRAME_GRAY] = current_gray
        return

    # do the matching
    previous_gray = context[ContourMatcher.PREVIOUS_FRAME_GRAY]
    frame_delta = cv2.absdiff(current_gray, previous_gray)
    _, threshold = cv2.threshold(frame_delta, ContourMatcher.THRESHOLD_SENSITIVITY, 255, cv2.THRESH_BINARY)

    # Fill in small shapes where possible
    threshold = cv2.dilate(threshold, None, iterations=2)

    # findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 4; [-2] selects contours under both,
    # so this no longer breaks on OpenCV 4.
    contours = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

    contoured_frame = current_frame.copy()
    matched = False
    for contour in contours:
        if cv2.contourArea(contour) >= self.minimum_area:
            (x, y, w, h) = cv2.boundingRect(contour)
            cv2.rectangle(contoured_frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            matched = True

    context[ContourMatcher.PREVIOUS_FRAME] = current_frame
    context[ContourMatcher.PREVIOUS_FRAME_GRAY] = current_gray

    if matched:
        if self.show_bounding_box:
            _, content = cv2.imencode(context['SOURCE_EXTENSION'], contoured_frame)
            # tobytes() replaces the deprecated ndarray.tostring().
            context['SOURCE_RAW_CONTENT'] = content.tobytes()
        return next_run()
feature_detection2.py 文件源码 项目:Python_SelfLearning 作者: fukuit 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def matchAB(fileA, fileB, win_size=100, step=50):
    """Visualize where image *fileA* differs from image *fileB*.

    Slides a ``win_size`` x ``win_size`` window over the grayscale version
    of image A in ``step``-pixel strides, finds the best-matching patch in
    image B via normalized cross-correlation template matching, and
    accumulates the absolute per-pixel difference between each window and
    its match.  Contours of the thresholded difference map are outlined on
    a copy of A, and the three images (A, B, annotated answer) are shown
    with matplotlib.

    Args:
        fileA: path of the reference image.
        fileB: path of the image to compare against.
        win_size: side length of the sliding comparison window (default 100,
            matching the original hard-coded value).
        step: stride of the sliding window in pixels (default 50).

    Returns:
        None; results are displayed via ``plt.show()``.
    """
    # Load both images.
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # Work in grayscale for template matching and differencing.
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    height, width = grayA.shape
    # Accumulates the absolute difference between each window and its
    # best-matching patch in B.
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height - win_size, step):
        for start_x in range(0, width - win_size, step):
            window = grayA[start_y:start_y + win_size, start_x:start_x + win_size]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1] + win_size,
                                   max_loc[0]:max_loc[0] + win_size]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y + win_size, start_x:start_x + win_size] = result

    # Threshold the difference map and outline each differing region.
    _, result_window_bin = cv2.threshold(result_window, 127, 255, cv2.THRESH_BINARY)
    # findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 4; [-2] selects contours under both.
    contours = cv2.findContours(result_window_bin, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    imgC = imgA.copy()
    for contour in contours:
        # Bounding corners of the contour (renamed from min/max, which
        # shadowed the builtins).
        top_left = np.nanmin(contour, 0)
        bottom_right = np.nanmax(contour, 0)
        loc1 = (top_left[0][0], top_left[0][1])
        loc2 = (bottom_right[0][0], bottom_right[0][1])
        cv2.rectangle(imgC, loc1, loc2, 255, 2)

    # Show the two inputs and the annotated answer side by side.
    plt.subplot(1, 3, 1), plt.imshow(cv2.cvtColor(imgA, cv2.COLOR_BGR2RGB)), plt.title('A'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2), plt.imshow(cv2.cvtColor(imgB, cv2.COLOR_BGR2RGB)), plt.title('B'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3), plt.imshow(cv2.cvtColor(imgC, cv2.COLOR_BGR2RGB)), plt.title('Answer'), plt.xticks([]), plt.yticks([])
    plt.show()
QoM.py 文件源码 项目:PyKinectTk 作者: Qirky 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def MotionImage(self, threshold=200):
    """Return a binary motion image summed over the current window.

    Consecutive frames of ``self.window`` are converted to grayscale,
    absolutely differenced pairwise, and accumulated with ``cv2.add``
    (saturating addition); the accumulated image is then binarized at
    *threshold*.  Finally the window is advanced by one frame; when no
    further frame can be read, ``self.hasData`` is set to False.

    Args:
        threshold: pixel intensity cutoff for the final binary mask.

    Returns:
        A uint8 binary image of the accumulated motion.
    """
    # Initialise the accumulator image.
    # NOTE(review): numpy arrays are indexed (rows, cols) = (height, width);
    # (self.w, self.h) looks transposed — confirm against the frame shape.
    img = np.zeros((self.w, self.h), dtype=np.uint8)

    # Grayscale of the first frame of the window.
    prev_gray = cv2.cvtColor(self.window[0], cv2.COLOR_BGR2GRAY)

    # Iterate over the rest of the window, accumulating frame deltas.
    for i in range(1, self.length):
        curr_gray = cv2.cvtColor(self.window[i], cv2.COLOR_BGR2GRAY)
        # Image subtraction; cv2.add saturates rather than wrapping.
        img = cv2.add(img, cv2.absdiff(curr_gray, prev_gray))
        # Store the last grayscale frame for the next iteration.
        prev_gray = curr_gray

    _, img = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)

    # Try to advance the window by one frame; a failed read ends the stream.
    try:
        self.window.append(self.read())
        self.window.popleft()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception.  Any read failure here is
        # treated as end-of-data.
        self.hasData = False

    return img


问题


面经


文章

微信
公众号

扫码关注公众号