Python destroyAllWindows() example source code
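
All of the snippets below share the same OpenCV HighGUI pattern: show one or more windows with cv2.imshow, block on cv2.waitKey, then release every window with cv2.destroyAllWindows. A minimal sketch of that pattern (the file name sample.jpg is a placeholder, not taken from any project below):

import cv2

img = cv2.imread('sample.jpg')      # placeholder path, for illustration only
if img is not None:
    cv2.imshow('preview', img)      # open a named window
    cv2.waitKey(0)                  # wait until any key is pressed
    cv2.destroyAllWindows()         # close every HighGUI window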

simulation.py (project: trackingtermites, author: dmrib)
def simulate(self):
        """Displays termite trail recorded points at a black arena.

        Args:
            None.
        Returns:
            None.
        """
        self.video_source = video.VideoPlayer(self.params['original_video_path'], self.params['output_path'],
                                         self.params['arena_size'], [], True, 'MOG')
        simulation_length = min(len(x.trail) for x in self.termites)
        self.current_step = 0

        while self.current_step < simulation_length:
            self.background = np.zeros((self.params['arena_size'][1], self.params['arena_size'][0],
                                        3), np.uint8)
            self.draw()
            self.show()

            self.current_step += 1
            self.video_source.next_frame()

        cv2.destroyAllWindows()
img_func.py (project: ghetto_omr, author: pohzhiee)
def splitimg(im_inp, num_rows, num_cols):
    # determine size of input image
    h_img, w_img = im_inp.shape[:2]
    # determine size of each cropped fragment (integer division keeps the slices aligned)
    h_row = h_img // num_rows
    w_col = w_img // num_cols
    # declare fragmented image matrix
    img_frag = np.empty((num_rows, num_cols, h_row, w_col), dtype=np.uint8)
    #fragments input image and put it into matrix
    for i in range(0, num_rows):
        h0 = h_row * i
        h1 = h_row * (i + 1)
        for j in range(0, num_cols):
            w0 = w_col * j
            w1 = w_col * (j + 1)
            img_frag[i, j] = im_inp[h0:h1, w0:w1]
            #uncomment following lines for debugging to show image
            # cv2.imshow('image1', img_frag[i, j])
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
    return img_frag
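
A hedged usage sketch for splitimg: split a grayscale image into fragments, preview one of them, then close the window with destroyAllWindows. The file name and the 3x3 grid are illustrative assumptions, not project values.

import cv2

img = cv2.imread('page.jpg', cv2.IMREAD_GRAYSCALE)   # placeholder input image
fragments = splitimg(img, 3, 3)                       # assumed 3x3 grid
cv2.imshow('fragment', fragments[0, 0])
cv2.waitKey(0)
cv2.destroyAllWindows()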
interaction_updated_global.py (project: Interactive-object-tracking, author: abhishekarya286)
def mask_bg(object_window,img) :
    ''' This function outputs the surrounding pixels
        Basically, image of background with masked target object'''
    global h_img,w_img
    x,y,w,h=object_window
    h_bg=h*2
    w_bg=2*w
    h_=0.5*h
    w_=0.5*w
    x_bg=int(max(x-(w_),0))
    y_bg=int(max(y-(h_),0))
    x_bg1=int(min(x_bg+w_bg,w_img-1))
    y_bg1=int(min(y_bg+h_bg,h_img-1))
    img[y:y+h,x:x+w]=0
    #print object_window
    #print x_bg,y_bg,x_bg1,y_bg1,img.shape
    bg_img=img[y_bg:y_bg1,x_bg:x_bg1]
    #cv2.imshow("masked_background",bg_img)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    return bg_img
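
A hedged usage sketch for mask_bg (the frame path and window values are placeholders). The function reads the frame size from the globals h_img and w_img and blanks the target region in place, so a copy of the frame is passed here:

import cv2

img = cv2.imread('frame.jpg')               # placeholder frame
h_img, w_img = img.shape[:2]                # globals expected by mask_bg
object_window = (100, 80, 60, 120)          # x, y, w, h of the tracked object (illustrative)
bg = mask_bg(object_window, img.copy())     # copy, because the target pixels are zeroed in place
cv2.imshow('masked_background', bg)
cv2.waitKey(0)
cv2.destroyAllWindows()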
lab_global_optimisation.py (project: Interactive-object-tracking, author: abhishekarya286)
def mask_bg(object_window,img) :
    ''' This function outputs the surrounding pixels
        Basically, image of background with masked target object'''
    global h_img,w_img
    x,y,w,h=object_window
    h_bg=h*2
    w_bg=2*w
    h_=0.5*h
    w_=0.5*w
    x_bg=int(max(x-(w_),0))
    y_bg=int(max(y-(h_),0))
    x_bg1=int(min(x_bg+w_bg,w_img-1))
    y_bg1=int(min(y_bg+h_bg,h_img-1))
    img[y:y+h,x:x+w]=0
    #print object_window
    #print x_bg,y_bg,x_bg1,y_bg1,img.shape
    bg_img=img[y_bg:y_bg1,x_bg:x_bg1]
    #cv2.imshow("masked_background",bg_img)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    return bg_img
image_handler.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)

    if resize: 
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))

    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()

    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
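
A hedged usage sketch for visualize_image. IMAGE_WIDTH and IMAGE_HEIGHT are module-level constants in the original project; the 32 x 32 values below are assumptions for illustration only.

import numpy as np

IMAGE_WIDTH, IMAGE_HEIGHT = 32, 32                              # assumed values
flat = np.random.randint(0, 256, IMAGE_WIDTH * IMAGE_HEIGHT)    # a fake flattened patch
visualize_image(flat, name='Random patch', resize=True)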
helper_functions.py (project: SudokuSolver, author: Anve94)
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
helper_functions.py (project: SudokuSolver, author: Anve94)
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.
        Params:
            square_borders  -- A list containing the borders of all squares
            start_grid      -- A list containing the sudoku starting values
            solution        -- A list containing the sudoku solution
            image           -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text (integer pixel coordinates)
        text_x, text_y = ((x2 + x) // 2) - 10, ((y2 + y) // 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
webcam.py (project: moVi, author: netsecIITK)
def close(self):
        self.cap.release()
        cv2.destroyAllWindows()
        print("Closing camera")
frame.py (project: moVi, author: netsecIITK)
def close(self):
        cv2.destroyAllWindows()
        print("Closing window")
find_edges.py (project: opencv-gui-helper-tool, author: maunesh)
def main():
    parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
    parser.add_argument('filename')

    args = parser.parse_args()

    img = cv2.imread(args.filename, cv2.IMREAD_GRAYSCALE)

    cv2.imshow('input', img)

    edge_finder = EdgeFinder(img, filter_size=13, threshold1=28, threshold2=115)

    print "Edge parameters:"
    print "GaussianBlur Filter Size: %f" % edge_finder.filterSize()
    print "Threshold1: %f" % edge_finder.threshold1()
    print "Threshold2: %f" % edge_finder.threshold2()

    (head, tail) = os.path.split(args.filename)

    (root, ext) = os.path.splitext(tail)

    smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
    edge_filename = os.path.join("output_images", root + "-edges" + ext)

    cv2.imwrite(smoothed_filename, edge_finder.smoothedImage())
    cv2.imwrite(edge_filename, edge_finder.edgeImage())

    cv2.destroyAllWindows()
createimage.py (project: LensCalibrator, author: 1024jp)
def show_image(image, scale=1.0, window_title='Image'):
    """Display given image in a window.

    Arguments:
    image (numpy.ndarray) -- Image to display.
    scale (float) -- Magnification of image.
    window_title (str) -- Title of window.
    """
    scaled_image = scale_image(image, scale)

    cv2.imshow(window_title, scaled_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
cv_util.py (project: kaggle-review, author: daxiongshu)
def show_image(im, name='image'):
    cv2.imshow(name, im)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
main.py (project: SudokuVisionSolver, author: tusharsircar95)
def showImage(img,caption='image'):
    cv2.imshow(caption,img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Matches a template of cross to detect inner grid lines and then removes them via flood filling
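
The function that this comment introduces is not included in the listing above. A rough, hedged sketch of the idea it describes (match a small cross-shaped template at the grid-line crossings, then flood fill from each match to erase the connected lines) might look like the following; the helper name, template, and threshold are assumptions, not the project's code.

import cv2
import numpy as np

def remove_grid_lines(binary_img, cross_template, match_thresh=0.6):
    # Locate inner grid-line crossings by template matching.
    result = cv2.matchTemplate(binary_img, cross_template, cv2.TM_CCOEFF_NORMED)
    ys, xs = np.where(result >= match_thresh)
    th, tw = cross_template.shape[:2]
    for x, y in zip(xs, ys):
        seed = (int(x) + tw // 2, int(y) + th // 2)   # centre of the matched cross
        if binary_img[seed[1], seed[0]] != 0:         # fill only if the seed lies on a line pixel
            cv2.floodFill(binary_img, None, seed, 0)  # erase the connected grid lines
    return binary_img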
realtimehandposepipeline.py (project: deep-prior, author: moberweger)
def threadConsumer(self):
        """
        Thread that consumes the frames, estimate the pose and display
        :return: None
        """

        while True:
            if self.stop.value:
                break
            try:
                frm = self.queue.get(block=False)
            except:
                if not self.stop.value:
                    continue
                else:
                    break

            startp = time.time()
            pose = self.estimatePose(frm['crop']) * self.config['cube'][2]/2. + frm['com3D']
            print("{}ms pose".format((time.time() - startp)*1000.))

            # Display the resulting frame
            starts = time.time()
            img = self.show(frm['frame'], pose, frm['M'])
            img = self.addStatusBar(img)
            cv2.imshow('frame', img)
            self.lastshow = time.time()
            self.processKey(cv2.waitKey(1) & 0xFF)
            print("{}ms display".format((time.time() - starts)*1000.))

        cv2.destroyAllWindows()
        print "Exiting consumer..."
        return True
show_label.py (project: mtcnn, author: daikankan)
def show_bbox_landmark(list_file, path_data):
  with open(list_file, 'r') as f:
    annotations = f.readlines()
  num = len(annotations)
  print "%d pics in total" % num
  # random.shuffle(annotations)

  for line in annotations:
    line_split = line.strip().split(' ')
    print(line_split[0])
    path_full = os.path.join(path_data, line_split[0])
    datum = cv2.imread(path_full)
    classes = float(line_split[1])
    bbox = [float(x) for x in line_split[2:6]]
    landmarks = [float(x) for x in line_split[6:]]
    print(classes)
    print(bbox)
    print(landmarks)

    (h, w, c) = datum.shape

    if (bbox[0] != -1):
      x1 = bbox[0] * w
      y1 = bbox[1] * h
      x2 = bbox[2] * w + w
      y2 = bbox[3] * h + h
      cv2.rectangle(datum, (int(x1), int(y1)), (int(x2), int(y2)),
                    (0, 255, 0), 1)

    if (landmarks[0] != -1):
      for i in range(5):
        cv2.circle(datum, (int(landmarks[i] * w), int(landmarks[i + 5] * h)),
                   2, (255, 0, 0))
    cv2.imshow(str(line_split[0]), datum)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
__init__.py (project: garden.facelock, author: kivy-garden)
def face_recognize(self):
        cap = cv2.VideoCapture(self.index)

        face_cascade = cv2.CascadeClassifier(self.cascade)
        '''
        face_cascade: cascade is entered here for further use.
        '''

        while(True):
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Converts coloured video to black and white(Grayscale).
            '''
            if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):

                print("Cascade found")

                self.dispatch('on_match')

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break

            else:
                print("Not recognized")

            cv2.imshow('frame', frame)
            #Comment the above statement not to show the camera screen
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Forcefully Closed")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
        cap.release()
UpperBoundary.py (project: SummerProject_MacularDegenerationDetection, author: WDongYuan)
def ToGrayImage(path):
    image = cv2.imread(path)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # cv2.imwrite('gray_image.jpg',gray_image)
    # cv2.imshow('color_image',image)
    # cv2.imshow('gray_image',gray_image)
    # cv2.waitKey(0)                 # Waits forever for user to press any key
    # cv2.destroyAllWindows()        # Closes displayed windows
    return gray_image
EdgeDetection.py (project: SummerProject_MacularDegenerationDetection, author: WDongYuan)
def ToGrayImage(path):
    image = cv2.imread(path)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # cv2.imwrite('gray_image.jpg',gray_image)
    # cv2.imshow('color_image',image)
    # cv2.imshow('gray_image',gray_image)
    # cv2.waitKey(0)                 # Waits forever for user to press any key
    # cv2.destroyAllWindows()        # Closes displayed windows
    return gray_image
face_capture.py (project: face, author: MOluwole)
def __init__(self, matric_num):
        WHITE = [255, 255, 255]

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        ID = NameFind.AddName(matric_num)
        Count = 0
        cap = cv2.VideoCapture(0)  # Camera object
        self.__trainer__ = None

        if not os.path.exists('dataSet'):
            os.makedirs('dataSet')

        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to grayScale
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                FaceImage = gray[y - int(h / 2): y + int(h * 1.5),
                            x - int(w / 2): x + int(w * 1.5)]  # The face is isolated and cropped with a margin
                Img = (NameFind.DetectEyes(FaceImage))
                cv2.putText(gray, "FACE DETECTED", (x + w // 2, y - 5), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)
                if Img is not None:
                    frame = Img  # Show the detected faces
                else:
                    frame = gray[y: y + h, x: x + w]
                cv2.imwrite("dataSet/" + matric_num.replace('/', '') + "." + str(ID) + "." + str(Count) + ".jpg", frame)
                Count = Count + 1
                # cv2.waitKey(300)
                cv2.imshow("CAPTURED PHOTO", frame)  # show the captured image
            cv2.imshow('Face Recognition System Capture Faces', gray)  # Show the video
            if Count == 150:
                Trainer()
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        print('FACE CAPTURE FOR THE SUBJECT IS COMPLETE')
        cap.release()
        cv2.destroyAllWindows()
recognizer.py (project: face, author: MOluwole)
def __init__(self):

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        recognise = cv2.face.createEigenFaceRecognizer(15, 4000)  # creating EIGEN FACE RECOGNISER
        recognise.load("Recogniser/trainingDataEigan.xml")  # Load the training data

        # -------------------------     START THE VIDEO FEED ------------------------------------------
        cap = cv2.VideoCapture(0)  # Camera object
        # cap = cv2.VideoCapture('TestVid.wmv')   # Video object
        ID = 0
        while True:
            ret, img = cap.read()  # Read the camera object
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to gray
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                # ------------ BY CONFIRMING THE EYES ARE INSIDE THE FACE BETTER FACE RECOGNITION IS GAINED ------------------
                gray_face = cv2.resize((gray[y: y + h, x: x + w]), (110, 110))  # The Face is isolated and cropped
                eyes = eye_cascade.detectMultiScale(gray_face)
                for (ex, ey, ew, eh) in eyes:
                    ID, conf = recognise.predict(gray_face)  # Determine the ID of the photo
                    NAME = NameFind.ID2Name(ID, conf)
                    NameFind.DispID(x, y, w, h, NAME, gray)
            cv2.imshow('EigenFace Face Recognition System', gray)  # Show the video
            if cv2.waitKey(1) & 0xFF == ord('q'):  # Quit if the key is Q
                break
        cap.release()
        cv2.destroyAllWindows()
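
On recent opencv-contrib-python builds the Eigenface factory and loader were renamed; a hedged equivalent of the two recogniser lines above (assuming a current contrib build) is roughly:

import cv2

recognise = cv2.face.EigenFaceRecognizer_create(15, 4000.0)   # replaces createEigenFaceRecognizer
recognise.read("Recogniser/trainingDataEigan.xml")            # replaces load()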

