Python examples of cv2.FONT_HERSHEY_SIMPLEX
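
The snippets below are collected from open-source projects and all pass cv2.FONT_HERSHEY_SIMPLEX to cv2.putText (or cv2.getTextSize) to render labels, timestamps, and scores onto images. As a quick orientation before the project code, here is a minimal, self-contained sketch of the common pattern; the canvas size, text, and output file name are placeholders:

import cv2
import numpy as np

# A blank BGR canvas; any image loaded with cv2.imread() works the same way.
img = np.zeros((120, 480, 3), dtype=np.uint8)

# putText(image, text, bottom-left origin, fontFace, fontScale, color (BGR), thickness, lineType)
cv2.putText(img, "Hello, FONT_HERSHEY_SIMPLEX", (10, 70),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2, cv2.LINE_AA)

cv2.imwrite("demo_text.png", img)  # or: cv2.imshow("demo", img); cv2.waitKey(0)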

simple_motion_detection.py (project: IoT-Client, author: suquark)
def draw_on_detected(frame, rects, timestamp):
    # Draw the bounding box on the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Status: Open", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255),
                2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # write the image to temporary file

    # t = TempImage()
    # print('File saved at' + str(t.path))
    # cv2.imwrite(t.path, frame)

    # analyze
    # pi_surveillance_analyze.analyze(t.path)
ocv_detection.py (project: GLMF203, author: GLMF)
def archive_with_items(self):
        """ Ecrit dans le dossier d'archive la frame complète avec des carrés dessinés autour
            des visages détectés
        """
        logging.info("Archive l'image avec les items trouvés...")
        # Dessine un carré autour de chaque item
        for f in self.items: 
            x, y, w, h = f #[ v for v in f ] 
            cv2.rectangle(self.frame, (x,y), (x+w,y+h), (0,255,0), 3) 

        # Add the date and time to the image
        cv2.putText(self.frame, datetime.datetime.now().strftime("%c"), (5, 25), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 3) 

        # Display the image that will be archived in a window
        if self.debug:
            cv2.imshow("preview", self.frame) 
            cv2.waitKey() 

        # Write the file
        archive_full_name = "{0}_full.jpg".format(self.images_prefix)
        logging.info("Archive file is : '{0}'".format(archive_full_name))
        cv2.imwrite(os.path.join(self.archive_folder,  archive_full_name), self.frame)
VideoThread.py (project: live-age-gender-estimator, author: taipalma)
def annotate(self, frame):
         text = "Frame rate: %.1f" % self.frameRate
         textColor = (0,255,0)
         font = cv2.FONT_HERSHEY_SIMPLEX
         size = 0.5
         thickness = 2
         textSize, baseline = cv2.getTextSize(text, font, size, thickness)
         height = textSize[1]  # getTextSize returns ((width, height), baseline)
         location = (0, frame.shape[0] - 4 * height)
         cv2.putText(frame, text, location, font, size, textColor,
            thickness=thickness)

         text = "Detection rate: %.1f" % self.detectionRate
         location = (0,frame.shape[0] - height)
         cv2.putText(frame, text, location, font, size, textColor,
            thickness=thickness)
YOLO_tiny_tf.py (project: YOLO_tensorflow, author: gliese581gg)
def show_results(self,img,results):
        img_cp = img.copy()
        if self.filewrite_txt :
            ftxt = open(self.tofile_txt,'w')
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3])//2
            h = int(results[i][4])//2
            if self.disp_console: print('    class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
            if self.filewrite_img or self.imshow:
                cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
                cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
                cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
            if self.filewrite_txt :             
                ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
        if self.filewrite_img : 
            if self.disp_console: print('    image file written : ' + self.tofile_img)
            cv2.imwrite(self.tofile_img,img_cp)         
        if self.imshow :
            cv2.imshow('YOLO_tiny detection',img_cp)
            cv2.waitKey(1)
        if self.filewrite_txt : 
            if self.disp_console: print('    txt file written : ' + self.tofile_txt)
            ftxt.close()
YOLO_face_tf.py (project: YOLO_tensorflow, author: gliese581gg)
def show_results(self,img,results):
        img_cp = img.copy()
        if self.filewrite_txt :
            ftxt = open(self.tofile_txt,'w')
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3])//2
            h = int(results[i][4])//2
            if self.disp_console: print('    class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
            if self.filewrite_img or self.imshow:
                cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
                cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
                cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
            if self.filewrite_txt :             
                ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
        if self.filewrite_img : 
            if self.disp_console: print('    image file written : ' + self.tofile_img)
            cv2.imwrite(self.tofile_img,img_cp)         
        if self.imshow :
            cv2.imshow('YOLO_face detection',img_cp)
            cv2.waitKey(1)
        if self.filewrite_txt : 
            if self.disp_console: print('    txt file written : ' + self.tofile_txt)
            ftxt.close()
mtcnn_aligner.py (project: prepare-faces-zyf, author: walkoncross)
def cv2_put_text_to_image(img, text, x, y, font_pix_h=10, color=(255, 0, 0)):
    if font_pix_h < 10:
        font_pix_h = 10

    # print img.shape

    h = img.shape[0]

    if x < 0:
        x = 0

    if y > h - 1:
        y = h - font_pix_h

    if y < 0:
        y = font_pix_h

    font_size = font_pix_h / 30.0
    # print font_size
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                font_size, color, 1)
mtcnn_detector.py (project: prepare-faces-zyf, author: walkoncross)
def cv2_put_text_to_image(img, text, x, y, font_pix_h=10, color=(255, 0, 0)):
    if font_pix_h < 10:
        font_pix_h = 10

    y = y + font_pix_h
    # print img.shape

    h = img.shape[0]

    if x < 0:
        x = 0

    if y > h - 1:
        y = h - font_pix_h

    if y < 0:
        y = font_pix_h

    font_size = font_pix_h / 30.0
    # print font_size
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                font_size, color, 1)
thesis_functions.py (project: Foveated-YOLT, author: trigrass2)
def plot_image_final_label(path, top_class, m):

    original_image = cv2.imread(path)

    # Swap Red and Blue color channels BGR -> RGB
    red = original_image[:, :, 2].copy()
    blue = original_image[:, :, 0].copy()
    original_image[:, :, 0] = red
    original_image[:, :, 2] = blue

    cv2.putText(original_image, "Label: {}".format(top_class), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    plt.figure()
    plt.imshow(original_image)
    plt.savefig('final_solution_' + str(m) + '.jpg')

#########################################################################################

#########################################################################################
# FUNCTION: DRAW BAR GRAPH                                                              #
# GOAL:     Draw input image and graph bar with top 5 labels                            #
# INPUT:    final_top_5, final_top_5_prob, path, m                                      #
# OUTPUT:   Graph bar                                                                   #
# RETURN:                                                                               #
#########################################################################################
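The function body that the banner above describes is not included in this listing. Under the assumption that final_top_5 holds five label strings and final_top_5_prob their probabilities, and reusing the cv2 and plt imports already implied by plot_image_final_label above, a minimal sketch (the function name plot_top5_bar_graph and the output file name are hypothetical) might look like:

def plot_top5_bar_graph(path, final_top_5, final_top_5_prob, m):
    # Hypothetical sketch: show the input image next to a horizontal bar chart of the top-5 labels.
    original_image = cv2.imread(path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib

    fig, (ax_img, ax_bar) = plt.subplots(1, 2, figsize=(10, 4))
    ax_img.imshow(original_image)
    ax_img.axis('off')

    positions = range(len(final_top_5))
    ax_bar.barh(positions, final_top_5_prob)
    ax_bar.set_yticks(positions)
    ax_bar.set_yticklabels(final_top_5)
    ax_bar.set_xlabel('probability')

    plt.savefig('bar_graph_' + str(m) + '.jpg')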
cam.py (project: emojivis, author: JustinShenk)
def analyze_emotions(im, landmarks):
    for landmark in landmarks:
        # Observe eyebrow height for surprise
        standheight = np.absolute(landmark[27, 1] - landmark[30, 1])
        eyebrowheight = np.absolute(landmark[27, 1] - landmark[19, 1])
        if standheight == 0:
            standheight += 0.01
        eyedist = float(eyebrowheight) / float(standheight)
        mouthheight = np.absolute(landmark[50, 1] - landmark[57, 1])
        if float(mouthheight) / float(standheight) > 30:
            cv2.putText(im, "mouthheight: " + str(mouthheight), (screenwidth - 80, 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.4,
                        color=(0, 0, 255),
                        thickness=2)
        eyedist += mouthheight / 30
        mouthwidth = np.absolute(landmark[48, 0] - landmark[50, 0])
        nosewidth = np.absolute(landmark[31, 0] - landmark[35, 0])
        mouthdist = float(mouthwidth) / nosewidth
        im = score_emotions(im, eyedist, mouthdist)

    return im
fasterRCNN.py (project: deel, author: uei)
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    CV_AA = 16
    print(clss.shape)
    print(bbox.shape)
    for cls_id in range(1, 21):
        _cls = clss[:, cls_id][:, np.newaxis]
        _bbx = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((_bbx, _cls))
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        inds = np.where(dets[:, -1] >= conf)[0]
        for i in inds:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
extract_silhouettes.py (project: AMBR, author: Algomorph)
def draw_silhouette(self, foreground, bin_mask, tracked_object_stats, centroid):
        contours = cv2.findContours(bin_mask, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)[1]
        for i_contour in range(0, len(contours)):
            cv2.drawContours(foreground, contours, i_contour, (0, 255, 0))
        x1 = tracked_object_stats[cv2.CC_STAT_LEFT]
        x2 = x1 + tracked_object_stats[cv2.CC_STAT_WIDTH]+1
        y1 = tracked_object_stats[cv2.CC_STAT_TOP]
        y2 = y1 + tracked_object_stats[cv2.CC_STAT_HEIGHT]+1
        if SilhouetteExtractor.DRAW_BBOX:
            cv2.rectangle(foreground, (x1, y1), (x2, y2), color=(0, 0, 255))
            cv2.drawMarker(foreground, SilhouetteExtractor.__to_int_tuple(centroid), (0, 0, 255), cv2.MARKER_CROSS, 11)
            bbox_w_h_ratio = tracked_object_stats[cv2.CC_STAT_WIDTH] / tracked_object_stats[cv2.CC_STAT_HEIGHT]
            cv2.putText(foreground, "BBOX w/h ratio: {0:.4f}".format(bbox_w_h_ratio), (x1, y1 - 18),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
        if SilhouetteExtractor.SHOW_INTERSECTS:
            if self.intersects_frame_boundary(x1, x2, y1, y2):
                cv2.putText(foreground, "FRAME BORDER INTERSECT DETECTED", (0, 54), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255))
detector.py (project: TA_example_labs, author: mit-racecar)
def contour_match(self, img):
        '''
        Returns the image with bounding boxes and labels drawn, plus a Detection
        for the first filtered contour.
        '''
        # get filtered contours
        contours = self.get_filtered_contours(img)
        detection = Detection()
        height,width,channel = img.shape
        mean_color = (15,253,250)
        for i, (cnt, box)  in enumerate(contours): 
            # plot box and label around contour
            x,y,w,h = box
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img,"cone", (x,y), font, 1,mean_color,4)
            cv2.rectangle(img,(x,y),(x+w,y+h), mean_color,2)
            if i == 0:
                detection.x = x
                detection.y = y
                detection.w = w
                detection.h = h
                detection.error_center = 0.5 - (x/float(width))
                detection.error_size = (self.DESIRED_AREA-w*h)/float(width*height)
            cv2.putText(img,"center:%.2f, distance: %.2f" % (detection.error_center, detection.error_size), (x-w,y-h/2), font, 1,mean_color,4)
        # return the image with boxes around detected contours
        return img, detection
train.py (project: TFFRCNN, author: CharlesShang)
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
kitti2pascalvoc.py (project: TFFRCNN, author: CharlesShang)
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None: continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1-2, y2-2), font, 0.5, (255, 0, 255), 1)
    return img
predict.py (project: ppap_detect, author: ashitani)
def draw_boxes(im_org,sorted_boxes,classes,block_x,block_y,biases,colors):
  im_marked=im_org.copy()
  im_size=np.shape(im_org)
  im_h=im_size[0]
  im_w=im_size[1]

  for sorted_box in sorted_boxes:
      b,j,class_id,p_class = sorted_box

      print(classes[class_id], np.max(p_class) * 100)

      x=b.x
      y=b.y
      w=b.w
      h=b.h

      x0 = int(np.clip(x-w/2,0,im_w))
      y0 = int(np.clip(y-h/2,0,im_h))
      x1 = int(np.clip(x+w/2,0,im_w))
      y1 = int(np.clip(y+h/2,0,im_h))
      im_marked=cv2.rectangle(im_marked, (x0, y0),(x1, y1),colors[class_id],thickness=2)
#      im_marked=cv2.rectangle(im_marked, (x0, y0),(x0+100, y0+20) ,colors[class_id],thickness=-1)
#      cv2.putText(im_marked, classes[class_id],(x0+5,y0+15), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),thickness=2)
  return im_marked
tb_common.py (project: textboxes, author: shinjayne)
def draw_rect(I, r, c, thickness=1):
    if abs(sum(r)) < 100: # conditional to prevent min/max error
        cv2.rectangle(I, (int(r[0] * image_size), int(r[1] * image_size)),
                      (int((r[0] + max(r[2], 0)) * image_size), int((r[1] + max(r[3], 0)) * image_size)),
                      c, thickness)

# def draw_ann(I, r, text, color=(255, 0, 255), confidence=-1):
#   draw_rect(I, r, color, 1)
#   cv2.rectangle(I, (int(r[0] * image_size), int(r[1] * image_size - 15)),
#                 (int(r[0] * image_size + 100), int(r[1] * image_size)),
#                 color, -1)

#   text_ = text

#   if confidence >= 0:
#       text_ += ": %0.2f" % confidence

#   cv2.putText(I, text_, (int(r[0] * image_size), int((r[1]) * image_size)),
#               cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
libs.py (project: Face-Recognition-for-Mobile-Robot, author: gagolucasm)
def gui():
    size=100
    img = np.zeros((1000,700,3), np.uint8)
    cv2.namedWindow('GUI')
    xmar=ymar=50
    for i in range(6):
        for j in range(4):
            img1 = cv2.imread("faces/cara"+str(i+j+1)+".JPEG")
            img1=resize(img1,width = size,height=size)
            if (img1.shape[0] == 100 and img1.shape[1] == 100):
                img[ymar:ymar+size, xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+size)] = img1
            else:
                img[ymar:ymar+img1.shape[0], xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+img1.shape[1])] = img1
        ymar+=150
    cv2.putText(img, "Presiona Q para salir", (5, 25),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "TFG Lucas Gago", (500, 925),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "Version 3", (500, 950),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.imshow('GUI',img)
VOC_process_util.py (project: hellish, author: unlimblue)
def proc_image(img, bboxes=None, border_size=1, font_size=0.3, xconfidence=0.5):
    # img = cv2.resize(img, (512, 512))
    if bboxes is not None:
        for bbox in bboxes:
            if bbox.shape[0] == 5:
                c, xmin, ymin, xmax, ymax = [int(x) for x in bbox]
                mx = int((xmin+xmax)/2)
                my = int((ymin+ymax)/2)
                cv2.putText(img, "%d: %s"%(c, rcmap[c]), (mx, my), cv2.FONT_HERSHEY_SIMPLEX, font_size, (0,255,155), border_size)
                cv2.rectangle(img,(xmin, ymax),(xmax, ymin),(255,15,5),border_size)
            else:
                c, xmin, ymin, xmax, ymax = [int(x) for x in bbox[:5]]
                confidence = bbox[5]
                if confidence > xconfidence:
                    mx = int((xmin+xmax)/2)
                    my = int((ymin+ymax)/2)
                    cv2.putText(img, str(confidence), (mx, my), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,0,155), border_size)
                    cv2.putText(img, "%d: %s"%(c, rcmap[c]), (mx, my + 15), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,0,155), border_size)
                    cv2.rectangle(img,(xmin, ymax),(xmax, ymin),(0,255,155),border_size)
    return img
main.py (project: FaceRec, author: vudung45)
def camera_recog():
    print("[INFO] camera sensor warming up...")
    vs = cv2.VideoCapture(0)  # get input from webcam
    while True:
        _, frame = vs.read()
        # you could certainly add a ROI here, but for the sake of a demo it is left as simple as this
        rects, landmarks = face_detect.detect_face(frame, 80)  # min face size is set to 80x80
        aligns = []
        positions = []
        for (i, rect) in enumerate(rects):
            aligned_face, face_pos = aligner.align(160,frame,landmarks[i])
            aligns.append(aligned_face)
            positions.append(face_pos)
        features_arr = extract_feature.get_features(aligns)
        recog_data = findPeople(features_arr, positions)
        for (i,rect) in enumerate(rects):
            cv2.rectangle(frame,(rect[0],rect[1]),(rect[0] + rect[2],rect[1]+rect[3]),(255,0,0)) #draw bounding box for the face
            cv2.putText(frame,recog_data[i][0]+" - "+str(recog_data[i][1])+"%",(rect[0],rect[1]),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),1,cv2.LINE_AA)

        cv2.imshow("Frame",frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

