# These snippets assume the usual imports (import cv2, plus numpy as np, os, etc. where used).
def display_shape():
    # Label the currently selected drawing tool on the canvas image 'obj'
    # ('obj' and 'shape' are module-level globals in the original project).
    global shape
    if shape == 0:
        cv2.putText(obj, 'Off', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 1:
        cv2.putText(obj, 'Pencil', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 2:
        cv2.putText(obj, 'Brush', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 3:
        cv2.putText(obj, 'Eraser', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 4:
        cv2.putText(obj, 'Line', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 5:
        cv2.putText(obj, 'Rectangle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 6:
        cv2.putText(obj, 'Circle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
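# The if/elif chain above can also be written as a table lookup. A minimal
# sketch of that alternative (TOOL_LABELS and display_shape_label are names
# introduced here for illustration only; the drawing settings mirror the
# original function):
import cv2

TOOL_LABELS = {0: 'Off', 1: 'Pencil', 2: 'Brush', 3: 'Eraser',
               4: 'Line', 5: 'Rectangle', 6: 'Circle'}

def display_shape_label(canvas, tool_id):
    label = TOOL_LABELS.get(tool_id)
    if label is not None:
        cv2.putText(canvas, label, (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3,
                    (255, 255, 255), 2)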
# Python putText() usage examples
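# For reference, a minimal self-contained putText call. The positional
# arguments are: image, text, origin (bottom-left corner of the text, in
# (x, y) pixels), font face, font scale, colour (BGR), thickness, and an
# optional line type. This is an illustrative sketch, not taken from any of
# the projects quoted in this listing.
import cv2
import numpy as np

canvas = np.zeros((100, 400, 3), dtype=np.uint8)            # black BGR image
cv2.putText(canvas, 'Hello, putText', (10, 60),              # text origin (x, y)
            cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2,   # scale, colour, thickness
            cv2.LINE_AA)                                     # anti-aliased stroke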
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
    shape = img.shape
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        color = colors[classes[i]]
        # Draw bounding box: bbox is assumed to be normalised [ymin, xmin, ymax, xmax],
        # so the [::-1] below flips each point into the (x, y) order OpenCV expects.
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
        # Draw text...
        s = '%s/%.3f' % (classes[i], scores[i])
        p1 = (p1[0] - 5, p1[1])
        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)
# =========================================================================== #
# Matplotlib show...
# =========================================================================== #
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(-1, 3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff
    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        # Project the marker's unit axes into image coordinates.
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        # Recent OpenCV versions require integer pixel coordinates for drawing.
        corner = tuple(map(int, corners[0].ravel()))
        x_end = tuple(map(int, imgpts[0].ravel()))
        y_end = tuple(map(int, imgpts[1].ravel()))
        z_end = tuple(map(int, imgpts[2].ravel()))
        cv2.line(frame, corner, x_end, (0, 0, 255), 2)
        cv2.line(frame, corner, y_end, (0, 255, 0), 2)
        cv2.line(frame, corner, z_end, (255, 0, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', x_end, font, 0.5, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', y_end, font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', z_end, font, 0.5, (255, 0, 0), 2, cv2.LINE_AA)
def process_image(self, inImg):
    (self.frame_width, self.frame_height) = (112, 92)
    frame = cv2.flip(inImg, 1, 0)
    grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Integer division keeps the downscaled size valid for cv2.resize.
    cropped = cv2.resize(grayImg, (grayImg.shape[1] // self.size, grayImg.shape[0] // self.size))
    faces = self.haar_cascade.detectMultiScale(cropped)
    faces = sorted(faces, key=lambda x: x[3])
    if faces:
        face_i = faces[0]
        # Scale the detection back up to full-resolution coordinates.
        x = face_i[0] * self.size
        y = face_i[1] * self.size
        w = face_i[2] * self.size
        h = face_i[3] * self.size
        face = grayImg[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (self.frame_width, self.frame_height))
        img_no = sorted([int(fn[:fn.find('.')]) for fn in os.listdir(self.path) if fn[0] != '.'] + [0])[-1] + 1
        if self.count % self.cp_rate == 0:
            cv2.imwrite('%s/%s.png' % (self.path, img_no), face_resize)
            print("Captured Img: ", self.count // self.cp_rate + 1)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(frame, self.face_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        self.count += 1
    return frame
def _renderResultOnImage(self, result, arr):
    """
    Draws boxes and text representing each face's emotion.
    """
    import operator, cv2
    img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)
    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),
                      (faceRectangle['left'] + faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                      color=(255, 0, 0), thickness=5)
    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        currEmotion = max(iter(currFace['scores'].items()), key=operator.itemgetter(1))[0]
        textToWrite = '{0}'.format(currEmotion)
        cv2.putText(img, textToWrite, (faceRectangle['left'], faceRectangle['top'] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
    return img
def lipSegment(self, img):
    # self.t1 = cv2.getTickCount()
    lipHull = self.dlib_obj.get_landmarks(img)
    cv2.drawContours(img, lipHull, -1, (255, 0, 0), 2)
    (x, y), (MA, ma), angle = cv2.fitEllipse(lipHull)
    a = ma / 2
    b = MA / 2
    eccentricity = sqrt(pow(a, 2) - pow(b, 2))
    eccentricity = round(eccentricity / a, 2)
    cv2.putText(img, 'E = ' + str(round(eccentricity, 3)), (10, 350),
                self.font, 1, (255, 0, 0), 1)
    if eccentricity < 0.9:
        self.flags.cmd = 'b'
    else:
        self.flags.cmd = 'f'
    if angle < 80:
        self.flags.cmd = 'l'
    elif angle > 100:
        self.flags.cmd = 'r'
    cv2.putText(img, 'Cmd = ' + self.flags.cmd, (10, 300), self.font, 1,
                (0, 0, 255), 1, 16)
    # self.t2 = cv2.getTickCount()
    # print("Time = ", (self.t2 - self.t1) / cv2.getTickFrequency())
    return img
def photoRead(filename):
    frame = cv2.imread(filename)
    FaceArray = getFaceArray(frame)
    for r in FaceArray:
        # r is (x1, y1, x2, y2): draw the box, then crop the detected face region.
        img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
        img3 = frame[r[1]:r[3], r[0]:r[2]]
        feature = Tools.get_feature(img3)
        name = readFace(feature)
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Label the recognised name at the bottom-left corner of the box.
        img2 = cv2.putText(img2, name, (r[0], r[3]), font, 1, (255, 255, 255), 2)
    cv2.imshow('frame', frame)
    cv2.waitKey(0)
def start():
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        FaceArray = getFaceArray(frame)
        img2 = frame
        for r in FaceArray:
            # r is (x1, y1, x2, y2): draw the box, then crop the detected face region.
            img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]
            feature = Tools.get_feature(img3)
            name = readFace(feature)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # Label the recognised name at the bottom-left corner of the box.
            img2 = cv2.putText(img2, name, (r[0], r[3]), font, 1, (255, 255, 255), 2)
        cv2.imshow('frame', img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def renderResultOnImage(result, img):
    """Display the obtained results onto the input image"""
    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        cv2.rectangle(img, (faceRectangle['left'], faceRectangle['top']),
                      (faceRectangle['left'] + faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                      color=(255, 0, 0), thickness=5)
    for currFace in result:
        faceRectangle = currFace['faceRectangle']
        currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]
        textToWrite = "%s" % (currEmotion)
        cv2.putText(img, textToWrite, (faceRectangle['left'], faceRectangle['top'] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
# img = os.path.expanduser('~/Development/sentiEdu/learning/confusedImgs/4.jpg')
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    l_bboxes = []
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        print('Det: (x_min,y_min,W,H) = ({},{},{},{}), class_name = {:s}, score = {:.3f}'.format(
            int(bbox[0]), int(bbox[1]), int(bbox[2] - bbox[0]), int(bbox[3] - bbox[1]), class_name, score))
        cv2.rectangle(im, (int(bbox[0]), int(bbox[3])), (int(bbox[2]), int(bbox[1])), (0, 255, 0), 2)
        cv2.putText(im, '{:s}:{:.3f}'.format(class_name, score),
                    (int(bbox[0]), int(bbox[1]) - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        l_bboxes.append({'x_min': int(bbox[0]), 'y_min': int(bbox[1]), 'x_max': bbox[2], 'y_max': bbox[3], 'cls': class_name, 'score': score})
    return l_bboxes
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None:
            continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            # Text origin must be integer pixel coordinates as well.
            cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
            cnt = cnt + 1
    return image
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),
              (151, 0, 255), (243, 223, 48), (0, 117, 255),
              (58, 184, 14), (86, 67, 140), (121, 82, 6),
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None:
            continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1 - 2, y2 - 2), font, 0.5, (255, 0, 255), 1)
    return img
def draw_on_detected(frame, rects, timestamp):
    # Draw the bounding box on the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Status: Open", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    # write the image to temporary file
    # t = TempImage()
    # print('File saved at' + str(t.path))
    # cv2.imwrite(t.path, frame)
    # analyze
    # pi_surveillance_analyze.analyze(t.path)
def archive_with_items(self):
    """ Write the full frame to the archive folder, with rectangles drawn
    around the detected faces.
    """
    logging.info("Archiving the image with the detected items...")
    # Draw a rectangle around each item
    for f in self.items:
        x, y, w, h = f  # [ v for v in f ]
        cv2.rectangle(self.frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # Add the date and time to the image
    cv2.putText(self.frame, datetime.datetime.now().strftime("%c"), (5, 25),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 3)
    # In debug mode, show the image that is about to be archived in a window
    if self.debug:
        cv2.imshow("preview", self.frame)
        cv2.waitKey()
    # Write the file
    archive_full_name = "{0}_full.jpg".format(self.images_prefix)
    logging.info("Archive file is : '{0}'".format(archive_full_name))
    cv2.imwrite(os.path.join(self.archive_folder, archive_full_name), self.frame)
def annotate(self, frame):
    text = "Frame rate: %.1f" % self.frameRate
    textColor = (0, 255, 0)
    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.5
    thickness = 2
    # getTextSize returns ((width, height), baseline); keep the text height.
    textSize, baseline = cv2.getTextSize(text, font, size, thickness)
    height = textSize[1]
    location = (0, frame.shape[0] - 4 * height)
    cv2.putText(frame, text, location, font, size, textColor,
                thickness=thickness)
    text = "Detection rate: %.1f" % self.detectionRate
    location = (0, frame.shape[0] - height)
    cv2.putText(frame, text, location, font, size, textColor,
                thickness=thickness)