def correlation_tracker(self):
    flag = time.time()
    for track in self.tracks:
        if track.source == 'tracking':
            if track.updated:
                print('===!!!===')
                print('Track updated!!!')
                # re-seed the tracker with .start_track
                track.tracker.start_track(self.img,
                                          dlib.rectangle(*track.updatebox))
                track.updated = False
            else:
                # advance the tracker one frame with .update
                track.tracker.update(self.img)
            rect = track.tracker.get_position()
            track.bbox = [int(rect.left()), int(rect.top()),
                          int(rect.right()), int(rect.bottom())]
            print('Track one frame', time.time() - flag)
        else:
            # tracks from other sources are left untouched
            pass
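For reference, a minimal standalone sketch of the dlib correlation-tracker lifecycle used above (start_track to seed, update once per frame, get_position to read the box back); the video source and the initial box are placeholders, not values from the project.

import cv2
import dlib

tracker = dlib.correlation_tracker()
cap = cv2.VideoCapture(0)                      # placeholder video source

ok, frame = cap.read()
assert ok and frame is not None
# Seed the tracker with an initial (left, top, right, bottom) box.
tracker.start_track(frame, dlib.rectangle(100, 100, 200, 200))

while True:
    ok, frame = cap.read()
    if not ok:
        break
    tracker.update(frame)                      # track one frame
    rect = tracker.get_position()              # dlib.drectangle
    box = [int(rect.left()), int(rect.top()),
           int(rect.right()), int(rect.bottom())]
    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (255, 255, 255), 2)
    cv2.imshow('tracking', frame)
    if cv2.waitKey(1) & 0xFF == 27:            # Esc quits
        break

cap.release()
cv2.destroyAllWindows()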
Python dlib.rectangle() usage examples (source code)
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
"""
Find the largest face bounding box in an image.
:param rgbImg: RGB image to process. Shape: (height, width, 3)
:type rgbImg: numpy.ndarray
:param skipMulti: Skip image if more than one face detected.
:type skipMulti: bool
:return: The largest face bounding box in an image, or None.
:rtype: dlib.rectangle
"""
assert rgbImg is not None
faces = self.getAllFaceBoundingBoxes(rgbImg)
if (not skipMulti and len(faces) > 0) or len(faces) == 1:
return max(faces, key=lambda rect: rect.width() * rect.height())
else:
return None
def findLandmarks(self, rgbImg, bb):
"""
Find the landmarks of a face.
:param rgbImg: RGB image to process. Shape: (height, width, 3)
:type rgbImg: numpy.ndarray
:param bb: Bounding box around the face to find landmarks for.
:type bb: dlib.rectangle
:return: Detected landmark locations.
:rtype: list of (x,y) tuples
"""
assert rgbImg is not None
assert bb is not None
points = self.predictor(rgbImg, bb)
#return list(map(lambda p: (p.x, p.y), points.parts()))
return [(p.x, p.y) for p in points.parts()]
#pylint: disable=dangerous-default-value
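getLargestFaceBoundingBox and findLandmarks above match the interface of OpenFace's AlignDlib class; assuming that class, a usage sketch might look as follows (the model and image paths are placeholders).

import cv2
import openface  # assumes the OpenFace Python package is installed

# Placeholder paths for the dlib landmark model and the input image.
align = openface.AlignDlib('shape_predictor_68_face_landmarks.dat')

bgr = cv2.imread('face.jpg')
assert bgr is not None
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

bb = align.getLargestFaceBoundingBox(rgb)       # dlib.rectangle or None
if bb is not None:
    landmarks = align.findLandmarks(rgb, bb)    # list of (x, y) tuples
    print(len(landmarks), 'landmarks found')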
def OPENCV_getAllFaceBoundingBoxes(self, rgbImg):
    """
    Find all face bounding boxes in an image.
    :param rgbImg: RGB image to process. Shape: (height, width, 3)
    :type rgbImg: numpy.ndarray
    :return: All face bounding boxes in an image.
    :rtype: list of dlib.rectangle
    """
    assert rgbImg is not None
    boxes = []
    try:
        faces = self.OPENCV_Detector.detectMultiScale(rgbImg)
        for (x, y, w, h) in faces:
            boxes.append(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
        return boxes
    except Exception as e:
        # In rare cases, OpenCV raises here; return no detections instead.
        print("Warning: {}".format(e))
        return []
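A standalone sketch of the Haar-cascade-to-dlib.rectangle conversion performed above; the cascade path assumes the stock model shipped with the opencv-python wheel, and the image path is a placeholder.

import cv2
import dlib

cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
detector = cv2.CascadeClassifier(cascade_path)

img = cv2.imread('face.jpg')                    # placeholder image
assert img is not None
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

boxes = []
for (x, y, w, h) in detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5):
    # OpenCV reports (x, y, width, height); dlib wants (left, top, right, bottom).
    boxes.append(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
print(len(boxes), 'face(s) detected')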
def getLargestFaceBoundingBox(self, rgbImg):
    """
    Find the largest face bounding box in an image.
    :param rgbImg: RGB image to process. Shape: (height, width, 3)
    :type rgbImg: numpy.ndarray
    :return: The largest face bounding box in an image, or None.
    :rtype: dlib.rectangle
    """
    assert rgbImg is not None
    faces = self.HOG_getAllFaceBoundingBoxes(rgbImg)
    if len(faces) > 0:
        return max(faces, key=lambda rect: rect.width() * rect.height())
    else:
        return None
def findLandmarks(self, rgbImg, bb):
"""
Find the landmarks of a face.
:param rgbImg: RGB image to process. Shape: (height, width, 3)
:type rgbImg: numpy.ndarray
:param bb: Bounding box around the face to find landmarks for.
:type bb: dlib.rectangle
:return: Detected landmark locations.
:rtype: list of (x,y) tuples
"""
assert rgbImg is not None
assert bb is not None
points = self.predictor(rgbImg, bb)
return list(map(lambda p: (p.x, p.y), points.parts()))
def predictNewLocationsOfTracks(self):
    global log, idx
    if self.tracks:
        # log[idx, 0] = self.tracks[0].consecutiveInvisibleCount
        # log[idx, :4] = self.tracks[1].bbox[:4]
        # log[idx, 4] = self.tracks[1].consecutiveInvisibleCount
        img = cv2.imread('/home/yaos11/zhouyz/CTT/img/bolt/%04d.jpg' % self.idx)
        assert img is not None
        # print('===!!!===!!!===', img.shape)
        for track in self.tracks:
            box = track.bbox
            pt1 = (int(box[0]), int(box[1]))
            pt2 = (int(box[2]), int(box[3]))
            cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        cv2.imshow('Video', img)
        cv2.waitKey(1)
def CT_run(self, img, img_old):
    """ CT_run:
    -------
    Advance the correlation tracker (CT) by one frame via CT.update().
    If self.UPDATED is set, first re-seed the tracker on the previous
    frame with CT.start_track().
    """
    if self.UPDATED:
        self.UPDATED = False  # consume the update flag
        self.CT.start_track(img_old, dlib.rectangle(*self.CT_box_update))
        self._CT_turn_new_to_old()  # turn new to old
    self.CT.update(img)
    # get the current position and refresh CT_box
    rect = self.CT.get_position()
    self.CT_box = [int(rect.left()), int(rect.top()),
                   int(rect.right()), int(rect.bottom())]
    # if self.UPDATED:
    #     self._CT_turn_new_to_old()
    return
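The UPDATED flag above implements a re-seed-on-detection pattern: when a fresh box arrives, the tracker is restarted on the previous frame before the regular per-frame update. Below is a standalone sketch of that pattern, with a dlib HOG face detector standing in for whatever supplies CT_box_update; the video path and the 30-frame re-detection interval are placeholders.

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
tracker = dlib.correlation_tracker()
cap = cv2.VideoCapture('video.mp4')             # placeholder path

prev_frame = None
seeded = False
frame_idx = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    # Periodically re-seed the tracker from a fresh detection made on the
    # previous frame (this mirrors the UPDATED branch above).
    if prev_frame is not None and frame_idx % 30 == 0:
        dets = detector(prev_frame, 1)
        if dets:
            tracker.start_track(prev_frame, dets[0])
            seeded = True
    if seeded:
        tracker.update(frame)
        rect = tracker.get_position()
        box = [int(rect.left()), int(rect.top()),
               int(rect.right()), int(rect.bottom())]
    prev_frame = frame
    frame_idx += 1

cap.release()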
faceWarp.py source — project: DelaunayVisualization-FacialWarp, author: sneha-belkhale
def get_landmarks(im):
    # get a bounding rectangle for the primary face in the image
    rects = cascade.detectMultiScale(im, 1.3, 5)
    # only take the x, y, w, h coordinates of the first face detected
    x, y, w, h = rects[0]
    # define a rectangle that will contain the face
    rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    # use our predictor to find the facial points within our bounding box
    face_points = predictor(im, rect).parts()
    # save our results in an array
    landmarks = []
    for p in face_points:
        landmarks.append([p.x, p.y])
    return landmarks
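get_landmarks above relies on module-level cascade and predictor objects; a hedged setup sketch follows (the model and image paths are placeholders).

import cv2
import dlib

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

im = cv2.imread('face.jpg')                     # placeholder image
assert im is not None
points = get_landmarks(im)                      # assumes at least one face is detected
print(len(points), 'landmark points')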
def get_landmarks(img, detect_face=True):
    """ Return the landmarks of the image """
    h, w = img.shape[:2]          # numpy shape is (height, width)
    # default box: the central 84% of the image
    x = 0.08 * w
    y = 0.08 * h
    w = 0.84 * w
    h = 0.84 * h
    x, y, w, h = convert_to_long(x, y, w, h)
    if detect_face:
        rects = cascade.detectMultiScale(img, 1.3, 5)
        if len(rects) >= 1:
            # keep the detection with the largest height
            rects = rects[np.argsort(rects[:, 3])[::-1]]
            x, y, w, h = [int(v) for v in rects[0]]
    rect = dlib.rectangle(x, y, x + w, y + h)
    return np.array([(p.x, p.y) for p in predictor(img, rect).parts()])
def get_facial_landmarks_from_mask(img, pts):
rect = cv2.boundingRect(pts)
rect = dlib.rectangle(rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
return np.matrix([list(pt) for pt in pts]), rect
def toRoi(rect):
return dlib.rectangle(0, 0, rect.right() - rect.left(), rect.bottom() - rect.top())
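toRoi re-anchors a detection at the origin, which is useful after cropping the detected region out of the full image; the sketch below assumes the detection lies fully inside the image, and the model and image paths are placeholders.

import cv2
import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

img = cv2.imread('face.jpg')                    # placeholder image
assert img is not None
dets = detector(img, 1)
if dets:
    rect = dets[0]
    # Crop the face and run the predictor against the origin-anchored box.
    face = np.ascontiguousarray(img[rect.top():rect.bottom(), rect.left():rect.right()])
    shape = predictor(face, toRoi(rect))
    print(shape.num_parts, 'landmarks in the cropped region')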
def align(self, imgDim, rgbImg, bb,
          landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, scale=1.0):
    r"""align(imgDim, rgbImg, bb, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, scale=1.0)
    Transform and align a face in an image.
    :param imgDim: The edge length in pixels of the square the image is resized to.
    :type imgDim: int
    :param rgbImg: RGB image to process. Shape: (height, width, 3)
    :type rgbImg: numpy.ndarray
    :param bb: Bounding box around the face to align, \
        as [left, top, right, bottom].
    :type bb: list of int
    :param landmarks: Detected landmark locations. \
        Landmarks found on `bb` if not provided.
    :type landmarks: list of (x,y) tuples
    :param landmarkIndices: The indices to transform to.
    :type landmarkIndices: list of ints
    :param scale: Scale image before cropping to the size given by imgDim.
    :type scale: float
    :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
    :rtype: numpy.ndarray
    """
    assert imgDim is not None
    assert rgbImg is not None
    assert landmarkIndices is not None
    assert bb is not None
    bb_dlib = dlib.rectangle(left=bb[0], top=bb[1], right=bb[2], bottom=bb[3])
    if landmarks is None:
        landmarks = self.findLandmarks(rgbImg, bb_dlib)
    npLandmarks = np.float32(landmarks)
    npLandmarkIndices = np.array(landmarkIndices)
    # pylint: disable=maybe-no-member
    H = cv2.getAffineTransform(
        npLandmarks[npLandmarkIndices],
        imgDim * MINMAX_TEMPLATE[npLandmarkIndices] * scale + imgDim * (1 - scale) / 2)
    thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
    return thumbnail
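A hedged end-to-end sketch of the variant above: detect with dlib, pass the box as a plain [left, top, right, bottom] list, and align to a 96 x 96 crop. The class that defines align() is not shown here, so `aligner` below is an assumed instance of it, and the image paths are placeholders.

import cv2
import dlib

detector = dlib.get_frontal_face_detector()

bgr = cv2.imread('face.jpg')                    # placeholder image
assert bgr is not None
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

dets = detector(rgb, 1)
if dets:
    d = dets[0]
    bb = [d.left(), d.top(), d.right(), d.bottom()]   # plain list, as align() expects
    crop = aligner.align(96, rgb, bb)                 # aligner: assumed instance of the class above
    cv2.imwrite('aligned.png', cv2.cvtColor(crop, cv2.COLOR_RGB2BGR))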
def start(self, im, p0, p1):
    # p0 is the upper-left corner of the object, p1 is the lower-right corner
    self.t.start_track(im, dlib.rectangle(p0[0], p0[1], p1[0], p1[1]))
def getFaceKeypoints(img, detector, predictor, maxImgSizeForDetection=640):
    imgScale = 1
    scaledImg = img
    if max(img.shape) > maxImgSizeForDetection:
        imgScale = maxImgSizeForDetection / float(max(img.shape))
        scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))
    # face detection
    dets = detector(scaledImg, 1)
    if len(dets) == 0:
        return None
    shapes2D = []
    for det in dets:
        # map the detection back to the original image scale
        faceRectangle = rectangle(int(det.left() / imgScale), int(det.top() / imgScale),
                                  int(det.right() / imgScale), int(det.bottom() / imgScale))
        # facial landmark detection
        dlibShape = predictor(img, faceRectangle)
        shape2D = np.array([[p.x, p.y] for p in dlibShape.parts()])
        # transpose so the shape is 2 x n rather than n x 2; this simplifies later computations
        shape2D = shape2D.T
        shapes2D.append(shape2D)
    return shapes2D
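A usage sketch for getFaceKeypoints; it assumes `rectangle` was imported from dlib at module level (as the unqualified call above suggests), and the model and image paths are placeholders.

import cv2
import dlib
from dlib import rectangle  # getFaceKeypoints above calls rectangle() unqualified

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

img = cv2.imread('face.jpg')                    # placeholder image
assert img is not None
shapes2D = getFaceKeypoints(img, detector, predictor)
if shapes2D is not None:
    # Each entry is a 2 x 68 array of landmark coordinates.
    print(len(shapes2D), shapes2D[0].shape)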
def get_faceim_shape(im):
    rect = dlib.rectangle(0, 0, im.shape[1], im.shape[0])
    face_pts = np.ndarray((68, 2))
    shape = predictor(im, rect)
    for i in range(68):
        face_pts[i, 0] = shape.part(i).x
        face_pts[i, 1] = shape.part(i).y
    return face_pts
def get_landmarks(self, img, box=None, left=None, top=None, right=None, bottom=None):
    if box is not None:
        left, top, right, bottom = box
    left = int(left)
    top = int(top)
    right = int(right)
    bottom = int(bottom)
    bb = dlib.rectangle(left, top, right, bottom)
    landmarks = self.align_tool.findLandmarks(img, bb)
    npLandmarks = np.float32(landmarks)
    npLandmarkIndices = np.array(self.landmarkIndices)
    return npLandmarks[npLandmarkIndices]
def segment(self, img, box, landmarks):
    left, top, right, bottom = box
    left, top, right, bottom = int(left), int(top), int(right), int(bottom)
    bb = dlib.rectangle(left, top, right, bottom)  # kept for reference; the warp below uses landmarks only
    H = cv2.getAffineTransform(
        landmarks,
        self.imgDim * MINMAX_TEMPLATE[self.npLandmarkIndices] * self.scale + self.imgDim * (1 - self.scale) / 2)
    thumbnail = cv2.warpAffine(img, H, (self.imgDim, self.imgDim))
    return [('2d-align', thumbnail)]
def get_landmarks(im):
    rects = cascade.detectMultiScale(im, 1.3, 5)
    # if len(rects) > 1:
    #     raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces
    print(len(rects))
    x, y, w, h = rects[0]
    rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    return numpy.matrix([[p.x, p.y] for p in predictor(im, rect).parts()])