def createTrainingInstances(self, images):
    # Serially compute a HOG descriptor for each (path, label) pair.
    start = time.time()
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        instances.append(Instance(descriptor, label))
    elapsed = time.time() - start
    self.training_instances = instances
    print("HOG TRAIN SERIAL: %d images -> %f" % (len(images), elapsed))
Python cv2.HOGDescriptor() example source code
def HogDescriptor(self, image):
    # Detect people with the built-in HOG + linear SVM people detector.
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    (rects, weights) = hog.detectMultiScale(image, winStride=(5, 5),
                                            padding=(16, 16), scale=1.05,
                                            useMeanshiftGrouping=False)
    return rects
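A hedged usage sketch for the method above; `detector` stands for an instance of the enclosing class, and the image path is hypothetical:

import cv2

frame = cv2.imread("street.jpg")  # hypothetical input image
for (x, y, w, h) in detector.HogDescriptor(frame):
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("people", frame)
cv2.waitKey(0)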
def hog_compute(ims):
    samples = []
    winSize = (64, 64)
    blockSize = (16, 16)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins,
                            derivAperture, winSigma, histogramNormType,
                            L2HysThreshold, gammaCorrection, nlevels)
    # compute(img[, winStride[, padding[, locations]]]) -> descriptors
    winStride = (8, 8)
    padding = (8, 8)
    locations = ((10, 20), (30, 30), (50, 50), (70, 70), (90, 90),
                 (110, 110), (130, 130), (150, 150), (170, 170), (190, 190))
    for im in ims:
        hist = hog.compute(im, winStride, padding, locations)
        samples.append(hist)
    return np.float32(samples)
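As a sanity check on the parameters above, the length of each window's descriptor follows directly from the block layout:

blocks_per_window = ((64 - 16) // 8 + 1) ** 2  # 7 * 7 = 49 blocks per 64x64 window
values_per_block = (16 // 8) ** 2 * 9          # 2x2 cells * 9 bins = 36
print(blocks_per_window * values_per_block)    # 1764 floats per window position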
def get_hog(image):
    # The window spans the whole image; its sides must be multiples of the
    # 8-pixel block stride or the HOGDescriptor constructor will reject them.
    winSize = (image.shape[1], image.shape[0])
    blockSize = (8, 8)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins,
                            derivAperture, winSigma, histogramNormType,
                            L2HysThreshold, gammaCorrection, nlevels)
    # compute(img[, winStride[, padding[, locations]]]) -> descriptors
    winStride = (8, 8)
    padding = (8, 8)
    locations = []  # empty: compute over the whole (padded) image
    hist = hog.compute(image, winStride, padding, locations)
    return hist
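A usage sketch, assuming a grayscale image whose sides are multiples of 8 (the block stride above):

import numpy as np

img = np.zeros((64, 64), dtype=np.uint8)  # hypothetical image
feat = get_hog(img)
# Total length is 576 floats per window position here (64 blocks x 9 bins);
# the (8, 8) padding adds window positions beyond the single full-image window.
print(feat.size)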
def createTestingInstances(self, images):
    # Serial counterpart of createTrainingInstances for the test split.
    start = time.time()
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        instances.append(Instance(descriptor, label))
    elapsed = time.time() - start
    self.testing_instances = instances
    print("HOG TEST SERIAL: %d images -> %f" % (len(images), elapsed))
From detect_from_camera.py, project Pedestrian_Detector, author alexander-hamme:
def find_people(self, img):
    '''
    Detect people in an image.
    :param img: numpy.ndarray
    :return: count of rectangles after non-maxima suppression,
             corresponding to the number of people detected in the picture
    '''
    t = time.time()
    # HOG descriptor/person detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # resize to whichever width is smaller
    image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
    # detect people in the image
    (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                          padding=self.PADDING, scale=self.SCALE)
    # apply non-maxima suppression to the bounding boxes, but use a fairly
    # large overlap threshold to try to keep overlapping boxes that belong
    # to separate people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)
    print("Elapsed time: {} seconds".format(int((time.time() - t) * 100) / 100.0))
    if self.SHOW_IMAGES:
        # draw the final bounding boxes
        for (xA, yA, xB, yB) in pick:
            # tighten the rectangle around each person by a small margin
            shrinkW, shrinkH = int(0.05 * xB), int(0.15 * yB)
            cv2.rectangle(image, (xA + shrinkW, yA + shrinkH),
                          (xB - shrinkW, yB - shrinkH), self.BOX_COLOR, 2)
        cv2.imshow("People detection", image)
        cv2.waitKey(self.IMAGE_WAIT_TIME)
        cv2.destroyAllWindows()
    return len(pick)
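A hypothetical driver for the method above; the class name and its configuration attributes are assumptions, since only the method is shown:

import cv2

detector = PeopleDetector()      # hypothetical enclosing class providing
                                 # MIN_IMAGE_WIDTH, WIN_STRIDE, PADDING, SCALE, etc.
frame = cv2.imread("crowd.jpg")  # hypothetical test image
print("people detected:", detector.find_people(frame))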
def __init__(self, scale=1.08):
    script_path = common.get_script_path()
    self.cascade = cv2.CascadeClassifier(script_path + "/haarcascade_frontalface_alt.xml")
    self.cascade_profile = cv2.CascadeClassifier(script_path + '/haarcascade_profileface.xml')
    self.scale = scale
    self.hog = cv2.HOGDescriptor()
    self.hog.load(script_path + '/hard_negative_svm/hog.xml')
    self.svm = cv2.ml.SVM_load(script_path + '/hard_negative_svm/output_frontal.xml')
    self.svm_profile = cv2.ml.SVM_load(script_path + '/hard_negative_svm/output_profile.xml')
def compute_hog(image, locations):
    hog = cv2.HOGDescriptor()
    winStride = (8, 8)
    padding = (8, 8)
    hist = hog.compute(image, winStride, padding, locations)
    return hist
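Here `locations` is a sequence of top-left window corners; since the default HOGDescriptor uses a 64x128 window, each location must leave room for a full window. A hedged example:

import cv2

img = cv2.imread("pedestrians.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical path
hist = compute_hog(img, ((0, 0), (8, 8)))  # descriptors at two explicit positions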
def get_hog_object(window_dims):
    blockSize = (8, 8)
    blockStride = (8, 8)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0  # HOGDescriptor::L2Hys
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    hog = cv2.HOGDescriptor(window_dims, blockSize, blockStride, cellSize, nbins,
                            derivAperture, winSigma, histogramNormType,
                            L2HysThreshold, gammaCorrection, nlevels)
    return hog
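Usage sketch: the patch passed to compute should match window_dims. With a 64x64 window and the parameters above, each window yields 8x8 = 64 blocks of one 9-bin cell each, i.e. 576 floats:

import numpy as np

hog = get_hog_object((64, 64))
patch = np.zeros((64, 64), dtype=np.uint8)  # hypothetical grayscale patch
desc = hog.compute(patch)
print(desc.size)  # 576 for a single 64x64 window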
def create_hog_from_info_dict(info):
    print(info)
    hog = cv2.HOGDescriptor(
        tuple(info['winSize']),
        tuple(info['blockSize']),
        tuple(info['blockStride']),
        tuple(info['cellSize']),
        info['nbins'],
        info['derivAperture'],
        info['winSigma'],
        info['histogramNormType'],
        info['L2HysThreshold'],
        info['gammaCorrection'],
        info['nlevels'])
    return hog
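A dict with the keys this function reads might look like the following; the values are chosen to mirror the defaults used elsewhere in this file:

info = {
    'winSize': [64, 64], 'blockSize': [16, 16], 'blockStride': [8, 8],
    'cellSize': [8, 8], 'nbins': 9, 'derivAperture': 1, 'winSigma': 4.0,
    'histogramNormType': 0, 'L2HysThreshold': 0.2, 'gammaCorrection': 0,
    'nlevels': 64,
}
hog = create_hog_from_info_dict(info)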
def hog_features(image):
    hog_computer = cv2.HOGDescriptor()
    return hog_computer.compute(image)
def createTrainingInstances(self, images):
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        print(img)
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        instances.append(Instance(descriptor, label))
    self.training_instances = instances
def createTestingInstances(self, images):
    hog = cv2.HOGDescriptor()
    instances = []
    for img, label in images:
        print(img)
        img = read_color_image(img)
        img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
        descriptor = hog.compute(img)
        if descriptor is None:
            descriptor = []
        else:
            descriptor = descriptor.ravel()
        instances.append(Instance(descriptor, label))
    self.testing_instances = instances
def local_hog(image):
    HOGDESC = cv2.HOGDescriptor()
    img, label = image
    img = read_color_image(img)
    img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
    descriptor = HOGDESC.compute(img)
    if descriptor is None:
        descriptor = []
    else:
        descriptor = descriptor.ravel()
    return Instance(descriptor, label)
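Because local_hog takes a single (path, label) tuple and builds its own descriptor, it maps cleanly onto a worker pool. A sketch, assuming read_color_image and Instance are importable in the worker processes:

from multiprocessing import Pool

with Pool() as pool:
    instances = pool.map(local_hog, images)  # images: list of (path, label) tuples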
def find_people(self, img):
    '''
    Detect people in an image.
    :param img: numpy.ndarray
    :return: count of rectangles after non-maxima suppression,
             corresponding to the number of people detected in the picture
    '''
    t = time.time()
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # resize to whichever width is smaller
    image = imutils.resize(img, width=min(self.MIN_IMAGE_WIDTH, img.shape[1]))
    # detect people in the image
    (rects, wghts) = hog.detectMultiScale(image, winStride=self.WIN_STRIDE,
                                          padding=self.PADDING, scale=self.SCALE)
    # apply non-maxima suppression to the bounding boxes using a fairly
    # large overlap threshold to keep overlapping boxes that are still
    # separate people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=self.OVERLAP_THRESHOLD)
    print("Elapsed time of detection: {} seconds".format(int((time.time() - t) * 100) / 100.0))
    if self.SHOW_IMAGES:
        # draw the final bounding boxes
        for (xA, yA, xB, yB) in pick:
            # tighten the rectangle around each person by a small margin
            cv2.rectangle(image, (xA + 5, yA + 5), (xB - 5, yB - 10), self.BOX_COLOR, 2)
        cv2.imshow("People detection", image)
        cv2.waitKey(self.IMAGE_WAIT_TIME)
        cv2.destroyAllWindows()
    return len(pick)
def _extract_feature(X, feature):
    """Performs feature extraction
    :param X: data (rows=images, cols=pixels)
    :param feature: which feature to extract
        - None: no feature is extracted
        - "gray": grayscale features
        - "rgb": RGB features
        - "hsv": HSV features
        - "surf": SURF features
        - "hog": HOG features
    :returns: X (rows=samples, cols=features)
    """
    # transform color space
    if feature == 'gray' or feature == 'surf':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2GRAY) for x in X]
    elif feature == 'hsv':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2HSV) for x in X]
    # operate on smaller images
    small_size = (32, 32)
    X = [cv2.resize(x, small_size) for x in X]
    # extract features
    if feature == 'surf':
        # NOTE: cv2.SURF and cv2.FeatureDetector_create are the OpenCV 2.4
        # API; in OpenCV 3+ SURF lives in opencv-contrib (cv2.xfeatures2d)
        # and the Dense detector was removed.
        surf = cv2.SURF(400)
        surf.upright = True
        surf.extended = True
        num_surf_features = 36
        # create dense grid of keypoints
        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(np.zeros(small_size).astype(np.uint8))
        # compute keypoints and descriptors
        kp_des = [surf.compute(x, kp) for x in X]
        # the second element is the descriptor: keep the first
        # num_surf_features rows
        X = [d[1][:num_surf_features, :] for d in kp_des]
    elif feature == 'hog':
        # histogram of oriented gradients; integer division keeps the
        # tuple entries ints, as cv2.HOGDescriptor requires
        block_size = (small_size[0] // 2, small_size[1] // 2)
        block_stride = (small_size[0] // 4, small_size[1] // 4)
        cell_size = block_stride
        num_bins = 9
        hog = cv2.HOGDescriptor(small_size, block_size, block_stride,
                                cell_size, num_bins)
        X = [hog.compute(x) for x in X]
    elif feature is not None:
        # normalize all intensities to be between 0 and 1
        X = np.array(X).astype(np.float32) / 255
        # subtract mean
        X = [x - np.mean(x) for x in X]
    X = [x.flatten() for x in X]
    return X
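A usage sketch; the image paths are hypothetical and X must be a list of BGR images:

import cv2

X = [cv2.imread(p) for p in ("a.png", "b.png")]  # hypothetical paths
feats = _extract_feature(X, 'hog')
print(len(feats), feats[0].shape)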
def detect():
    move = 0
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # unused here
        image = imutils.resize(img, width=min(400, img.shape[1]))
        (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                                padding=(8, 8), scale=1.05)
        for (x, y, w, h) in rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
        for (xA, yA, xB, yB) in pick:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
            # steer toward the detected person based on where the box
            # sits in the frame
            if (xA / 480) > 0.5:
                print("move to right")
                move = 4
            elif (yA / 640) > 0.5:
                print("move to down")
                move = 3
            elif (xB / 480) < 0.3:
                print("move to left")
                move = 2
            elif (yB / 640) < 0.3:
                print("move to up")
                move = 1
            else:
                print("do nothing")
                move = 0
            mqt.pass_message(move)
        cv2.imshow('img', image)
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # Esc quits
            break
        elif k == ord('w'):
            mqt.pass_message(1)
        elif k == ord('s'):
            mqt.pass_message(3)
    cap.release()
    cv2.destroyAllWindows()