def compute(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
descriptor = []
dominantGradients = np.zeros_like(frame)
maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
maxGradient = np.absolute(maxGradient)
for k in range(1,len(self.kernels)):
kernel = self.kernels[k]
gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
gradient = np.absolute(gradient)
        np.maximum(maxGradient, gradient, out=maxGradient)
indices = (maxGradient == gradient)
dominantGradients[indices] = k
frameH, frameW = frame.shape
for row in range(self.rows):
for col in range(self.cols):
mask = np.zeros_like(frame)
            mask[(frameH // self.rows) * row:(frameH // self.rows) * (row + 1),
                 (frameW // self.cols) * col:(frameW // self.cols) * (col + 1)] = 255
hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
hist = cv2.normalize(hist, None)
descriptor.append(hist)
    return np.concatenate(descriptor)
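# A minimal standalone sketch of the dominant-gradient step above, using
# np.argmax over stacked filter responses instead of the running maximum;
# the two Sobel kernels are illustrative stand-ins for whatever self.kernels holds:
import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2GRAY)
kernels = [
    np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32),  # horizontal edges
    np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32),  # vertical edges
]
responses = np.stack([np.abs(cv2.filter2D(gray, cv2.CV_32F, k)) for k in kernels])
dominant = np.argmax(responses, axis=0).astype(np.uint8)  # winning kernel index per pixel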
def compute(self, frame):
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
dx = cv2.filter2D(frame, cv2.CV_32F, self.xkernel)
dy = cv2.filter2D(frame, cv2.CV_32F, self.ykernel)
    magnitudes, orientations = cv2.cartToPolar(dx, dy)
descriptor = []
frameH, frameW = frame.shape
mask_threshold = magnitudes <= self.threshold
for row in range(self.rows):
for col in range(self.cols):
mask = np.zeros_like(frame)
            mask[(frameH // self.rows) * row:(frameH // self.rows) * (row + 1),
                 (frameW // self.cols) * col:(frameW // self.cols) * (col + 1)] = 1
            mask[mask_threshold] = 0
hist = cv2.calcHist([orientations], self.channel, mask, [self.bins], self.range)
hist = cv2.normalize(hist, None)
descriptor.append(hist)
    return np.concatenate(descriptor)
def optical_flow(one, two):
"""
method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
"""
one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))  # fixed-size buffer: assumes 320x120 input frames
# set saturation
hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
    # obtain dense optical flow parameters
flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
pyr_scale=0.5, levels=1, winsize=15,
iterations=2,
poly_n=5, poly_sigma=1.1, flags=0)
# convert from cartesian to polar
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
# hue corresponds to direction
hsv[:,:,0] = ang * (180/ np.pi / 2)
# value corresponds to magnitude
hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # convert to float32 so cvtColor can consume it
    hsv = np.asarray(hsv, dtype=np.float32)
rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
return rgb_flow
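# Usage sketch for optical_flow ('drive.mp4' is a placeholder; the frames must
# be 320x120 RGB to match the hardcoded HSV buffer above):
import cv2

cap = cv2.VideoCapture('drive.mp4')
ok1, prev_bgr = cap.read()
ok2, next_bgr = cap.read()
cap.release()
if ok1 and ok2:
    rgb_flow = optical_flow(cv2.cvtColor(prev_bgr, cv2.COLOR_BGR2RGB),
                            cv2.cvtColor(next_bgr, cv2.COLOR_BGR2RGB))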
def describe(self, image):
# Compute a 3D histogram in the RGB colorspace and normalize.
hist = cv2.calcHist([image], [0, 1, 2],
None, self.bins, [0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist, hist)
# Return the 3D histogram output as a flattened array.
return hist.flatten()
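# Standalone equivalent of describe() with bins fixed at (8, 8, 8), as a quick
# sanity check ('query.png' is a placeholder path):
import cv2

img = cv2.imread('query.png')
hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist, hist)
features = hist.flatten()  # 8 * 8 * 8 = 512-dimensional color feature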
def depth_callback(self, ros_image):
try:
inImg = self.bridge.imgmsg_to_cv2(ros_image)
    except CvBridgeError as e:
        print(e)
inImgarr = np.array(inImg, dtype=np.uint16)
# inImgarr = cv2.GaussianBlur(inImgarr, (3, 3), 0)
# cv2.normalize(inImgarr, inImgarr, 0, 1, cv2.NORM_MINMAX)
self.outImg, self.num_fingers = self.process_depth_image(inImgarr)
# outImg = self.process_depth_image(inImgarr)
# rate = rospy.Rate(10)
self.num_pub.publish(self.num_fingers)
# self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
# rate.sleep()
cv2.imshow("Hand Gesture Recognition", self.outImg)
cv2.waitKey(3)
def writeOpticalFlowImage(self, index, optical_flow):
filename = "flow_" + str(index) + ".png"
output_path = os.path.join(self.optical_flow_output_directory, filename)
# create hsv image
shape_optical_flow = optical_flow.shape[:-1]
shape_hsv = [shape_optical_flow[0], shape_optical_flow[1], 3]
hsv = np.zeros(shape_hsv, np.float32)
# set saturation to 255
hsv[:,:,1] = 255
# create colorful illustration of optical flow
mag, ang = cv2.cartToPolar(optical_flow[:,:,0], optical_flow[:,:,1])
hsv[:,:,0] = ang*180/np.pi/2
hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)  # 8-bit HSV (H in [0, 180]) for imwrite
cv2.imwrite(output_path, bgr)
def vlad(descriptors, centers):
"""
Calculate the Vector of Locally Aggregated Descriptors (VLAD) which is a global descriptor from a group of
descriptors and centers that are codewords of a codebook, obtained for example with K-Means.
Args:
descriptors (numpy float matrix): The local descriptors.
centers (numpy float matrix): The centers are points representatives of the classes.
Returns:
numpy float array: The VLAD vector.
"""
dimensions = len(descriptors[0])
vlad_vector = np.zeros((len(centers), dimensions), dtype=np.float32)
for descriptor in descriptors:
nearest_center, center_idx = utils.find_nn(descriptor, centers)
        vlad_vector[center_idx] += descriptor - nearest_center
# L2 Normalization
    vlad_vector = cv2.normalize(vlad_vector, None)
vlad_vector = vlad_vector.flatten()
return vlad_vector
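# Usage sketch: the centers would normally come from k-means over a training
# set of local descriptors; random data stands in for real descriptors here,
# and utils.find_nn is the project's own nearest-neighbour helper:
import cv2
import numpy as np

train = np.random.rand(1000, 128).astype(np.float32)  # stand-in local descriptors
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
_, _, centers = cv2.kmeans(train, 16, None, criteria, 3, cv2.KMEANS_RANDOM_CENTERS)
query_descriptors = np.random.rand(200, 128).astype(np.float32)
v = vlad(query_descriptors, centers)  # 16 * 128 = 2048-dimensional VLAD vector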
def normalize_nn(transM, sigma=1):
"""
Normalize transition matrix using gaussian weighing
Input:
transM: (k,k)
sigma: var=sigma^2 of gaussian weight between elements
Output: transM: (k,k)
"""
# Make weights Gaussian and normalize
k = transM.shape[0]
transM[np.nonzero(transM)] = np.exp(
-np.square(transM[np.nonzero(transM)]) / sigma**2)
transM[np.arange(k), np.arange(k)] = 1.
normalization = np.dot(transM, np.ones(k))
# This is inefficient, bottom line is better ..
# transM = np.dot(np.diag(1. / normalization), transM)
transM = (1. / normalization).reshape((-1, 1)) * transM
return transM
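# Tiny worked example: nonzero distances become Gaussian affinities, the
# diagonal is pinned to 1, and every row is rescaled to sum to 1, giving a
# row-stochastic transition matrix:
import numpy as np

T = np.array([[0., 2.], [2., 0.]])
P = normalize_nn(T, sigma=2.)
# exp(-(2**2) / 2**2) = e**-1 ~= 0.368, so each row becomes [1, 0.368] / 1.368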
def consensus_vote(votes, transM, frameEnd, iters):
"""
Perform iterative consensus voting
"""
sTime = time.time()
for t in range(iters):
votes = np.dot(transM, votes)
# normalize per frame
for i in range(frameEnd.shape[0]):
currStartF = 1 + frameEnd[i - 1] if i > 0 else 0
currEndF = frameEnd[i]
frameVotes = np.max(votes[currStartF:1 + currEndF])
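            # frameVotes + (frameVotes <= 0) below guards against dividing by zero on empty frames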
votes[currStartF:1 + currEndF] /= frameVotes + (frameVotes <= 0)
eTime = time.time()
print('Consensus voting finished: %.2f s' % (eTime - sTime))
return votes
def detectTemplateMatching(self, img):
self.templateMatchingCurrentTime = cv2.getTickCount()
duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime)/cv2.getTickFrequency()
if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
self.foundFace = False
self.isTemplateMatchingRunning = False
return
faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
roi = self.getSubRect(img, self.trackedFaceROI)
match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
foundTemplate = (
minLoc[0] + self.trackedFaceROI[0],
minLoc[1] + self.trackedFaceROI[1],
self.trackedFaceTemplate[2],
self.trackedFaceTemplate[3])
self.trackedFaceTemplate = foundTemplate
self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
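# Minimal standalone version of the matching step above ('scene.png' is a
# placeholder; with TM_SQDIFF_NORMED the best match is at the *minimum*):
import cv2

img = cv2.imread('scene.png', 0)
template = img[50:100, 80:140]  # stand-in for the tracked face template
match = cv2.matchTemplate(img, template, cv2.TM_SQDIFF_NORMED)
cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
top_left = minLoc  # upper-left corner of the best match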
# generate_cascade_training_data.py - project: ObjectDetection, author: PhilippParis
def create_cascade_neg_data():
img = cv2.imread(FLAGS.negatives_spritesheet)
img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
height, width, _ = img.shape
c = 0
txt = ""
    for y in range(0, height, FLAGS.image_size):
        for x in range(0, width, FLAGS.image_size):
cv2.imwrite(FLAGS.output_dir + "/negatives/" + str(c) + ".png", img[y:y+FLAGS.image_size, x:x+FLAGS.image_size])
txt += "negatives/" + str(c) + ".png" + "\n"
c += 1
with open(FLAGS.output_dir + "/negatives.info", 'w') as file:
file.write(txt)
return c
# ========================================== #
def im_normalize(im, lo=0, hi=255, dtype='uint8'):
    return cv2.normalize(im, None, alpha=lo, beta=hi, norm_type=cv2.NORM_MINMAX,
                         dtype={'uint8': cv2.CV_8U,
                                'float32': cv2.CV_32F,
                                'float64': cv2.CV_64F}[dtype])
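# Usage sketch: stretch a float map (e.g. depth or disparity) into a
# displayable range:
import numpy as np

depth = np.random.rand(480, 640).astype(np.float32)  # stand-in depth map
vis8 = im_normalize(depth)                    # uint8 in [0, 255]
visf = im_normalize(depth, 0, 1, 'float32')   # float32 in [0, 1]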
def getProjectionMat(self, from_E=True):
ei,eii = self.getEpipoles()
    ei = cv2.normalize(ei, None).flatten()
    eii = cv2.normalize(eii, None).flatten()
    if from_E:
F21,F31 = self.getFundamentalMat()
H21 = H_from_E(F21)
H31 = H_from_E(F31)
v1 = r_[0.5,0.5,50,1.]
v2 = r_[1.,1.,51,1.]
for H in H21:
ln2 = cross(dot(H,v1)[:3],dot(H,v2)[:3])
pt3 = self.pl_(v1[:3]/v1[3], ln2)
pt3 /= pt3[2]
            print(pt3)
for H2 in H31:
            print(dot(H2, v1)[:3] / dot(H2, v1)[2])
return H21,H31
else:
Pi = eye(4)
Pi[:3,:3] = array([dot(t,eii) for t in self.T]).T
Pi[:3,3] = ei
# K = cv2.decomposeProjectionMatrix(Pi[:3])[0]
# Pi[:3] = dot(cv2.invert(K)[1],Pi[:3])
return Pi
def extract_color_histogram(image, bins=(8, 8, 8)):
# extract a 3D color histogram from the HSV color space using
# the supplied number of `bins` per channel
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1, 2], None, bins, [0, 180, 0, 256, 0, 256])
# handle normalizing the histogram if we are using OpenCV 2.4.X
if imutils.is_cv2():
hist = cv2.normalize(hist)
    # otherwise, perform "in place" normalization in OpenCV 3 (I
    # personally hate the way this is done)
else:
cv2.normalize(hist, hist)
# return the flattened histogram as the feature vector
return hist.flatten()
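# Usage sketch ('frame.png' is a placeholder; imutils handles the OpenCV 2/3
# API difference inside the function):
import cv2

img = cv2.imread('frame.png')
feat = extract_color_histogram(img)
print(feat.shape)  # (512,) for the default (8, 8, 8) bins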
def optical_flow(one, two):
"""
method taken from https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4
input: image_current, image_next (RGB images)
calculates optical flow magnitude and angle and places it into HSV image
"""
one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))  # fixed-size buffer: assumes 320x120 input frames
# set saturation
hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
    # obtain dense optical flow parameters
flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
pyr_scale=0.5,
levels=1,
winsize=10,
iterations=2,
poly_n=5,
poly_sigma=1.1,
flags=0)
# convert from cartesian to polar
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
# hue corresponds to direction
hsv[:,:,0] = ang * (180/ np.pi / 2)
# value corresponds to magnitude
hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # convert to float32 so cvtColor can consume it
    hsv = np.asarray(hsv, dtype=np.float32)
rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
return rgb_flow
def blur_image(self, save=False, show=False):
if self.part is None:
psf = self.PSFs
else:
psf = [self.PSFs[self.part]]
yN, xN, channel = self.shape
key, kex = self.PSFs[0].shape
delta = yN - key
assert delta >= 0, 'resolution of image should be higher than kernel'
result=[]
if len(psf) > 1:
for p in psf:
tmp = np.pad(p, delta // 2, 'constant')
cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# blured = np.zeros(self.shape)
                blured = cv2.normalize(self.original, None, alpha=0, beta=1,
                                       norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
result.append(np.abs(blured))
else:
psf = psf[0]
tmp = np.pad(psf, delta // 2, 'constant')
cv2.normalize(tmp, tmp, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            blured = cv2.normalize(self.original, None, alpha=0, beta=1,
                                   norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured[:, :, 0] = np.array(signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
blured[:, :, 1] = np.array(signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
blured[:, :, 2] = np.array(signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
blured = cv2.normalize(blured, blured, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
result.append(np.abs(blured))
self.result = result
if show or save:
self.__plot_canvas(show, save)
def compute(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frameH, frameW = frame.shape
descriptor = []
for row in range(self.rows):
for col in range(self.cols):
mask = np.zeros_like(frame)
            mask[(frameH // self.rows) * row:(frameH // self.rows) * (row + 1),
                 (frameW // self.cols) * col:(frameW // self.cols) * (col + 1)] = 1
hist = cv2.calcHist([frame], self.channel, mask, [self.bins], self.range)
hist = cv2.normalize(hist, None)
descriptor.append(hist)
    return np.concatenate(descriptor)
def test_features():
from atx.drivers.android_minicap import AndroidDeviceMinicap
cv2.namedWindow("preview")
d = AndroidDeviceMinicap()
# r, h, c, w = 200, 100, 200, 100
# track_window = (c, r, w, h)
# oldimg = cv2.imread('base1.png')
# roi = oldimg[r:r+h, c:c+w]
# hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# mask = cv2.inRange(hsv_roi, 0, 255)
# roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
# cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
try:
w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h // 2, w // 2))
cv2.imshow('preview', img)
hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(hist)
plt.show()
# if img.shape == oldimg.shape:
# # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
# # x, y, w, h = track_window
# cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
# cv2.imshow('preview', img)
# # cv2.imshow('preview', img)
cv2.waitKey(1)
except KeyboardInterrupt:
break
cv2.destroyWindow('preview')
def describe(image, mask = None):
hist = cv2.calcHist([image], [0, 1, 2], mask, [8,8,8], [0, 256, 0, 256, 0, 256])
cv2.normalize(hist, hist)
return hist.flatten()
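# Usage sketch: restricting the histogram to a region via the optional mask
# ('scene.png' is a placeholder path):
import cv2
import numpy as np

img = cv2.imread('scene.png')
mask = np.zeros(img.shape[:2], dtype=np.uint8)
mask[100:200, 150:300] = 255  # histogram is computed over this rectangle only
feat = describe(img, mask)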
def to_binary_mask(mask, t=0.00001):
mask = inverse_preprocessing(mask)
    ### Threshold the RGB image - this step increases sensitivity
mask[mask > t] = 255
mask[mask <= t] = 0
### To grayscale and normalize
mask_gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
mask_gray = cv2.normalize(src=mask_gray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
### Auto binary threshold
(thresh, mask_binary) = cv2.threshold(mask_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
return mask_binary
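# Minimal standalone version of the normalize + Otsu step above (prob stands in
# for a single-channel float prediction map; with THRESH_OTSU the fixed 128
# threshold is ignored and OpenCV picks the split from the histogram):
import cv2
import numpy as np

prob = np.random.rand(64, 64).astype(np.float32)
gray = cv2.normalize(prob, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
thresh, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)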
def getImageDescriptors_HOG_cdist(self, all_emb, ref_emb, ref_mask):
# unnormalized cosine distance for HOG
dist = numpy.dot(all_emb, ref_emb.T)
# normalize by length of query descriptor projected on reference
norm = numpy.sqrt(numpy.dot(numpy.square(all_emb), ref_mask.T))
dist /= norm
dist[numpy.isinf(dist)] = 0.
dist[numpy.isnan(dist)] = 0.
# dist[numpy.triu_indices(dist.shape[0], 1)] = numpy.maximum(dist[numpy.triu_indices(dist.shape[0], 1)],
# dist.T[numpy.triu_indices(dist.shape[0], 1)])
# dist[numpy.tril_indices(dist.shape[0], -1)] = 0.
# dist += dist.T
return dist
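# What the normalization above computes, per pair (q = a row of all_emb,
# r = a row of ref_emb, m = the matching row of ref_mask):
#
#   dist[i, j] = <q_i, r_j> / sqrt(sum_k q_ik**2 * m_jk)
#
# i.e. the dot product divided by the length of the query restricted to the
# reference's valid dimensions, an unnormalized one-sided cosine similarity.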
def saveVideoFrames(self, filename, images):
"""
Create a video with synthesized images
:param filename: name of file to save
:param images: video data
:return: None
"""
txt = 'Saving {}'.format(filename)
pbar = pb.ProgressBar(maxval=images.shape[0], widgets=[txt, pb.Percentage(), pb.Bar()])
pbar.start()
height = width = 128
# Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter('{}/synth_{}.avi'.format(self.subfolder, filename), fourcc, self.fps, (height, width))
    if not video.isOpened():
raise EnvironmentError("Error in creating video writer")
for i in range(images.shape[0]):
img = images[i]
        img = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img = cv2.resize(img, (height, width))
# write frame
video.write(img)
pbar.update(i)
video.release()
del video
cv2.destroyAllWindows()
pbar.finish()
def process_output(self, disparity):
cv8uc = cv2.normalize(disparity, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
if self.args.preview:
cv2.imshow("disparity", cv8uc)
cv2.waitKey(0)
cv2.imwrite(os.path.join(self.args.folder, self.args.output), cv8uc)
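# Usage sketch: a disparity map as produced by, e.g., block matching (the
# StereoBM parameters are illustrative; left.png/right.png are placeholders):
import cv2

stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
disparity = stereo.compute(cv2.imread('left.png', 0), cv2.imread('right.png', 0))
# self.process_output(disparity)  # scales to uint8, optionally previews, then saves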
def prep_img_save(img, b=5):
    return cv2.normalize(cv2.copyMakeBorder(img, b, b, b, b, cv2.BORDER_CONSTANT, value=0),
                         None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
def normalize(im):
    return cv2.normalize(im, None, 0, 255, norm_type=cv2.NORM_MINMAX)
def calculate_flow(self, frame_a, frame_b):
previous_frame = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
next_frame = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(
previous_frame,
next_frame,
None,
            pyr_scale=0.5, levels=3, winsize=15,
            iterations=3, poly_n=5, poly_sigma=1.2, flags=0
)
    # scale each flow component to [0, 255] for display
    horz = cv2.normalize(flow[..., 0], None, 0, 255, cv2.NORM_MINMAX)
    vert = cv2.normalize(flow[..., 1], None, 0, 255, cv2.NORM_MINMAX)
horz = horz.astype('uint8')
vert = vert.astype('uint8')
    cv2.imshow('Horizontal Component', horz)
    cv2.imshow('Vertical Component', vert)
k = cv2.waitKey(0) & 0xff
    if k == ord('s'):
cv2.imwrite('opticalflow_horz.pgm', horz)
cv2.imwrite('opticalflow_vert.pgm', vert)
cv2.destroyAllWindows()
# FaceRecognizer.py - project: Gabor-Filter-Face-Extraction, author: duycao2506
def build_filters(self, w, h,num_theta, fi, sigma_x, sigma_y, psi):
"Get set of filters for GABOR"
filters = []
for i in range(num_theta):
theta = ((i+1)*1.0 / num_theta) * np.pi
for f_var in fi:
kernel = self.get_gabor_kernel(w, h,sigma_x, sigma_y, theta, f_var, psi)
kernel = 2.0*kernel/kernel.sum()
# kernel = cv2.normalize(kernel, kernel, 1.0, 0, cv2.NORM_L2)
filters.append(kernel)
return filters
def distanceOfFV(self, fv1, fv2):
"distance of feature vector 1 and feature vector 2"
normset = []
for i in range(len(fv1)):
k = fv1[i]
p = fv2[i]
# k = cv2.normalize(fv1[i],k,1.0,0,norm_type=cv2.NORM_L2)
# p = cv2.normalize(fv2[i],p,1.0,0,norm_type=cv2.NORM_L2)
normset.append((p-k)**2.0)
sums = 0
sums = sum([i.sum() for i in normset])
    return mth.sqrt(sums) / 100000  # fixed empirical scale factor
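# Usage sketch tying the two methods above together, commented out because the
# surrounding class is not shown here; get_gabor_kernel's argument order is
# inferred from the build_filters call above:
# filters = recognizer.build_filters(31, 31, num_theta=4, fi=[0.1, 0.2],
#                                    sigma_x=4.0, sigma_y=4.0, psi=0.0)
# fv1 = [cv2.filter2D(face1, cv2.CV_32F, k) for k in filters]
# fv2 = [cv2.filter2D(face2, cv2.CV_32F, k) for k in filters]
# distance = recognizer.distanceOfFV(fv1, fv2)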