def _extract_spots(self) -> None:
    # Dilate and erode to 'clean' the spot (note that this distorts the number itself,
    # so we only do it for spot extraction).
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    img = cv2.dilate(self._img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=2)
    img = cv2.dilate(img, kernel, iterations=1)
    # Perform a simple blob detection.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 20  # The dot in 20pt font has an area of about 30.
    params.filterByCircularity = True
    params.minCircularity = 0.7
    params.filterByConvexity = True
    params.minConvexity = 0.8
    params.filterByInertia = True
    params.minInertiaRatio = 0.4
    detector = cv2.SimpleBlobDetector_create(params)
    self.spot_keypoints = detector.detect(img)
    # Log the intermediate image.
    img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]), color=(0, 0, 255),
                                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))

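The dilate/erode/dilate sequence above is a morphological close followed by an open with the same kernel; a minimal sketch of the same cleanup via cv2.morphologyEx (assuming a grayscale uint8 input) might look like this:

# Minimal sketch: the same cleanup expressed with morphologyEx.
# A close (dilate then erode) fills small gaps; an open (erode then dilate)
# removes small specks. Assumes a grayscale uint8 image.
import cv2

def clean_spots(img):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    return cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel)
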
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Define the range of green color in HSV.
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to keep only green pixels.
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image.
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # NOTE: the original snippet used an undefined `params`; default
    # SimpleBlobDetector parameters are assumed here.
    params = cv2.SimpleBlobDetector_Params()
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted mask (the detector looks for dark blobs by default).
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            # Sort ascending by size and keep the three smallest blobs.
            keypoints = sorted(keypoints, key=lambda s: s.size)[:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size.
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders

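A hedged usage sketch for add_blobs; the camera index and window name are assumptions for illustration:

# Hedged usage sketch: run add_blobs on live camera frames.
import cv2

cap = cv2.VideoCapture(0)  # assumed camera index
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('blobs', add_blobs(frame))  # window name is an assumption
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
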
def execute_BlobDetector(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except Exception:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im = 255 - im
    im2 = img
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = obj.Area
    params.filterByConvexity = True
    params.minConvexity = obj.Convexity / 200
    # Set up the detector with the parameters above.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size.
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            # The white circle/pixel are immediately overdrawn by the black
            # ones below (kept as in the original).
            cv2.circle(im, (x, y), 4, 255, 5)
            cv2.circle(im, (x, y), 4, 0, 5)
            im[y, x] = 255
            im[y, x] = 0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    else:
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            cv2.circle(im2, (x, y), 4, (255, 0, 0), 5)
            cv2.circle(im2, (x, y), 4, (0, 0, 0), 5)
            im2[y, x] = (255, 0, 0)
            im2[y, x] = (0, 0, 0)
        obj.Proxy.img = im2

def test_descriptors():
    img = cv2.imread(constants.TESTING_IMG_PATH)
    cv2.imshow("Normal Image", img)
    print("Normal Image")
    option = input("Enter [1] to use ORB features, or any other key to use SIFT.\n")
    start = time.time()
    # input() returns a string, so compare against the same key constant used below.
    if option == constants.ORB_FEAT_OPTION_KEY:
        orb = cv2.ORB_create()
        kp, des = orb.detectAndCompute(img, None)
    else:
        sift = cv2.SIFT_create()  # OpenCV >= 4.4; earlier versions need cv2.xfeatures2d.SIFT_create()
        kp, des = sift.detectAndCompute(img, None)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    des_name = constants.ORB_FEAT_NAME if option == constants.ORB_FEAT_OPTION_KEY else constants.SIFT_FEAT_NAME
    print("Elapsed time getting descriptors {0}".format(elapsed_time))
    print("Number of descriptors found {0}".format(len(des)))
    if des is not None and len(des) > 0:
        print("Dimension of descriptors {0}".format(len(des[0])))
    print("Name of descriptors used is {0}".format(des_name))
    img2 = cv2.drawKeypoints(img, kp, None)
    # plt.imshow(img2), plt.show()
    cv2.imshow("{0} descriptors".format(des_name), img2)
    print("Press any key to exit ...")
    cv2.waitKey()

def find_blobs(img):
    # Set up SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # Change thresholds (values above 255 are effectively the maximum for 8-bit images).
    params.minThreshold = 100
    params.maxThreshold = 5000
    # Filter by area.
    params.filterByArea = True
    params.minArea = 200
    # Filter by circularity.
    params.filterByCircularity = False
    params.minCircularity = 0.785
    # Filter by convexity.
    params.filterByConvexity = False
    params.minConvexity = 0.87
    # Filter by inertia.
    # params.filterByInertia = True
    # params.minInertiaRatio = 0.01
    # Set up the detector with these parameters.
    detector = cv2.SimpleBlobDetector_create(params)  # OpenCV 3+; cv2.SimpleBlobDetector(params) in 2.4
    # Detect blobs.
    keypoints = detector.detect(img)
    print(keypoints)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size.
    im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]),
                                          (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite("blobs.jpg", im_with_keypoints)

def find_keypoints(img):
    # Initiate FAST object with default values.
    fast = cv2.FastFeatureDetector_create()
    # Find and draw the keypoints.
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))
    # Print the default params.
    print("Threshold: ", fast.getThreshold())
    print("nonmaxSuppression: ", fast.getNonmaxSuppression())
    # print("neighborhood: ", fast.getType())
    print("Total Keypoints with nonmaxSuppression: ", len(kp))
    cv2.imwrite('fast_true.png', img2)

def draw_keypoints(self, im, keypoints, filename="keypoints.jpg"):
    self._log("drawing keypoints into '%s'..." % filename)
    rows, cols = im.shape

    def to_cv2_kp(kp):
        # kp = [<row>, <col>, <ori>, <octave_ind>, <layer_ind>]
        ratio = get_size_ratio_by_octave(kp[3])
        scale = get_scale_by_ind(kp[3], kp[4])
        # cv2.KeyPoint takes (x, y, size, angle); note x is the column coordinate.
        return cv2.KeyPoint(kp[1] / ratio, kp[0] / ratio, 10, kp[2] / PI * 180)

    kp_for_draw = list(map(to_cv2_kp, keypoints))
    im_kp = cv2.drawKeypoints(im, kp_for_draw, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite(filename, im_kp)

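For reference, a cv2.KeyPoint can be constructed directly with (x, y, size, angle), where x is the column coordinate and angle is in degrees:

# Minimal sketch: constructing a cv2.KeyPoint by hand.
import cv2

kp = cv2.KeyPoint(100.0, 50.0, 10.0, 45.0)  # x, y, size (diameter), angle
print(kp.pt, kp.size, kp.angle)  # -> (100.0, 50.0) 10.0 45.0
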
def cv2_match(im1, im2):
    mysift = SIFT()
    sift = cv2.SIFT_create()  # OpenCV >= 4.4; was cv2.SIFT() in 2.4
    bf = cv2.BFMatcher()
    kp1, dp1 = sift.detectAndCompute(im1, None)
    kp2, dp2 = sift.detectAndCompute(im2, None)
    matches_ = bf.knnMatch(dp1, dp2, k=2)
    print(len(matches_))
    # Apply Lowe's ratio test.
    good = []
    for m, n in matches_:
        if m.distance < 0.90 * n.distance:
            good.append(m)
    print(len(good))
    pos1 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp1]
    pos2 = [(int(kp.pt[1]), int(kp.pt[0])) for kp in kp2]
    matches = [(m.queryIdx, m.trainIdx, 0.15) for m in good]
    # The original used undefined names `im` and `imm`; the parameters im1/im2 are meant.
    cv2.imwrite("cvkp1.jpg", cv2.drawKeypoints(im1, kp1, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    cv2.imwrite("cvkp2.jpg", cv2.drawKeypoints(im2, kp2, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    mysift.draw_matches(im1, pos1, im2, pos2, matches, 'ckmatch.jpg')

def draw_keypoints(img, keypoints):
    """Draw keypoints"""
    # Convert to OpenCV KeyPoints.
    cv_kps = []
    for kp in keypoints:
        cv_kps.append(kp.as_cv_keypoint())
    # Draw keypoints.
    img = cv2.drawKeypoints(img, cv_kps, None, color=(0, 255, 0))
    return img

def draw_features(img, features):
    """Draw features"""
    # Convert to OpenCV KeyPoints.
    cv_kps = []
    for f in features:
        cv_kps.append(cv2.KeyPoint(f.pt[0], f.pt[1], f.size))
    # Draw keypoints.
    img = cv2.drawKeypoints(img, cv_kps, None, color=(0, 255, 0))
    return img

def describeSIFT(image):
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(image, None)
    # Draw keypoints:
    # import matplotlib.pyplot as plt
    # img2 = cv2.drawKeypoints(image, kp, None, (255, 0, 0), 4)
    # plt.imshow(img2), plt.show()
    return kp, des

def visualize_keypoints(image, keypoints):
    kp_image = np.array([])
    kp_image = cv2.drawKeypoints(image, keypoints, kp_image, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow(PROJ_WIN, kp_image)
    wait()

def main():
    checkOpennCVVersion()
    img1 = cv2.imread('napis_z_tlem.png', 0)  # the big image
    img2 = cv2.imread('napis.png', 0)  # the small image we search for in the big one
    orb = cv2.ORB()  # OpenCV 2.4 API; use cv2.ORB_create() in 3+
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Save the resulting keypoints to files.
    imgKP1 = cv2.drawKeypoints(img1, kp1)  # OpenCV 2.4 signature; 3+ requires an outImage argument
    cv2.imwrite('orb_keypoints_big.jpg', imgKP1)
    imgKP2 = cv2.drawKeypoints(img2, kp2)
    cv2.imwrite('orb_keypoints.jpg', imgKP2)
    # ORB produces binary descriptors, so Hamming distance (not L2) is the appropriate norm.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = matcher.knnMatch(des1, trainDescriptors=des2, k=2)
    pairs = filterMatches(kp1, kp2, matches)
    l1 = len(kp1)
    l2 = len(kp2)
    lp = len(pairs)
    r = (lp * 100) / l1
    print("{}%".format(r))
    cv2.waitKey()
    cv2.destroyAllWindows()
    return None

# Function called before main. To use ORB we must make sure we have OpenCV 2.4.
def draw_image_with_keypoints(img: np.ndarray, keypoints, window_title: str = "Image with keypoints") -> None:
    """An apparently unused method which is actually quite useful when debugging!"""
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the blob size.
    img_with_keypoints = cv2.drawKeypoints(img, keypoints, outImage=np.array([]), color=(0, 0, 255),
                                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    draw_image(img_with_keypoints, window_title)

The next four functions are from featuredetect.py in the project Compare-OpenCV-SIFT-SURF-FAST-ORB by chengtaow.
def sift_thread():
    sift = cv2.xfeatures2d.SIFT_create()
    (kps, descs) = sift.detectAndCompute(gray, None)
    cv2.drawKeypoints(gray, kps, img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('SIFT Algorithm', img)

def surf_thread():
    surf = cv2.xfeatures2d.SURF_create()
    (kps2, descs2) = surf.detectAndCompute(gray, None)
    cv2.drawKeypoints(gray, kps2, img2, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('SURF Algorithm', img2)

def fast_thread():
    fast = cv2.FastFeatureDetector_create()
    kps3 = fast.detect(gray, None)
    cv2.drawKeypoints(gray, kps3, img3, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('FAST Algorithm', img3)

def orb_thread():
    orb = cv2.ORB_create()
    kps4 = orb.detect(gray, None)
    (kps4, des4) = orb.compute(gray, kps4)
    cv2.drawKeypoints(gray, kps4, img4, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('ORB Algorithm', img4)

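The four *_thread functions above read the module-level globals gray, img, img2, img3 and img4. A hedged driver sketch (the image path is an assumption; SIFT/SURF require opencv-contrib-python):

# Hedged driver sketch for the *_thread functions above.
import cv2

img = cv2.imread('test.jpg')  # assumed input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img2, img3, img4 = img.copy(), img.copy(), img.copy()

# Called sequentially here: HighGUI is not thread-safe on every platform,
# despite the functions' "_thread" names.
for worker in (sift_thread, surf_thread, fast_thread, orb_thread):
    worker()
cv2.waitKey(0)
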
def main():
    img = None
    main_win = Windows_handler.WinHandler(title='Nox', class_name='Qt5QWindowIcon')
    main_box = main_win.get_bbox()
    px_handler = Pixel_handler.PixelSearch(win_handler=main_win)
    mouse = Mouse_handler.MouseMovement(window_handler=main_win)
    main_win.init_window()
    cv2.namedWindow('image_name')
    cv2.namedWindow('config')
    while True:
        img = px_handler.grab_window(bbox=main_box)
        img = px_handler.img_to_numpy(img, compound=False)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        orb = cv2.ORB_create()
        kp = orb.detect(img, None)
        kp, des = orb.compute(img, kp)
        img2 = cv2.drawKeypoints(img, kp, None)
        cv2.imshow('image_name', img2)
        cv2.setMouseCallback('image_name', mouse_event, param=img)
        k = cv2.waitKey(1)
        if k == ord('q'):  # press 'q' to exit
            cv2.destroyAllWindows()
            quit(0)

def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    Find all image positions.
    @return None if not found, else a tuple: (origin.shape, query.shape, positions)
    Might raise an exception.
    '''
    img1 = cv2.imread(query, 0)   # query image (small)
    img2 = cv2.imread(origin, 0)  # train image (big)
    # Initiate SIFT detector.
    sift = cv2.SIFT_create()  # OpenCV >= 4.4; was cv2.SIFT() in 2.4
    # Find the keypoints and descriptors with SIFT.
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    print(len(kp1), len(kp2))
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    # FLANN matcher.
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    print(len(kp1), len(kp2), 'good cnt:', len(good))
    if len(good) * 1.0 / len(kp1) < 0.5:
        # if len(good) < MIN_MATCH_COUNT:
        print("Not enough matches are found - %d/%d" % (len(good), len(kp1)))
        return img2.shape, img1.shape, []
    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])
    img3 = cv2.drawKeypoints(img1, queryPts, None)
    cv2.imwrite('image/query.png', img3)
    img3 = cv2.drawKeypoints(img2, trainPts, None)
    point = _middlePoint(trainPts)
    print('position in', point)
    if outfile:
        edge = 10
        top_left = (point[0] - edge, point[1] - edge)
        bottom_right = (point[0] + edge, point[1] + edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point]

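_middlePoint is not defined in the snippet above; a hypothetical sketch that averages the matched keypoints' coordinates (the real helper in the source project may differ):

# Hypothetical sketch of the undefined _middlePoint helper: average the
# (x, y) coordinates of the matched keypoints.
def _middlePoint(keypoints):
    xs = [kp.pt[0] for kp in keypoints]
    ys = [kp.pt[1] for kp in keypoints]
    return (int(sum(xs) / len(xs)), int(sum(ys) / len(ys)))
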
def process_loop(self):
    cap_sd = cv2.VideoCapture('pipe:%d' % self.pipe_r_sd)
    fps = cap_sd.get(cv2.CAP_PROP_FPS)
    fps = 24  # hard-coded override of the reported FPS
    self.ws.log('pr: opened video')
    det = cut_detector.ContentDetector()
    orb = cv2.ORB_create()
    i = 0
    scene = 0
    while cap_sd.isOpened():
        if self.do_stop:
            break
        ret, frame = cap_sd.read()
        if not ret:  # end of stream
            break
        # self.ws.log('pr: read frame', i)
        is_cut = det.process_frame(i, frame)
        kp = orb.detect(frame, None)
        kp, des = orb.compute(frame, kp)
        # img2 = cv2.drawKeypoints(frame, kp, None, color=(0, 255, 0), flags=0)
        # cv2.imshow('', img2)
        # cv2.waitKey(0)
        if is_cut:
            self.ws.log('pr: cut at', i)
            preview = 'previews/frame%04d_%d.png' % (scene, i)
            cv2.imwrite(preview, frame)
            self.ws.sendJSON({
                'scene': scene,
                'time': frame2time(i, fps),
                'preview': preview
            })
            scene += 1
        # Call the descriptor callback.
        self.desc_cb(i, des, is_cut)
        self.processed = i
        i += 1
    cap_sd.release()

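frame2time is not defined in the snippet above; a hypothetical sketch converting a frame index and FPS into an H:MM:SS.mmm timestamp (the real helper may differ):

# Hypothetical sketch of the frame2time helper used above.
def frame2time(frame_index, fps):
    total_seconds = frame_index / float(fps)
    h = int(total_seconds // 3600)
    m = int((total_seconds % 3600) // 60)
    s = total_seconds % 60
    return '%d:%02d:%06.3f' % (h, m, s)
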
def get_orb_keypoints(bd, image_min, image_max):
    """
    Computes the ORB key points.

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """
    # `rescale_intensity` is assumed to come from scikit-image:
    # from skimage.exposure import rescale_intensity
    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1
    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))
    patch_size = 31
    patch_size_d = patch_size * 3
    # Initiate the ORB detector.
    orb = cv2.ORB_create(nfeatures=int(.25 * (bd.shape[0] * bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)
    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d, patch_size_d, patch_size_d, cv2.BORDER_REFLECT)
    # Compute ORB keypoints.
    key_points = orb.detectAndCompute(bd, None)[0]
    # img = cv2.drawKeypoints(np.uint8(bd), key_points, np.uint8(bd).copy())
    return fill_key_points(np.float32(bd), key_points)[patch_size_d:-patch_size_d, patch_size_d:-patch_size_d]
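fill_key_points is not defined in the snippet above; a hypothetical sketch that rasterizes keypoint locations into an array of the input's shape (the real helper in the source project may differ):

# Hypothetical sketch of the fill_key_points helper: mark each keypoint
# location in a float32 array with the keypoint's size.
import numpy as np

def fill_key_points(arr, key_points):
    out = np.zeros_like(arr, dtype='float32')
    for kp in key_points:
        x, y = int(round(kp.pt[0])), int(round(kp.pt[1]))
        if 0 <= y < out.shape[0] and 0 <= x < out.shape[1]:
            out[y, x] = kp.size
    return out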