def test_descriptors():
    img = cv2.imread(constants.TESTING_IMG_PATH)
    cv2.imshow("Normal Image", img)
    print("Normal Image")
    option = input("Enter [1] to use ORB features or any other key to use SIFT.\n")
    start = time.time()
    # input() returns a string; ORB_FEAT_OPTION_KEY is assumed to hold the '1' key
    if option == constants.ORB_FEAT_OPTION_KEY:
        orb = cv2.ORB()  # cv2.ORB_create() in OpenCV 3+
        kp, des = orb.detectAndCompute(img, None)
    else:
        sift = cv2.SIFT()  # cv2.xfeatures2d.SIFT_create() in OpenCV 3+
        kp, des = sift.detectAndCompute(img, None)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    des_name = constants.ORB_FEAT_NAME if option == constants.ORB_FEAT_OPTION_KEY else constants.SIFT_FEAT_NAME
    print("Elapsed time getting descriptors: {0}".format(elapsed_time))
    if des is not None and len(des) > 0:
        print("Number of descriptors found: {0}".format(len(des)))
        print("Dimension of descriptors: {0}".format(len(des[0])))
    print("Name of the descriptors used: {0}".format(des_name))
    img2 = cv2.drawKeypoints(img, kp)  # OpenCV 2.4 API; OpenCV 3+ requires an explicit outImage argument
    # plt.imshow(img2), plt.show()
    cv2.imshow("{0} descriptors".format(des_name), img2)
    print("Press any key to exit ...")
    cv2.waitKey()
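# `utils.humanize_time` is used throughout these excerpts but not shown. A
# minimal sketch of a hypothetical implementation, assuming it formats a span
# in seconds as H:MM:SS.mmm:
def humanize_time_sketch(seconds):
    hours, rest = divmod(seconds, 3600)
    minutes, secs = divmod(rest, 60)
    return "{0:.0f}:{1:02.0f}:{2:06.3f}".format(hours, minutes, secs)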
def test_codebook():
    dataset = pickle.load(open(constants.DATASET_OBJ_FILENAME, "rb"))
    option = input("Enter [1] to use ORB features or [2] to use SIFT features.\n")
    # The descriptor helpers expect a key code (per their docstrings, ord('1') == 49 selects ORB)
    option = ord(str(option))
    start = time.time()
    des = descriptors.all_descriptors(dataset, dataset.get_train_set(), option)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    print("Elapsed time getting all the descriptors: {0}".format(elapsed_time))
    k = 64
    des_name = constants.ORB_FEAT_NAME if option == constants.ORB_FEAT_OPTION else constants.SIFT_FEAT_NAME
    codebook_filename = "codebook_{0}_{1}.csv".format(k, des_name)
    start = time.time()
    codebook = descriptors.gen_codebook(dataset, des, k)
    end = time.time()
    elapsed_time = utils.humanize_time(end - start)
    print("Elapsed time calculating the k-means for the codebook: {0}".format(elapsed_time))
    np.savetxt(codebook_filename, codebook, delimiter=constants.NUMPY_DELIMITER)
    print("Codebook saved in {0}, press any key to exit ...".format(codebook_filename))
    cv2.waitKey()
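# `descriptors.gen_codebook` is not included in these excerpts. A minimal
# sketch of what it could look like, assuming a plain k-means over the stacked
# descriptor matrix (the `dataset` argument is kept only to match the call
# above; the function name is hypothetical):
def gen_codebook_sketch(dataset, des, k=64):
    from scipy.cluster.vq import kmeans
    # kmeans expects float observations; ORB descriptors are uint8
    codebook, distortion = kmeans(des.astype(np.float32), k)
    return codebook  # k rows, one cluster center (visual word) per row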
def orb(img):
    """
    Calculates the ORB descriptors for an image (any resizing happens in
    descriptors_from_class, before this function is called).
    Args:
        img (BGR matrix): The image that will be used.
    Returns:
        numpy uint8 matrix: The descriptors found in the image, one per row.
    """
    orb = cv2.ORB()  # cv2.ORB_create() in OpenCV 3+
    kp, des = orb.detectAndCompute(img, None)
    return des
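# descriptors_from_class below also calls a `sift(img)` helper that is not
# included in these excerpts. A minimal sketch, mirroring orb() above
# (OpenCV 2.4 API; cv2.xfeatures2d.SIFT_create() in OpenCV 3+):
def sift(img):
    sift = cv2.SIFT()
    kp, des = sift.detectAndCompute(img, None)
    return des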
def all_descriptors(dataset, class_list, option=constants.ORB_FEAT_OPTION):
    """
    Gets every local descriptor of a set with different classes (this is useful for generating a codebook).
    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        class_list (list of arrays of strings): Each element holds the information for one class and is an
            array of strings with the paths to the images of that class.
        option (integer): It's 49 (the key '1') if ORB features are going to be used, else SIFT features are used.
    Returns:
        numpy float matrix: Each row is a descriptor found in an image of the set.
    """
    des = None
    for i in range(len(class_list)):
        message = "*** Getting descriptors for class number {0} of {1} ***".format(i, len(class_list))
        print(message)
        class_img_paths = class_list[i]
        new_des = descriptors_from_class(dataset, class_img_paths, i, option)
        if des is None:
            des = new_des
        else:
            des = np.vstack((des, new_des))
    message = "*****************************\n" \
              "Finished getting all the descriptors\n"
    print(message)
    print("Total number of descriptors: {0}".format(len(des)))
    if len(des) > 0:
        print("Dimension of descriptors: {0}".format(len(des[0])))
        print("First descriptor:\n{0}".format(des[0]))
    return des
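# Note on the accumulation above: np.vstack inside the loop copies the whole
# matrix on every iteration, which is quadratic in the total number of rows. A
# sketch of the usual alternative (the function name is hypothetical):
# collect the per-class blocks in a list and stack once at the end.
def all_descriptors_fast(dataset, class_list, option=constants.ORB_FEAT_OPTION):
    blocks = []
    for i, class_img_paths in enumerate(class_list):
        new_des = descriptors_from_class(dataset, class_img_paths, i, option)
        if new_des is not None:
            blocks.append(new_des)
    return np.vstack(blocks) if blocks else None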
def main():
    checkOpennCVVersion()
    img1 = cv2.imread('napis_z_tlem.png', 0)  # the large image
    img2 = cv2.imread('napis.png', 0)  # the small image we look for inside the large one
    orb = cv2.ORB()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Write the resulting keypoints to files
    imgKP1 = cv2.drawKeypoints(img1, kp1)
    cv2.imwrite('orb_keypoints_big.jpg', imgKP1)
    imgKP2 = cv2.drawKeypoints(img2, kp2)
    cv2.imwrite('orb_keypoints.jpg', imgKP2)
    # ORB descriptors are binary strings, so they are compared with the Hamming norm
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = matcher.knnMatch(des1, trainDescriptors=des2, k=2)
    pairs = filterMatches(kp1, kp2, matches)
    l1 = len(kp1)
    l2 = len(kp2)
    lp = len(pairs)
    r = (lp * 100.0) / l1
    print("{0}%".format(r))
    cv2.waitKey()
    cv2.destroyAllWindows()
    return None
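# `filterMatches` above is not part of this excerpt. A hypothetical sketch,
# assuming it applies Lowe's ratio test to the knnMatch output and returns the
# surviving keypoint pairs:
def filterMatches_sketch(kp1, kp2, matches, ratio=0.75):
    pairs = []
    for m in matches:
        # keep a match only if it is clearly better than the second-best candidate
        if len(m) == 2 and m[0].distance < ratio * m[1].distance:
            pairs.append((kp1[m[0].queryIdx], kp2[m[0].trainIdx]))
    return pairs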
# Function called before main(). To use cv2.ORB we have to make sure we are running OpenCV version 2.4.
def Orb(img):
    orb = cv2.ORB()
    kps, des = orb.detectAndCompute(img, None)
    return kps, des
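# The constructor name changed between OpenCV versions: cv2.ORB() in 2.4 became
# cv2.ORB_create() in 3.x and later. A small sketch of a version-agnostic
# helper (the function name is hypothetical):
def make_orb(nfeatures=500):
    if hasattr(cv2, "ORB_create"):
        return cv2.ORB_create(nfeatures=nfeatures)
    return cv2.ORB(nfeatures)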
def __init__(self, descriptor_type):
    self.rootsift = False
    lists = ['sift', 'rootsift', 'orb', 'surf']
    # Compare strings with `==`; identity checks with `is` are unreliable
    if descriptor_type == 'sift':
        self.lfe = cv2.SIFT()
    elif descriptor_type == 'surf':
        self.lfe = cv2.SURF()
    elif descriptor_type == 'rootsift':
        self.lfe = cv2.SIFT()
        self.rootsift = True  # flag that the RootSIFT transform should be applied
    elif descriptor_type == 'orb':
        self.lfe = cv2.ORB()
    else:
        assert descriptor_type in lists
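# RootSIFT (Arandjelovic and Zisserman, 2012) is plain SIFT followed by an
# L1 normalization and an element-wise square root. A minimal sketch of the
# transform the `rootsift` flag above presumably triggers elsewhere (the
# function name is hypothetical):
def to_rootsift(des, eps=1e-7):
    des = des / (np.abs(des).sum(axis=1, keepdims=True) + eps)  # L1 normalize
    return np.sqrt(des)  # Hellinger kernel trick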
def findMatchesBetweenImages(image_1, image_2):
    """ Return the top 10 list of matches between two input images.
    This function detects and computes SIFT (or ORB) features from the input
    images and returns the best matches: L2 distance for SIFT descriptors,
    Hamming distance for binary ORB descriptors.
    Args:
        image_1 (numpy.ndarray): The first image (grayscale).
        image_2 (numpy.ndarray): The second image (grayscale).
    Returns:
        image_1_kp (list): The image_1 keypoints, the elements are of type
            cv2.KeyPoint.
        image_2_kp (list): The image_2 keypoints, the elements are of type
            cv2.KeyPoint.
        matches (list): A list of matches, length 10. Each item in the list is
            of type cv2.DMatch.
    """
    # matches - type: list of cv2.DMatch
    matches = None
    # image_1_kp - type: list of cv2.KeyPoint items
    image_1_kp = None
    # image_1_desc - type: numpy.ndarray of numpy.float32 values
    image_1_desc = None
    # image_2_kp - type: list of cv2.KeyPoint items
    image_2_kp = None
    # image_2_desc - type: numpy.ndarray of numpy.float32 values
    image_2_desc = None
    # WRITE YOUR CODE HERE.
    # 1. Compute SIFT keypoints and descriptors for both images
    sift = cv2.SIFT()  # cv2.xfeatures2d.SIFT_create() in OpenCV 3+
    image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)
    image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)
    # 2. Create a brute-force matcher with crossCheck enabled. SIFT descriptors
    #    are float vectors, so NORM_L2 is used here; NORM_HAMMING only applies
    #    to binary descriptors such as ORB.
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    # 3. Compute the matches between both images
    matches = bf.match(image_1_desc, image_2_desc)
    # 4. Sort the matches by distance so the best matches come first
    matches = sorted(matches, key=lambda x: x.distance)
    # 5. Return the keypoints of both images and the top 10 matches
    return image_1_kp, image_2_kp, matches[:10]
    # END OF FUNCTION.
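# A possible usage sketch (the image file names here are hypothetical):
def demo_find_matches():
    img_a = cv2.imread("scene.png", 0)  # load as grayscale
    img_b = cv2.imread("template.png", 0)
    kp_a, kp_b, top10 = findMatchesBetweenImages(img_a, img_b)
    for m in top10:
        print("queryIdx={0} trainIdx={1} distance={2:.1f}".format(m.queryIdx, m.trainIdx, m.distance))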
def descriptors_from_class(dataset, class_img_paths, class_number, option=constants.ORB_FEAT_OPTION):
    """
    Gets all the local descriptors for a class. If an image has a side longer than 640 pixels it is resized,
    leaving the biggest side at 640 pixels and preserving the aspect ratio of the other side.
    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        class_img_paths (array of strings): The paths for each image in a certain class.
        class_number (integer): The number of the class.
        option (integer): If this is 49 (the key '1') ORB features are used, else SIFT.
    Returns:
        numpy float matrix: Each row is a descriptor found in an image of the class.
    """
    des = None
    # Report progress roughly every STEP_PERCENTAGE percent; max(1, ...) avoids a
    # modulo-by-zero below when the class has only a few images
    step = max(1, (constants.STEP_PERCENTAGE * len(class_img_paths)) // 100)
    for i in range(len(class_img_paths)):
        img_path = class_img_paths[i]
        img = cv2.imread(img_path)
        resize_to = 640
        h, w, channels = img.shape
        if h > resize_to or w > resize_to:
            img = utils.resize(img, resize_to, h, w)
        if option == constants.ORB_FEAT_OPTION:
            des_name = "ORB"
            new_des = orb(img)
        else:
            des_name = "SIFT"
            new_des = sift(img)
        if new_des is not None:
            if des is None:
                des = np.array(new_des, dtype=np.float32)
            else:
                des = np.vstack((des, np.array(new_des, dtype=np.float32)))
        # Print a message to show the status of the function
        if i % step == 0:
            percentage = (100 * i) / len(class_img_paths)
            message = "Calculated {0} descriptors for image {1} of {2} ({3}%) of class number {4} ...".format(
                des_name, i, len(class_img_paths), percentage, class_number
            )
            print(message)
    message = "* Finished getting the descriptors for class number {0} *".format(class_number)
    print(message)
    print("Number of descriptors in class: {0}".format(len(des)))
    dataset.set_class_count(class_number, len(des))
    return des
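# `utils.resize` is not included in these excerpts. A minimal sketch matching
# the call above (the function name is hypothetical): shrink the image so its
# longest side becomes `resize_to`, keeping the aspect ratio.
def resize_sketch(img, resize_to, h, w):
    scale = float(resize_to) / max(h, w)
    new_size = (int(round(w * scale)), int(round(h * scale)))  # cv2 expects (width, height)
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)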
# servoing_designed_features_quad_panda3d_env.py, from the citysim3d project by alexlee-gk
def __init__(self, action_space, feature_type=None, filter_features=None,
             max_time_steps=100, distance_threshold=4.0, **kwargs):
    """
    filter_features indicates whether to filter out key points that are not
    on the object in the current image. Key points in the target image are
    always filtered out.
    """
    SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
    ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)
    lens = self.camera_node.node().getLens()
    self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                        np.array([np.inf, lens.getFar(), np.inf]))
    film_size = tuple(int(s) for s in lens.getFilmSize())
    self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                      size=film_size,
                                                      near_far=(lens.getNear(), lens.getFar()),
                                                      hfov=lens.getFov())
    for cam in self.mask_camera_sensor.cam:
        cam.reparentTo(self.camera_sensor.cam)
    # Default to filtering when no value is given; otherwise honor the argument
    self.filter_features = True if filter_features is None else filter_features
    self._feature_type = feature_type or 'sift'
    if cv2.__version__.split('.')[0] == '3':
        from cv2.xfeatures2d import SIFT_create, SURF_create
        from cv2 import ORB_create
        if self.feature_type == 'orb':
            # https://github.com/opencv/opencv/issues/6081
            cv2.ocl.setUseOpenCL(False)
    else:
        SIFT_create = cv2.SIFT
        SURF_create = cv2.SURF
        ORB_create = cv2.ORB
    if self.feature_type == 'sift':
        self._feature_extractor = SIFT_create()
    elif self.feature_type == 'surf':
        self._feature_extractor = SURF_create()
    elif self.feature_type == 'orb':
        self._feature_extractor = ORB_create()
    else:
        raise ValueError("Unknown feature extractor %s" % self.feature_type)
    if self.feature_type == 'orb':
        # Binary ORB descriptors are compared with the Hamming norm
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    self._target_key_points = None
    self._target_descriptors = None
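# The docstring above says key points not on the object can be filtered out. A
# hypothetical sketch of such a filter, assuming `mask` is an HxW uint8 array
# (e.g. from Panda3dMaskCameraSensor) where nonzero pixels belong to the object:
def filter_key_points_by_mask(key_points, descriptors, mask):
    kept_kp, kept_des = [], []
    for kp, des in zip(key_points, descriptors):
        x, y = int(round(kp.pt[0])), int(round(kp.pt[1]))
        if 0 <= y < mask.shape[0] and 0 <= x < mask.shape[1] and mask[y, x]:
            kept_kp.append(kp)
            kept_des.append(des)
    return kept_kp, np.asarray(kept_des)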
def TestKptMatch():
    img1 = cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img1.ppm", cv2.IMREAD_COLOR)
    img2 = cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img2.ppm", cv2.IMREAD_COLOR)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    gap_width = 20
    black_gap = npy.zeros((img1.shape[0], gap_width), dtype=npy.uint8)
    # objSIFT = cv2.SIFT(500)
    # kpt1, desc1 = objSIFT.detectAndCompute(gray1, None)
    # kpt2, desc2 = objSIFT.detectAndCompute(gray2, None)
    # objMatcher = cv2.BFMatcher(cv2.NORM_L2)
    # matches = objMatcher.knnMatch(desc1, desc2, k=2)
    objORB = cv2.ORB(500)
    kpt1, desc1 = objORB.detectAndCompute(gray1, None)
    kpt2, desc2 = objORB.detectAndCompute(gray2, None)
    objMatcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = objMatcher.knnMatch(desc1, desc2, k=2)
    # Lowe's ratio test: keep a match only if it is clearly better than the
    # second-best candidate
    goodMatches = []
    for bm1, bm2 in matches:
        if bm1.distance < 0.7 * bm2.distance:
            goodMatches.append(bm1)
    if len(goodMatches) > 10:
        ptsFrom = npy.float32([kpt1[bm.queryIdx].pt for bm in goodMatches]).reshape(-1, 1, 2)
        ptsTo = npy.float32([kpt2[bm.trainIdx].pt for bm in goodMatches]).reshape(-1, 1, 2)
        matH, matchMask = cv2.findHomography(ptsFrom, ptsTo, cv2.RANSAC, 5.0)
        # Draw the two images side by side and connect the RANSAC inliers
        imgcnb = npy.concatenate((gray1, black_gap, gray2), axis=1)
        plt.figure(1, figsize=(15, 6))
        plt.imshow(imgcnb, cmap="gray")
        idx = 0
        for bm in goodMatches:
            if 1 == matchMask[idx]:
                kptFrom = kpt1[bm.queryIdx]
                kptTo = kpt2[bm.trainIdx]
                plt.plot(kptFrom.pt[0], kptFrom.pt[1], "rs",
                         markerfacecolor="none", markeredgecolor="r", markeredgewidth=2)
                plt.plot(kptTo.pt[0] + img1.shape[1] + gap_width, kptTo.pt[1], "bo",
                         markerfacecolor="none", markeredgecolor="b", markeredgewidth=2)
                plt.plot([kptFrom.pt[0], kptTo.pt[0] + img1.shape[1] + gap_width],
                         [kptFrom.pt[1], kptTo.pt[1]], "g-", linewidth=2)
            idx += 1
        plt.axis("off")
        plt.show()