def Surf(gray):
    # Detect SURF keypoints and compute descriptors on a grayscale image
    # (cv2.SURF is the OpenCV 2.4 API).
    surf = cv2.SURF()
    kps, des = surf.detectAndCompute(gray, None)
    return kps, des
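cv2.SURF exists only in OpenCV 2.4 builds; in OpenCV 3+ SURF moved to the contrib xfeatures2d module. A minimal sketch of the same helper against that newer API (the helper name and the hessianThreshold value of 400 are illustrative assumptions):

import cv2

def surf_cv3(gray, hessian_threshold=400):
    # SURF is patented and ships only with opencv-contrib builds of OpenCV 3+
    surf = cv2.xfeatures2d.SURF_create(hessianThreshold=hessian_threshold)
    kps, des = surf.detectAndCompute(gray, None)
    return kps, des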
Example source code for the Python SURF class
def __init__(self, descriptor_type):
    self.rootsift = False
    lists = ['sift', 'rootsift', 'orb', 'surf']
    if descriptor_type == 'sift':
        self.lfe = cv2.SIFT()
    elif descriptor_type == 'surf':
        self.lfe = cv2.SURF()
    elif descriptor_type == 'rootsift':
        # RootSIFT reuses the SIFT extractor and post-processes its descriptors
        self.lfe = cv2.SIFT()
        self.rootsift = True
    elif descriptor_type == 'orb':
        self.lfe = cv2.ORB()
    else:
        assert(descriptor_type in lists)
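When the rootsift flag is set, the extractor presumably applies the RootSIFT transform of Arandjelovic and Zisserman on top of the raw SIFT descriptors: L1-normalize each descriptor, then take the element-wise square root. A minimal sketch of that post-processing step (the helper name and the eps value are assumptions, not part of the original class):

import numpy as np

def to_rootsift(des, eps=1e-7):
    # L1-normalize each SIFT descriptor, then take the element-wise square root,
    # so that L2 distance between RootSIFT vectors approximates the Hellinger
    # kernel on the original SIFT histograms.
    if des is None:
        return None
    des = des / (np.abs(des).sum(axis=1, keepdims=True) + eps)
    return np.sqrt(des)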
def _extract_feature(X, feature):
    """Performs feature extraction

    :param X: data (rows=images, cols=pixels)
    :param feature: which feature to extract
        - None:   no feature is extracted
        - "gray": grayscale features
        - "rgb":  RGB features
        - "hsv":  HSV features
        - "surf": SURF features
        - "hog":  HOG features
    :returns: X (rows=samples, cols=features)
    """
    # transform color space
    if feature == 'gray' or feature == 'surf':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2GRAY) for x in X]
    elif feature == 'hsv':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2HSV) for x in X]

    # operate on smaller images
    small_size = (32, 32)
    X = [cv2.resize(x, small_size) for x in X]

    # extract features
    if feature == 'surf':
        # OpenCV 2.4 API: upright (no orientation) and extended (128-dim) SURF
        surf = cv2.SURF(400)
        surf.upright = True
        surf.extended = True
        num_surf_features = 36

        # create a dense grid of keypoints
        # (cv2.FeatureDetector_create is available in OpenCV 2.4 only)
        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(np.zeros(small_size).astype(np.uint8))

        # compute keypoints and descriptors
        kp_des = [surf.compute(x, kp) for x in X]

        # the second element is the descriptor array: keep the descriptors of
        # the first num_surf_features keypoints
        X = [d[1][:num_surf_features, :] for d in kp_des]
    elif feature == 'hog':
        # histogram of oriented gradients
        # (integer division keeps the size tuples valid ints under Python 3)
        block_size = (small_size[0] // 2, small_size[1] // 2)
        block_stride = (small_size[0] // 4, small_size[1] // 4)
        cell_size = block_stride
        num_bins = 9
        hog = cv2.HOGDescriptor(small_size, block_size, block_stride,
                                cell_size, num_bins)
        X = [hog.compute(x) for x in X]
    elif feature is not None:
        # normalize all intensities to be between 0 and 1
        X = np.array(X).astype(np.float32) / 255
        # subtract the mean of each image
        X = [x - np.mean(x) for x in X]

    X = [x.flatten() for x in X]
    return X
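A minimal usage sketch, assuming X is a list of BGR images loaded with cv2.imread (the file names are placeholders, not part of the original code):

import cv2

# load a few BGR training images (paths are illustrative)
X = [cv2.imread(f) for f in ("img0.png", "img1.png", "img2.png")]

X_hog = _extract_feature(X, "hog")    # one flat HOG vector per image
X_gray = _extract_feature(X, "gray")  # flattened, mean-subtracted grayscale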
def sift(imageval):
    # decode raw image bytes and extract SIFT keypoints/descriptors (OpenCV 2.4 API)
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    #surf = cv2.SURF(400)
    sift = cv2.SIFT(40)
    kp, des = sift.detectAndCompute(gray, None)
    #kp, des = surf.detectAndCompute(gray, None)
    #print len(kp)
def surf(imageval):
    # decode raw image bytes and extract SURF keypoints/descriptors (OpenCV 2.4 API)
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)
    surf = cv2.SURF(40)
    #sift = cv2.SIFT(40)
    #kp, des = sift.detectAndCompute(gray, None)
    kp, des = surf.detectAndCompute(gray, None)
    #print len(kp)
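Neither helper returns or matches its descriptors; a minimal sketch of how the SURF descriptors of two grayscale images could be compared with a brute-force matcher and Lowe's ratio test (the helper name and the 0.75 threshold are illustrative assumptions):

import cv2

def match_surf(gray1, gray2, hessian_threshold=400, ratio=0.75):
    surf = cv2.SURF(hessian_threshold)  # OpenCV 2.4; use cv2.xfeatures2d.SURF_create on 3+
    kp1, des1 = surf.detectAndCompute(gray1, None)
    kp2, des2 = surf.detectAndCompute(gray2, None)
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    good = []
    for pair in matcher.knnMatch(des1, des2, k=2):
        # keep matches whose best distance is clearly smaller than the second best
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return kp1, kp2, good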
Source file: servoing_designed_features_quad_panda3d_env.py (project: citysim3d, author: alexlee-gk)
def __init__(self, action_space, feature_type=None, filter_features=None,
             max_time_steps=100, distance_threshold=4.0, **kwargs):
    """
    filter_features indicates whether to filter out key points that are not
    on the object in the current image. Key points in the target image are
    always filtered out.
    """
    SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
    ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)

    lens = self.camera_node.node().getLens()
    self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                        np.array([np.inf, lens.getFar(), np.inf]))
    film_size = tuple(int(s) for s in lens.getFilmSize())
    self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                      size=film_size,
                                                      near_far=(lens.getNear(), lens.getFar()),
                                                      hfov=lens.getFov())
    for cam in self.mask_camera_sensor.cam:
        cam.reparentTo(self.camera_sensor.cam)

    # filter features by default; otherwise honor the caller's choice
    self.filter_features = True if filter_features is None else filter_features
    self._feature_type = feature_type or 'sift'
    if cv2.__version__.split('.')[0] == '3':
        from cv2.xfeatures2d import SIFT_create, SURF_create
        from cv2 import ORB_create
        if self.feature_type == 'orb':
            # https://github.com/opencv/opencv/issues/6081
            cv2.ocl.setUseOpenCL(False)
    else:
        # OpenCV 2.4 keeps the extractors directly on the cv2 module
        SIFT_create = cv2.SIFT
        SURF_create = cv2.SURF
        ORB_create = cv2.ORB
    if self.feature_type == 'sift':
        self._feature_extractor = SIFT_create()
    elif self.feature_type == 'surf':
        self._feature_extractor = SURF_create()
    elif self.feature_type == 'orb':
        self._feature_extractor = ORB_create()
    else:
        raise ValueError("Unknown feature extractor %s" % self.feature_type)
    # ORB descriptors are binary, so they are matched with Hamming distance
    if self.feature_type == 'orb':
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    self._target_key_points = None
    self._target_descriptors = None
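The constructor only sets up the extractor, the matcher, and the empty target state; a minimal sketch of how the stored target descriptors could later be matched against the current camera image (the method name and its image argument are assumptions, not part of the original class):

def _match_against_target(self, image):
    # extract features from the current BGR camera image and match them against
    # the stored target descriptors; crossCheck=True in the constructor already
    # enforces mutual best matches
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    key_points, descriptors = self._feature_extractor.detectAndCompute(gray, None)
    if self._target_descriptors is None or descriptors is None:
        return []
    return self._matcher.match(descriptors, self._target_descriptors)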