Python example source code using cv2.CV_LOAD_IMAGE_COLOR
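All of the snippets below come from Python 2 projects written against OpenCV 2.x, where the imread/imdecode flags were named cv2.CV_LOAD_IMAGE_*. OpenCV 3+ removed those constants in favor of cv2.IMREAD_*; a minimal compatibility shim like the following (an illustrative patch, not part of any of the original projects) lets the snippets run unmodified on a newer OpenCV:

import cv2

# OpenCV 3+ dropped the CV_LOAD_IMAGE_* names; restore them as aliases.
if not hasattr(cv2, 'CV_LOAD_IMAGE_COLOR'):
    cv2.CV_LOAD_IMAGE_COLOR = cv2.IMREAD_COLOR          # flag value 1
    cv2.CV_LOAD_IMAGE_GRAYSCALE = cv2.IMREAD_GRAYSCALE  # flag value 0
    cv2.CV_LOAD_IMAGE_UNCHANGED = cv2.IMREAD_UNCHANGED  # flag value -1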
def get_image_compressed(self):
    rospy.loginfo("Getting image...")
    image_msg = rospy.wait_for_message(
        "/wide_stereo/left/image_raw/compressed",
        CompressedImage)
    rospy.loginfo("Got image!")
    # Image to numpy array
    np_arr = np.fromstring(image_msg.data, np.uint8)
    # Decode to cv2 image and store
    cv2_img = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    img_file_path = "/tmp/telegram_last_image.png"
    cv2.imwrite(img_file_path, cv2_img)
    rospy.loginfo("Saved to: " + img_file_path)
    return img_file_path
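The fromstring/imdecode pair above is the decode pattern most snippets on this page share. np.fromstring is deprecated for binary data in NumPy 1.14+; on a current stack the equivalent decode step would be:

# Modern equivalent of the decode step above (NumPy 1.14+, OpenCV 3+):
np_arr = np.frombuffer(image_msg.data, np.uint8)
cv2_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)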
def dirClassificationSentiment(dirName, modelName, modelType):
    types = ('*.jpg', '*.png',)
    filesList = []
    for files in types:
        filesList.extend(glob.glob(os.path.join(dirName, files)))
    filesList = sorted(filesList)
    print filesList
    Features = []
    plt.close('all')
    ax = plt.gca()
    plt.hold(True)
    for fi in filesList:
        P, classNames = fileClassification(fi, modelName, modelType)
        im = cv2.imread(fi, cv2.CV_LOAD_IMAGE_COLOR)
        Width = 0.1; Height = 0.1; startX = P[classNames.index("positive")]; startY = 0
        myaximage = ax.imshow(cv2.cvtColor(im, cv2.cv.CV_RGB2BGR),
                              extent=(startX - Width / 2.0, startX + Width / 2.0,
                                      startY - Height / 2.0, startY + Height / 2.0),
                              alpha=1.0, zorder=-1)
        plt.axis((0, 1, -0.1, 0.1))
        plt.show(block=False)
        plt.draw()
    plt.show(block=True)
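Note on the conversion code above: the cv2.cv submodule (and with it cv2.cv.CV_RGB2BGR) was removed in OpenCV 3; the same conversion codes now live directly on cv2. Since imread returns BGR and matplotlib expects RGB, the portable spelling of the swap is:

# cv2.cv is gone in OpenCV 3+; use the cv2.COLOR_* codes instead:
im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # BGR->RGB swap for plotting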
def visualizeFeatures(Features, Files, Names):
    y_eig, coeff = pcaDimRed(Features, 2)
    plt.close("all")
    print y_eig
    plt.subplot(2, 1, 1)
    ax = plt.gca()
    for i in range(len(Files)):
        im = cv2.imread(Files[i], cv2.CV_LOAD_IMAGE_COLOR)
        Width = 0.2; Height = 0.2; startX = y_eig[i][0]; startY = y_eig[i][1]
        print startX, startY
        myaximage = ax.imshow(cv2.cvtColor(im, cv2.cv.CV_RGB2BGR),
                              extent=(startX - Width / 2.0, startX + Width / 2.0,
                                      startY - Height / 2.0, startY + Height / 2.0),
                              alpha=1.0, zorder=-1)
    plt.axis((-3, 3, -3, 3))
    # Plot features
    plt.subplot(2, 1, 2)
    ax = plt.gca()
    for i in range(len(Files)):
        plt.plot(numpy.array(Features[i, :].T))
    plt.xticks(range(len(Names)))
    plt.legend(Files)
    ax.set_xticklabels(Names)
    plt.setp(plt.xticks()[1], rotation=90)
    plt.tick_params(axis='both', which='major', labelsize=8)
    plt.tick_params(axis='both', which='minor', labelsize=8)
    plt.show()
def scriptDemo():
    dirName = "demoData/dofDemo"
    types = ('*.jpg', )
    imageFilesList = []
    for files in types:
        imageFilesList.extend(glob.glob(os.path.join(dirName, files)))
    imageFilesList = sorted(imageFilesList)
    print imageFilesList
    Features = numpy.zeros((len(imageFilesList), 7))
    for i, f in enumerate(imageFilesList):
        img = cv2.imread(f, cv2.CV_LOAD_IMAGE_COLOR)  # read image
        [F, names] = getDepthOfFieldFeature2(img)
        Features[i, :] = F
    # Features[i,j] contains the j-th feature of the i-th file
    for i in range(7):
        plt.subplot(7, 1, i + 1)
        plt.bar(range(Features[:, i].shape[0]), Features[:, i])
        plt.title(names[i])
    plt.show()
    print Features
#scriptDemo()
def decode(self, msg):
    fn = os.path.join(self.directory_, msg)
    if os.path.exists(fn):
        im = cv2.imread(fn,
                        cv2.CV_LOAD_IMAGE_COLOR if self.color_
                        else cv2.CV_LOAD_IMAGE_GRAYSCALE)
        return im_resize(im, shape=self.shape_)
    else:
        raise Exception('File does not exist')
def __init__(self, directory, max_files=20000):
    """
    SUN RGB-D Dataset reader
    Note: First run find . | grep seg.mat > annotations.txt (in SUNRGBD folder)
    @params directory: SUNRGBD directory listing with image/*.png, and seg.mat files
    """
    self.directory_ = os.path.expanduser(directory)
    with open(os.path.join(self.directory_, 'image.txt')) as f:
        rgb_files = f.read().splitlines()
    with open(os.path.join(self.directory_, 'depth.txt')) as f:
        depth_files = f.read().splitlines()
    assert(len(rgb_files) == len(depth_files))
    self.rgb_files_ = [os.path.join(self.directory_, fn) for fn in fnmatch.filter(rgb_files, '*mit_*')][:max_files]
    self.depth_files_ = [os.path.join(self.directory_, fn) for fn in fnmatch.filter(depth_files, '*mit_*')][:max_files]
    self.label_files_ = [os.path.join(
        os.path.split(os.path.split(fn)[0])[0], 'seg.mat') for fn in self.rgb_files_]
    if not len(self.rgb_files_):
        raise RuntimeError('{} :: Failed to load dataset'.format(self.__class__.__name__))
    print('{} :: Loading {} image/depth/segmentation pairs'.format(self.__class__.__name__, len(self.rgb_files_)))
    self.rgb_ = imap(lambda fn: self._pad_image(cv2.imread(fn, cv2.CV_LOAD_IMAGE_COLOR)), self.rgb_files_)
    self.depth_ = imap(lambda fn: self._pad_image(cv2.imread(fn, -1)), self.depth_files_)
    self.labels_ = imap(self._process_label, self.label_files_)
    # self.target_hash_ = {item.encode('utf8'): idx + 1
    #                      for idx, item in enumerate(loadmat('data/sun3d/seg37list.mat', squeeze_me=True)['seg37list'])}
    # self.target_unhash_ = {v: k for k, v in self.target_hash_.iteritems()}
    # self.target_hash_ = SUNRGBDDataset.target_hash
    # self.target_unhash_ = SUNRGBDDataset.target_unhash

# @property
# def target_unhash(self):
#     return self.objects_.target_unhash

# @property
# def target_hash(self):
#     return self.objects_.target_hash
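The reader above builds its rgb_/depth_/labels_ streams lazily with itertools.imap, which only exists in Python 2. A small fallback (an assumption about how one would port this, not code from the original project) keeps the same lazy behavior on Python 3:

# Python 3 removed itertools.imap; the built-in map is already lazy there.
try:
    from itertools import imap
except ImportError:
    imap = map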
def callback_camera(self, data):
    # format: rgb8; jpeg compressed bgr8
    np_img = np.fromstring(data.data, dtype=np.uint8)
    img = cv2.imdecode(np_img, cv2.CV_LOAD_IMAGE_COLOR)
    img = cv2.cvtColor(cv2.resize(img, (self.width, self.height)), cv2.COLOR_BGR2GRAY)
    # note: this reshapes a (height, width) array to (width, height, 1),
    # which only preserves the pixel layout when width == height
    self.image = np.reshape(img, newshape=(self.width, self.height, 1)) / 256.0

def callback_camera_y(self, data):
    # format: rgb8; jpeg compressed bgr8
    np_img = np.fromstring(data.data, dtype=np.uint8)
    img = cv2.imdecode(np_img, cv2.CV_LOAD_IMAGE_COLOR)
    img = cv2.cvtColor(cv2.resize(img, (self.width, self.height)), cv2.COLOR_BGR2GRAY)
    self.image_y = np.reshape(img, newshape=(self.width, self.height, 1)) / 256.0
def getFeaturesFromFile(fileName, PLOT = False):
    img = cv2.imread(fileName, cv2.CV_LOAD_IMAGE_COLOR)  # read image
    #img2 = resizeFrame(img, 128)  # resize
    #img2[:,:,0] = img2[:,:,0] + 3.5 * img2.std() * np.random.random([img2.shape[0], img2.shape[1]])
    #img2[:,:,1] = img2[:,:,1] + 3.5 * img2.std() * np.random.random([img2.shape[0], img2.shape[1]])
    #img2[:,:,2] = img2[:,:,2] + 3.5 * img2.std() * np.random.random([img2.shape[0], img2.shape[1]])
    #plt.imshow(img2)
    #plt.show()
    #[F, N] = featureExtraction(img2, PLOT)  # feature extraction
    [F, N] = featureExtraction(img, PLOT)  # feature extraction
    return F, N
def processContoursinGt(path):
    folder = sort_files(path)
    length_cont_gt = []
    for i in range(20, len(folder)):
        newPath = path + "/0 (" + str(i) + ")" + ".png"
        img = cv2.imread(newPath, cv2.CV_LOAD_IMAGE_COLOR)
        #print "Image in processContour: ", img
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (31, 31), 0)
        thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        length_cont_gt.append(len(cnts))
    return length_cont_gt
def processContoursinFr(path1):
    folder1 = sort_files_fr(path1)
    length_cont_fr = []
    for i in range(1, len(folder1)):
        newPath = path1 + "/" + str(i) + ".jpg"
        img = cv2.imread(newPath, cv2.CV_LOAD_IMAGE_COLOR)
        #print "Image in processContourinFr: ", img
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (31, 31), 0)
        thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        length_cont_fr.append(len(cnts))
    return length_cont_fr
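Both contour counters above unpack cv2.findContours into two values, which matches OpenCV 2.x (and 4.x); OpenCV 3.x returned three values (image, contours, hierarchy). A version-agnostic unpacking, shown here as a drop-in sketch:

# findContours returns 2 values in OpenCV 2/4 but 3 in OpenCV 3;
# slicing the last two works on every version.
cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                           cv2.CHAIN_APPROX_SIMPLE)[-2:]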
def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
    req = requests.get(url, timeout=5, stream=True)
    content = ''
    for chunk in req.iter_content(2048):
        content += chunk
        if len(content) > maxsize:
            req.close()
            raise ValueError('Response too large')
    img_array = np.asarray(bytearray(content), dtype=np.uint8)
    cv2_img_flag = cv2.CV_LOAD_IMAGE_COLOR
    image = cv2.imdecode(img_array, cv2_img_flag)
    return image
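The function above is Python 2 flavored (str accumulator, CV_LOAD_IMAGE_COLOR). A minimal Python 3 sketch of the same download-and-decode flow, assuming the same requests dependency (the _py3 name is only for illustration):

import cv2
import numpy as np
import requests

def fetch_cvimage_from_url_py3(url, maxsize=10 * 1024 * 1024):
    req = requests.get(url, timeout=5, stream=True)
    content = b''  # iter_content yields bytes on Python 3
    for chunk in req.iter_content(2048):
        content += chunk
        if len(content) > maxsize:
            req.close()
            raise ValueError('Response too large')
    img_array = np.frombuffer(content, dtype=np.uint8)
    return cv2.imdecode(img_array, cv2.IMREAD_COLOR)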
def receive_frame(self, c, tkk):
    print "Waiting to receive frames of video from Server...."
    # cv2.imdecode expects a uint8 buffer; the original's np.int8 was a bug
    data = np.empty((700, 500), dtype=np.uint8)
    self.sock.recv_into(data)
    raw = cv2.imdecode(data, cv2.CV_LOAD_IMAGE_COLOR)
    cv2.imwrite("frame%d.jpeg" % c, raw)
    path = "frame" + str(c) + ".jpeg"
    print "Path: " + path
    print c
    return path
def read_image(filename):
    image = cv2.imread(filename, cv2.CV_LOAD_IMAGE_COLOR)
    # note: cv2.resize expects dsize as (width, height)
    image = cv2.resize(image, (resize_height, resize_width))
    return image
def get_word_image(self, gray_scale=True):
    col_type = None
    if gray_scale:
        col_type = cv2.CV_LOAD_IMAGE_GRAYSCALE
    else:
        col_type = cv2.CV_LOAD_IMAGE_COLOR
    # load the image
    ul = self.bounding_box['upperLeft']
    wh = self.bounding_box['widthHeight']
    img = cv2.imread(self.image_path, col_type)
    if not np.all(self.bounding_box['widthHeight'] == -1):
        img = img[ul[1]:ul[1] + wh[1], ul[0]:ul[0] + wh[0]]
    return img
def __MR_readimg(self, img):
    if isinstance(img, str):  # an image path
        img = cv2.imread(img, cv2.CV_LOAD_IMAGE_COLOR)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(float)/255
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(float)/255
    img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB).astype(float) / 255
    # h = 100
    # w = int(float(h)/float(img.shape[0])*float(img.shape[1]))
    return img  # cv2.resize(img,(w,h))
def match_face(model, pair):
    global_conf = None
    nparr_model = np.fromstring(model, np.uint8)
    path = cv2.imdecode(nparr_model, cv2.CV_LOAD_IMAGE_COLOR)
    recognizer = cv2.face.createLBPHFaceRecognizer()
    # path = './train_dir/yu/yu2.jpg'
    model_faces, model_labels = mtcnn.get_face(path)
    print model_labels
    model_faces_gray = []
    for face in model_faces:
        gray_image = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        model_faces_gray.append(gray_image)
    recognizer.train(model_faces_gray, np.array(model_labels))
    nparr_pair = np.fromstring(pair, np.uint8)
    imgPath = cv2.imdecode(nparr_pair, cv2.CV_LOAD_IMAGE_COLOR)
    # imgPath = './train_dir/yu/yu.jpg'
    # img_pair = cv2.imread(path)
    pair_faces, pair_labels = mtcnn.get_face(imgPath)
    pair_faces_gray = []
    for face in pair_faces:
        gray_image = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        pair_faces_gray.append(gray_image)
    for face in pair_faces_gray:
        # global_conf is a local; the original declared it global after
        # assigning it, which is invalid, so the global statement is dropped
        nbr_predicted, conf = recognizer.predict(face)
        print "Recognized with confidence {}".format(conf)
        global_conf = conf
    return global_conf
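createLBPHFaceRecognizer above is the opencv-contrib 3.0-3.2 factory name; from 3.3 on it was renamed. A guarded construction (a sketch, assuming opencv-contrib is installed either way):

# The LBPH factory was renamed in opencv-contrib 3.3+:
if hasattr(cv2.face, 'LBPHFaceRecognizer_create'):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
else:
    recognizer = cv2.face.createLBPHFaceRecognizer()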
def im_callback(self, data):
    np_arr = np.fromstring(data.data, np.uint8)
    #im = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    bgr_im = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    # Need to convert BGR image to RGB
    b, g, r = cv2.split(bgr_im)
    rgb_im = cv2.merge([r, g, b])
    self.im_data.append(rgb_im)
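The split/merge pair above is a hand-rolled channel swap; cvtColor does the same thing in one call (equivalent, just more idiomatic):

# One-call equivalent of the BGR->RGB split/merge above:
rgb_im = cv2.cvtColor(bgr_im, cv2.COLOR_BGR2RGB)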
def scriptDemo():
    dirName = "demoData/lineDemo"
    types = ('*.jpg', )
    imageFilesList = []
    for files in types:
        imageFilesList.extend(glob.glob(os.path.join(dirName, files)))
    imageFilesList = sorted(imageFilesList)
    Features = np.zeros((len(imageFilesList), 7))
    labels = []
    for i, f in enumerate(imageFilesList):
        print f
        if ntpath.basename(f)[0:5] == "noPer":
            labels.append(0)
        else:
            labels.append(1)
        img = cv2.imread(f, cv2.CV_LOAD_IMAGE_COLOR)  # read image
        [F, names] = getLineFeatures(img)
        Features[i, :] = F
    FeaturesPrespective = Features[:, 4:7]
    fig = plt.figure()
    color = ["ro", "gx"]
    labels = np.array(labels)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(FeaturesPrespective[np.nonzero(labels == 0)[0], 0], FeaturesPrespective[np.nonzero(labels == 0)[0], 1], FeaturesPrespective[np.nonzero(labels == 0)[0], 2], color[0], label='Non Perspective')
    ax.plot(FeaturesPrespective[np.nonzero(labels == 1)[0], 0], FeaturesPrespective[np.nonzero(labels == 1)[0], 1], FeaturesPrespective[np.nonzero(labels == 1)[0], 2], color[1], label='Perspective')
    ax.set_xlabel('Close Intersections - 5%')
    ax.set_ylabel('Close Intersections - 20%')
    ax.set_zlabel('Acute Angles')
    plt.legend(loc='upper left', numpoints=1)
    plt.show()
    for f in range(Features.shape[0]):
        print "{0:.4f}\t{1:.4f}\t{2:.4f}".format(Features[f, 4], Features[f, 5], Features[f, 6])

# For generating figures for paper
#scriptDemo()
# test intersection:
#A1 = np.array([0.0,0.0])
#A2 = np.array([4.0,2.0])
#B1 = np.array([1.0,1.2])
#B2 = np.array([2.0,1.0])
#plt.plot([A1[0], A2[0]], [A1[1],A2[1]])
#plt.plot([B1[0], B2[0]], [B1[1],B2[1]])
#[X, Y] = seg_intersect(A1, A2, B1, B2)
#print X, Y
#plt.plot(X, Y, '*');
#plt.show()