def get_points():
    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3D points in real-world space
    imgpoints = []  # 2D points in the image plane
    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')
    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)
        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            # write_name = 'corners_found' + str(idx) + '.jpg'
            # cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)
    cv2.destroyAllWindows()
    return objpoints, imgpoints
def skin_calib(self, raw_yrb):
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
    cv2.imshow('YRB_calib', cal_skin)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('s'):
        self.calib_switch = False
        cv2.destroyWindow('YRB_calib')
    # Refresh the mask bounds from the calibration trackbars
    ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
    ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
    rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
    rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
    bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
    bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
    self.mask_lower_yrb = np.array([ymin, rmin, bmin])
    self.mask_upper_yrb = np.array([ymax, rmax, bmax])
# Do skin detection with some filtering
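The comment above trails the calibration snippet without its code; a minimal sketch of that detection step, reusing the same YCrCb bounds, might look like the following (the helper name detect_skin is hypothetical):

def detect_skin(self, raw_yrb):
    # Threshold the YCrCb frame with the calibrated bounds
    mask = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    # Morphological opening/closing to suppress speckle noise in the mask
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    # Light blur to smooth the mask edges before applying it
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    return cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask)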
def test_image(addr):
    # `faceCascade` and `model` are assumed to be defined at module level
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
        face_crop = im[y:y + h, x:x + w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
cameracalibrator.py (project: camera_calibration_frontend, author: groundmelon)
def run(self):
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)
    while True:
        # Wait for an image (can happen at the very beginning, when the queue is still empty)
        while len(self.queue) == 0:
            time.sleep(0.1)
        im = self.queue[0]
        cv2.imshow("display", im)
        k = cv2.waitKey(6) & 0xFF
        if k in [27, ord('q')]:
            rospy.signal_shutdown('Quit')
        elif k == ord('s'):
            self.opencv_calibration_node.screendump(im)
        if self.extra_queue:
            if len(self.extra_queue):
                extra_img = self.extra_queue[0]
                cv2.imshow("extra", extra_img)
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)
    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")
    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    lines = [l1, l2]  # renamed from `list` to avoid shadowing the builtin
    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger          # `trigger` is assumed to be a module-level counter
        nonlocal mouse_down     # bound in the enclosing run()
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                lines[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                lines[trigger][1] = (x, y)
                cv2.line(im_disp, (lines[trigger][0][0], lines[trigger][0][1]),
                         (lines[trigger][1][0], lines[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):
            # Press 'r' to resume and return the two drawn lines
            return lines
def load_image(self):
    '''
    Load an image from disk and show it.
    '''
    try:
        im_path, _ = QFileDialog.getOpenFileName(self, 'Select an image', './', 'Image Files(*.png *.jpg *.bmp)')
        if not os.path.exists(im_path):
            return
        self.im_path = im_path
        self.statu_text.append('Loaded image: ' + self.im_path)
        if not self.swapper:
            self.swapper = Coupleswapper([self.im_path])
        elif not self.im_path == self.cur_im_path:
            self.swapper.load_heads([self.im_path])
        self.img_ori = self.swapper.heads[os.path.split(self.im_path)[-1]][0]
        cv2.imshow('Origin', self.img_ori)
    except (TooManyFaces, NoFace):
        self.statu_text.append(traceback.format_exc() + '\nFace detection failed: make sure the image contains detectable faces.')
        return
def CaptureImage():
    imageName = 'DontCare.jpg'  # Just a placeholder name
    cap = cv2.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # To capture the image in monochrome
        rgbImage = frame  # To capture the image in RGB color space
        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)
        # Wait for the 'q' key to capture
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Return the captured image's name
    return imageName
def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while True:
        ret, frame = cap.read()
        # Downscale to speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        # `s_w` and `s_h` are assumed to be the screen width/height defined elsewhere;
        # integer division keeps the window coordinates valid under Python 3
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)
        k = cv2.waitKey(delim_wait)
        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
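A usage sketch for videoize, wrapping a simple Canny filter in the func(frame, args) signature it expects; pressing Esc (27, the default delim_key) quits:

# Hypothetical usage: live Canny edges from the default webcam
videoize(lambda frame, args: cv2.Canny(frame, 50, 150), None)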
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if capture.isOpened() is False:
        raise IOError("Camera could not be opened")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if not ret:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
# Display the webcam feed with Matplotlib
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = b""
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        # Read until the fixed-size length header is complete
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        # Read until the whole compressed frame has arrived
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
def on_mouse(event, x, y, flags, params):
    # `img` and `boxes` are assumed to be module-level globals
    t = time()
    if event == cv2.EVENT_LBUTTONDOWN:
        print('Start Mouse Position: ' + str(x) + ', ' + str(y))
        sbox = [x, y]
        boxes.append(sbox)
    elif event == cv2.EVENT_LBUTTONUP:
        print('End Mouse Position: ' + str(x) + ', ' + str(y))
        ebox = [x, y]
        boxes.append(ebox)
        print(boxes)
        crop = img[boxes[-2][1]:boxes[-1][1], boxes[-2][0]:boxes[-1][0]]
        cv2.imshow('crop', crop)
        k = cv2.waitKey(0)
        if ord('r') == k:
            cv2.imwrite('Crop' + str(t) + '.jpg', crop)
            print("Written to file")
data_preprocessing_autoencoder.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def visualize(frame, coordinates_list, alpha=0.80, color=[255, 255, 255]):
    """
    Args:
        1. frame:            OpenCV's image which has to be visualized.
        2. coordinates_list: List of coordinates which will be visualized in the given `frame`.
        3. alpha, color:     Parameters which help in visualizing properly.
                             A convex hull will be shown for each element in the `coordinates_list`.
    """
    layer = frame.copy()
    output = frame.copy()
    for coordinates in coordinates_list:
        c_hull = cv2.convexHull(coordinates)
        cv2.drawContours(layer, [c_hull], -1, color, -1)
    cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
    cv2.imshow("Output", output)
def camera_detector(self, cap, wait=10):
    detect_timer = Timer()
    ret, _ = cap.read()
    while ret:
        ret, frame = cap.read()
        detect_timer.tic()
        result = self.detect(frame)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))
        self.draw_result(frame, result)
        cv2.imshow('Camera', frame)
        cv2.waitKey(wait)
        ret, frame = cap.read()
def imshow_cv(label, im, block=False, text=None, wait=2):
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True:
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'):
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print('Saving %s' % fn)
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1)
def _render(self):
    self._smoothed_img = cv2.GaussianBlur(self.image, (self._filter_size, self._filter_size), sigmaX=0, sigmaY=0)
    self._edge_img = cv2.Canny(self._smoothed_img, self._threshold1, self._threshold2)
    cv2.imshow('smoothed', self._smoothed_img)
    cv2.imshow('edges', self._edge_img)
def on_new_point_cloud(data):
    global im
    pc = pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z", "intensity"))
    cloud_points = []
    for p in pc:
        cloud_points.append(p)
    npc = np.array(cloud_points)
    # lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="depth", y_fudge=Y_FUDGE)
    # lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="height", y_fudge=Y_FUDGE)
    # lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="reflectance", y_fudge=Y_FUDGE)
    # im = birds_eye_point_cloud(npc, side_range=(-10, 10), fwd_range=(-10, 10), res=0.1)
    im = point_cloud_to_panorama(npc,
                                 v_res=VRES,
                                 h_res=HRES,
                                 v_fov=VFOV,
                                 y_fudge=5,
                                 d_range=(0, 100))
    # plt.imshow(im, cmap='spectral')
    # plt.show()
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return
    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)
    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder, 'marker-test.mp4'))
    markers = []
    detected_count = 0
    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        markers = detect_markers_robust(gray_img, 5, prev_markers=markers,
                                        true_detect_every_frame=1, visualize=True)
        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)
        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s' % m['id'], m['img'])
        #         cv2.imshow('otsu %s' % m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
            break
        detected_count += len(markers)
    print(detected_count)  # 2900 #3042 #3021
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
    cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
    crop_res = img_src[Rymin:Rymax, Rxmin:Rxmax]
    grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)
    _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow('Thresh', thresh1)
    contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Draw contours on the threshold image
    if len(contours) > 0:
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
    return contours, crop_res
# Check ConvexHull and Convexity Defects
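The comment above announces the hull/defect check without its code; a minimal sketch of that step, taking the `contours` and `crop_res` returned by find_contour above (the helper name check_convexity is hypothetical):

def check_convexity(contours, crop_res):
    if not contours:
        return
    # Analyse the largest contour, assumed to be the hand
    cnt = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(cnt, returnPoints=False)
    defects = cv2.convexityDefects(cnt, hull)
    if defects is None:
        return
    for i in range(defects.shape[0]):
        s, e, f, _ = defects[i, 0]
        start = tuple(cnt[s][0])
        end = tuple(cnt[e][0])
        far = tuple(cnt[f][0])
        cv2.line(crop_res, start, end, (0, 255, 0), 2)  # hull edge
        cv2.circle(crop_res, far, 5, (0, 0, 255), -1)   # defect point
    cv2.imshow('Defects', crop_res)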
def color_quant(input, K, output):
    img = cv2.imread(input)
    Z = img.reshape((-1, 3))
    # Convert to np.float32
    Z = np.float32(Z)
    # Define criteria and the number of clusters (K), then apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Convert back into uint8 and rebuild the original image shape
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape(img.shape)
    cv2.imshow('res2', res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
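A one-line usage sketch (the file names are illustrative):

# Hypothetical usage: reduce a photo to its 8 dominant colours via k-means
color_quant('input.jpg', 8, 'quantized.jpg')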
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=obj.iterations)
    if True:
        print("showing")
        cv2.imshow(obj.Label, opening)
        print("shown")
    else:
        # Dead branch: `dst` is undefined here and this path is never taken
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = opening
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    edges = color
    if True:
        print("showing")
        cv2.imshow(obj.Label, edges)
        print("shown")
    else:
        # Dead branch: `dst` is undefined here and this path is never taken
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = edges
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copying image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print("loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=obj.iterations)
    if True:
        print("showing")
        cv2.imshow(obj.Label, closing)
        print("shown")
    else:
        # Dead branch: `dst` is undefined here and this path is never taken
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = closing
def findpathlist(ed, showPics=True):
    '''Generate a list of paths.'''
    pathlist = []
    w, l = ed.shape
    for x in range(l):
        for y in range(w):
            if ed[y][x]:
                path = runpath(ed, x, y)
                if len(path) > 4:
                    if showPics:
                        cv2.imshow('remaining points', ed)
                        cv2.waitKey(1)
                        Gui.updateGui()
                    pathlist.append(path)
    return pathlist
def draw_images(img, undistorted, title, cmap):
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=50)
    if cmap is not None:
        ax2.imshow(undistorted, cmap=cmap)
    else:
        ax2.imshow(undistorted)
    ax2.set_title(title, fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()
# TODO: Write a function that takes an image, object points, and image points,
# performs the camera calibration and image distortion correction, and
# returns the undistorted image
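A minimal sketch of the function this TODO describes, wiring the objpoints/imgpoints collected by get_points above into cv2.calibrateCamera (the name cal_undistort is illustrative):

def cal_undistort(img, objpoints, imgpoints):
    # Estimate the camera matrix and distortion coefficients from the
    # 3D-2D point correspondences collected during chessboard detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    # Undo lens distortion using the recovered intrinsics
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    return undistorted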
def apply_motion_blur(image, kernel_size, strength=1.0):
    """Applies motion blur to an image."""
    # Generate the kernel: a horizontal line through the kernel centre
    kernel_motion_blur = np.zeros((kernel_size, kernel_size))
    kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
    kernel_motion_blur = kernel_motion_blur / kernel_size
    # Rotate the kernel to a random direction; `rotate` is assumed to be
    # imported elsewhere (e.g. scipy.ndimage.rotate)
    rotation_kernel = np.random.uniform(0, 360)
    kernel_motion_blur = rotate(kernel_motion_blur, rotation_kernel)
    # cv2.imshow("kernel", cv2.resize(kernel_motion_blur, (100, 100)))
    kernel_motion_blur *= strength
    # Apply the kernel to the input image
    output = cv2.filter2D(image, -1, kernel_motion_blur)
    return output
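A quick usage sketch (the file path is illustrative):

# Hypothetical usage: streak a frame with a randomly oriented 15-pixel blur
img = cv2.imread('frame.jpg')  # illustrative path
blurred = apply_motion_blur(img, kernel_size=15, strength=1.0)
cv2.imshow('motion blur', blurred)
cv2.waitKey(0)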
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    for i in inds:
        bbox = dets[i, :4].astype(int)  # OpenCV drawing calls expect integer pixel coordinates
        score = dets[i, -1]
        # print('ClassName:', class_name, 'bbox:', bbox, 'score:', score)
        # Draw the rectangle
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        # Draw the text
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)
    # Show the image
    # cv2.imshow("Detect Result", im)
def hdSolidBlock(fn="redHDSolidBlock.jpg", bgr=None):
    '''Generate a test image as a solid block of colour of known size, and save it to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour channels. Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h, w, 3), dtype="uint8")
    # Set all of the pixels to the bgr tuple: 8-bit colour, red by default
    if not bgr:
        bgr = [0, 0, 255]
    img[:, :] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    # cv2.imshow("zeroes", frame)
    # ch = 0xff & cv2.waitKey(10000)
    # cv2.destroyAllWindows()
    cv2.imwrite(fn, img)
def do_warp(M, warp):
    # `orig`, `maxWidth`, `maxHeight`, `image`, and `edged` are assumed to be
    # globals set by the caller
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # Convert the warped image to grayscale, then rescale the pixel
    # intensities to span the full 0..255 range
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range=(0, 255))
    # The Pokemon we want to identify will be in the top-right corner of the
    # warped image -- crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]
    # Save the cropped image to file
    cv2.imwrite("cropped.png", crop)
    # Show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height=300))
    cv2.imshow("crop", imutils.resize(crop, height=300))
    cv2.waitKey(0)