def skin_calib(self, raw_yrb):
    # threshold the YCrCb frame with the current calibration range
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
    cv2.imshow('YRB_calib', cal_skin)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('s'):
        # read the trackbar positions before destroying the window;
        # cv2.getTrackbarPos() fails once its window no longer exists
        ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
        ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
        rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
        rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
        bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
        bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
        self.mask_lower_yrb = np.array([ymin, rmin, bmin])
        self.mask_upper_yrb = np.array([ymax, rmax, bmax])
        self.calib_switch = False
        cv2.destroyWindow('YRB_calib')
Python cv2.destroyWindow() examples
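The snippets below all follow the same pattern: create a named window, show frames into it, and call cv2.destroyWindow() with the same name when done. A minimal self-contained sketch (the window name and placeholder image are illustrative):

import cv2
import numpy as np

img = np.zeros((240, 320, 3), dtype=np.uint8)  # placeholder image
cv2.namedWindow('demo')
cv2.imshow('demo', img)
cv2.waitKey(0)             # block until any key is pressed
cv2.destroyWindow('demo')  # close only this window; destroyAllWindows() closes every one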
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()
    while True:
        try:
            h, w = d._screen.shape[:2]
            # cv2.resize() needs integer dimensions, so use floor division
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == ord('d'):  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def _showloop(self):
    while cv2.waitKey(10) not in [keycode.ESCAPE, keycode.Q, keycode.q]:
        image = self.capture.read()
        image = image.transpose(Image.FLIP_LEFT_RIGHT)  # mirror for a natural webcam view
        image = _resize_image(image, self.size)
        array = np.asarray(image)
        array = _mount_roi(array, self.roi, color=(74, 20, 140), thickness=2)
        crop = _crop_array(array, self.roi)
        # process the cropped region for any gestures; the segmented image
        # is only returned in verbose mode
        if self.verbose:
            segments, event = spockpy.detect(crop, verbose=self.verbose)
            self.image = Image.fromarray(segments)
        else:
            event = spockpy.detect(crop, verbose=self.verbose)
        self.event = event
        cv2.imshow(HoverPad.TITLE, array)
    cv2.destroyWindow(HoverPad.TITLE)
def load_images(queue: PriorityQueue,
                source: int,
                file_path: str,
                target_width: int,
                target_height: int,
                display_progress: bool = False):
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)
    for file in iglob(path.join(file_path, '**', '*.jpg'), recursive=True):
        buffer = cv2.imread(file)
        if buffer is None:  # cv2.imread() fails silently; skip unreadable files
            continue
        buffer = cv2.resize(buffer, (target_width, target_height), interpolation=cv2.INTER_AREA)
        # a random priority shuffles the order in which consumers see the images
        random_priority = random()
        queue.put((random_priority, (buffer, source)))
        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xFF) == 27:  # Esc aborts early
                break
    if display_progress:
        cv2.destroyWindow(window)
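A hypothetical invocation (the path and target size are illustrative, not from the project):

from queue import PriorityQueue

q = PriorityQueue()
load_images(q, source=0, file_path='./images', target_width=224, target_height=224,
            display_progress=True)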
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
    self.image = image
    self._filter_size = filter_size
    self._threshold1 = threshold1
    self._threshold2 = threshold2

    def onchangeThreshold1(pos):
        self._threshold1 = pos
        self._render()

    def onchangeThreshold2(pos):
        self._threshold2 = pos
        self._render()

    def onchangeFilterSize(pos):
        self._filter_size = pos
        self._filter_size += (self._filter_size + 1) % 2  # keep the kernel size odd
        self._render()

    cv2.namedWindow('edges')
    cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
    cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
    cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)
    self._render()
    print("Adjust the parameters as desired. Hit any key to close.")
    cv2.waitKey(0)
    cv2.destroyWindow('edges')
    cv2.destroyWindow('smoothed')
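_render() itself is not shown on this page; a minimal sketch consistent with the trackbars above (an assumption, not the project's actual code) would smooth the image and run Canny, which also explains the extra 'smoothed' window destroyed at the end:

def _render(self):
    # assumed implementation: blur with the current (odd) kernel size, then edge-detect
    self._smoothed_img = cv2.GaussianBlur(self.image, (self._filter_size, self._filter_size), 0)
    self._edge_img = cv2.Canny(self._smoothed_img, self._threshold1, self._threshold2)
    cv2.imshow('smoothed', self._smoothed_img)
    cv2.imshow('edges', self._edge_img)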
# tarfile_calibration.py (project: camera_calibration_frontend, author: groundmelon)
def display(win_name, img):
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    k = -1
    while k == -1:
        k = waitkey()  # project helper assumed to wrap cv2.waitKey()
    cv2.destroyWindow(win_name)
    if k in [27, ord('q')]:  # Esc or 'q' shuts the ROS node down
        rospy.signal_shutdown('Quit')
def run(self):
    print("VIDEO client starts...")
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except Exception:
            time.sleep(3)
            continue
    print("video client <-> remote server successfully connected...")
    check = self.sock.recv(1)
    if check.decode("utf-8") != "S":  # wait for the server's authentication byte
        return
    print("authentication received")
    #self.cap = cv2.VideoCapture(0)
    self.cap = cv2.VideoCapture("test.mp4")
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("remote VIDEO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if not ret:  # stop when the stream or file ends
            break
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:  # Esc hides the local preview
                self.showme = False
                cv2.destroyWindow('You')
        if self.level > 0:
            frame = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        # serialize and compress the frame, then send it length-prefixed
        data = pickle.dumps(frame)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            print("video send ", len(zdata))
        except Exception:
            break
        for i in range(self.interval):  # drop frames to throttle the send rate
            self.cap.read()
def run(self):
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except Exception:
            time.sleep(3)
            continue
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("VIDEO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if not ret:  # stop when the stream ends
            break
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:  # Esc hides the local preview
                self.showme = False
                cv2.destroyWindow('You')
        # serialize and compress the scaled frame, then send it length-prefixed
        sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(sframe)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
        except Exception:
            break
        for i in range(self.interval):  # drop frames to throttle the send rate
            self.cap.read()
def __init__(self, img, squares, all=True):
    #w = ImageViewer(img)
    square_contours = [square.contour for square in squares]
    best_contours = []
    best_contour = classify_monitor_contour_set(square_contours)
    best_contours.append(best_contour.astype('int32'))
    print('Iterate over %d contours' % len(square_contours))
    if all:
        cycle = True
        while cycle:
            for (i, c) in enumerate(square_contours):
                src = img.copy()
                cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)  # best match in red
                cv2.drawContours(src, square_contours, i, (0, 255, 0), 1)  # candidate in green
                print('contour %d overlaid on basic image' % i)
                cv2.imshow('view', src)
                time.sleep(0.2)
                k = cv2.waitKey(30) & 0xFF
                if k == 27:  # Esc stops cycling
                    cycle = False
    else:
        cycle = True
        src = img.copy()
        while cycle:
            cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
            cv2.imshow('view', src)
            time.sleep(0.2)
            k = cv2.waitKey(30) & 0xFF
            if k == 27:
                cycle = False
    cv2.destroyWindow('view')
def __init__(self, img, squares, all=True):
    #w = ImageViewer(img)
    square_contours = [square.contour for square in squares]
    #pdb.set_trace()
    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
    best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
    #pdb.set_trace()
    #print('Iterate over %d contours' % len(square_contours))
    if all:
        cycle = True
        while cycle:
            for (i, c) in enumerate(square_contours):
                src = img.copy()
                cv2.drawContours(src, square_contours, i, (0, 255, 0), 1)  # candidate in green
                cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)   # best matches in red
                print('contour %d overlaid on basic image' % i)
                cv2.imshow('view', src)
                time.sleep(0.2)
                k = cv2.waitKey(30) & 0xFF
                if k == 27:  # Esc stops cycling
                    cycle = False
    else:
        cycle = True
        src = img.copy()
        while cycle:
            cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
            cv2.imshow('view', src)
            time.sleep(0.2)
            k = cv2.waitKey(30) & 0xFF
            if k == 27:
                cycle = False
    cv2.destroyWindow('view')
#####################################################################################################################
# Contours and Sets of Contours : various problems in computer vision relevant to the project.
#
# All "Heuristics" functions have the same signature: img, cnts, *args, **kwargs
#####################################################################################################################
def closeWindow(win="video"):
cv2.destroyWindow(win)
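    # destroyWindow() only takes effect once HighGUI's event loop runs;
    # a few waitKey(1) calls pump the event queue so the window really disappears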
for i in range(4):
cv2.waitKey(1)
def testModel(self):
    """
    This method tests the trained classifier:
    it reads all images from the testing path and
    uses BOVHelpers.predict() to obtain the class of each image.
    """
    self.testImages, self.testImageCount = self.file_helper.getFiles(self.test_path)
    predictions = []
    for word, imlist in self.testImages.items():
        print("processing", word)
        for im in imlist:
            cl = self.recognize(im)
            predictions.append({
                'image': im,
                'class': cl,
                'object_name': self.name_dict[str(int(cl[0]))]
            })
    print(predictions)
    for each in predictions:
        # cv2.imshow(each['object_name'], each['image'])
        # cv2.waitKey()
        # cv2.destroyWindow(each['object_name'])
        #
        # show the grayscale test image with its predicted label via matplotlib
        plt.imshow(cv2.cvtColor(each['image'], cv2.COLOR_GRAY2RGB))
        plt.title(each['object_name'])
        plt.show()
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()
    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    while True:
        try:
            h, w = d._screen.shape[:2]  # shape is (rows, cols) = (height, width)
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            # plot the 256-bin histogram of channel 0
            hist = cv2.calcHist([img], [0], None, [256], [0, 256])
            plt.plot(hist)
            plt.show()
            # if img.shape == oldimg.shape:
            #     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     ret, track_window = cv2.meanShift(hsv, track_window, term_crit)
            #     x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def screen_simple(host, port, serial, scale=0.5):
    adb = get_adb(host, port, serial)
    img = adb.screenshot_cv2()
    while img is None:  # wait until the device delivers a first frame
        time.sleep(1)
        img = adb.screenshot_cv2()
    print('Press Ctrl-C or Esc to quit.')
    winname = 'Sync Screen'
    cv2.namedWindow(winname)
    while True:
        try:
            img = adb.screenshot_cv2()
            if scale != 1.0:
                h, w = img.shape[:2]
                h, w = int(scale * h), int(scale * w)
                img = cv2.resize(img, (w, h))
            cv2.imshow(winname, img)
            key = cv2.waitKey(10)
            if key == 27:  # Escape
                break
        except KeyboardInterrupt:
            print('Done')
            break
        except Exception:
            traceback.print_exc()
            break
    cv2.destroyWindow(winname)
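A hypothetical invocation; the host and port below are adb's conventional defaults, and serial=None is assumed to mean the first connected device:

screen_simple('127.0.0.1', 5037, None, scale=0.5)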
def filter_frame_manually(self):
    display_image = self.frame
    window_name = "frame of video {0:s}".format(self.name)
    cv2.imshow(window_name, display_image)
    key = cv2.waitKey(0) & 0xFF
    add_corners = (key == ord('a'))  # 'a' accepts this frame's corners
    cv2.destroyWindow(window_name)  # destroy the window that was actually created
    return add_corners, key
def filter_frame_manually(self):
display_image = np.hstack([video.frame for video in self.videos])
cv2.imshow("frame", display_image)
key = cv2.waitKey(0) & 0xFF
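    # 'a' accepts this frame's corners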
add_corners = (key == ord('a'))
cv2.destroyWindow("frame")
return add_corners, key
def selectArea(self):
    self.userInteraction = True
    cv2.namedWindow(self.selectionWindow)
    cv2.setMouseCallback(self.selectionWindow, self.mouseInteraction)
    self.workingFrame = self.processedFrame.copy()
    self.showFrame(self.selectionWindow, self.workingFrame)
    while True:
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):  # quit area selection
            self.undoFrames = []
            break
        elif key == ord('c'):  # clear every selection
            self.workingFrame = self.processedFrame.copy()
            self.trackedAreasList = []
            self.undoFrames = []
            self.showFrame(self.selectionWindow, self.workingFrame)
        elif key == ord('l'):  # drop the last selected area
            try:
                self.trackedAreasList.pop()
            except IndexError:
                pass
            else:
                self.workingFrame = self.undoFrames.pop()
                self.showFrame(self.selectionWindow, self.workingFrame)
        elif key == ord('t'):  # start tracking the selected area
            self.undoFrames = []
            self.trackArea = self.refPt
            self.tracking = True
            self.trackDump = []
            if self.pause is True:
                self.pause = False
            break
        elif key == ord('h'):  # show the help for selection mode
            self.showHelp('select')
    cv2.destroyWindow(self.selectionWindow)
    self.userInteraction = False
def toggle_debug_mode(self):
self.debug_mode = not self.debug_mode
if not self.debug_mode:
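        # debug mode was just switched off, so tear down its window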
cv2.destroyWindow(DEBUG_WINDOW)
def release(self):
    self.is_stopped = True
    time.sleep(1)  # give the capture loop a moment to notice the stop flag
    # get rid of the video stream window
    if self.play_video:
        cv2.destroyWindow('live')
    # release the video capture
    self.cap.release()
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
"""
Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
:param model: Learnt emotion detection model.
:param emoticons: List of emotions images.
:param window_size: Size of webcam image window.
:param window_name: Name of webcam image window.
:param update_time: Image update time interval.
"""
cv2.namedWindow(window_name, WINDOW_NORMAL)
if window_size:
width, height = window_size
cv2.resizeWindow(window_name, width, height)
vc = cv2.VideoCapture(0)
if vc.isOpened():
read_value, webcam_image = vc.read()
else:
print("webcam not found")
return
while read_value:
for normalized_face, (x, y, w, h) in find_faces(webcam_image):
prediction = model.predict(normalized_face) # do prediction
if cv2.__version__ != '3.1.0':
prediction = prediction[0]
image_to_draw = emoticons[prediction]
draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
cv2.imshow(window_name, webcam_image)
read_value, webcam_image = vc.read()
key = cv2.waitKey(update_time)
if key == 27: # exit on ESC
break
cv2.destroyWindow(window_name)
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    cv2.createTrackbar('Hue', name, 0, 180, nothing)
    cv2.createTrackbar('Sat', name, 0, 255, nothing)
    cv2.createTrackbar('Val', name, 0, 255, nothing)
    while True:
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)  # mirror the frame for a natural view
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        # accept a +/-20 band around the chosen hue
        lower = np.array([hue - 20, sat, val])
        upper = np.array([hue + 20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        # erode then dilate (morphological opening) to suppress speckle noise
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):  # space accepts the calibrated range
            cv2.destroyWindow(name)
            return np.array([[hue - 20, sat, val], [hue + 20, 255, 255]])
        elif k == ord('d'):  # 'd' keeps the default range
            cv2.destroyWindow(name)
            return def_range
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    # initialize the sliders from the saved range (hue is stored as its lower bound, hue-20)
    cv2.createTrackbar('Hue', name, def_range[0][0] + 20, 180, nothing)
    cv2.createTrackbar('Sat', name, def_range[0][1], 255, nothing)
    cv2.createTrackbar('Val', name, def_range[0][2], 255, nothing)
    while True:
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)  # mirror the frame for a natural view
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        # accept a +/-20 band around the chosen hue
        lower = np.array([hue - 20, sat, val])
        upper = np.array([hue + 20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        # erode then dilate (morphological opening) to suppress speckle noise
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):  # space accepts the calibrated range
            cv2.destroyWindow(name)
            return np.array([[hue - 20, sat, val], [hue + 20, 255, 255]])
        elif k == ord('d'):  # 'd' keeps the default range
            cv2.destroyWindow(name)
            return def_range
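A hypothetical call, assuming the module-level cap, kernel, and nothing() callback used above are already defined (the default blue range below is illustrative, not from the project):

blue_range = np.array([[110 - 20, 60, 60], [110 + 20, 255, 255]])
blue_range = calibrateColor('Blue', blue_range)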