def rgb_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        return  # skip this frame if conversion failed
    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        # note: on OpenCV 3+ solvePnPRansac returns a retval as an extra first value
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape:")
        print(self.rgb_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))
        with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
            yaml.dump({'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}, rgb_stream)
        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)
Python cv2.WINDOW_NORMAL usage examples

Source: get_extrinsics.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
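The two blocks of prints above hinge on inverting the board-to-camera pose that solvePnPRansac returns: if X_cam = R·X_world + t, then the camera's orientation in world coordinates is Rᵀ and its origin is -Rᵀt. A minimal standalone sketch of that inversion (the rvec/tvec values here are hypothetical placeholders, not calibration output):

import numpy as np
import cv2

# Placeholder pose, in the form solvePnP would return it (hypothetical values).
rvec = np.array([[0.1], [0.2], [0.3]])
tvec = np.array([[0.5], [0.0], [1.2]])

rmat, _ = cv2.Rodrigues(rvec)        # world -> camera rotation matrix

# Invert the rigid transform: X_world = R.T @ (X_cam - t)
cam_rmat_in_world = rmat.T
cam_origin_in_world = -rmat.T @ tvec

print(cam_rmat_in_world)
print(cam_origin_in_world)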
Source: cameracalibrator.py (project: camera_calibration_frontend, author: groundmelon)
def run(self):
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)
    while True:
        # wait for an image (could happen at the very beginning when the queue is still empty)
        while len(self.queue) == 0:
            time.sleep(0.1)
        im = self.queue[0]
        cv2.imshow("display", im)
        k = cv2.waitKey(6) & 0xFF
        if k in [27, ord('q')]:
            rospy.signal_shutdown('Quit')
        elif k == ord('s'):
            self.opencv_calibration_node.screendump(im)
        if self.extra_queue:
            if len(self.extra_queue):
                extra_img = self.extra_queue[0]
                cv2.imshow("extra", extra_img)
Source: get_extrinsics.py (project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials, author: taochenshh)
def ir_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
        return  # skip this frame if conversion failed
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags=cv2.CV_EPNP)
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)
        print("The world coordinate system's origin in the camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape:")
        print(self.ir_img.shape)
        print("The camera origin in the world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))
        with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
            yaml.dump({'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}, depth_stream)
        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)
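Once both callbacks have observed the same board and written their YAML files, the IR-to-RGB extrinsic can be composed from the two stored poses. A minimal sketch, assuming the two files and key names written by the callbacks above; the composition follows from X_rgb = R_rgb·X_world + t_rgb and X_world = R_irᵀ·(X_ir - t_ir):

import numpy as np
import yaml

def load_pose(path):
    # Each file stores the board-to-camera rotation matrix and translation.
    with open(path) as f:
        pose = yaml.safe_load(f)
    return np.array(pose['rmat']), np.array(pose['tvec'])

rgb_rmat, rgb_tvec = load_pose("/home/chentao/kinect_calibration/rgb_camera_pose.yaml")
ir_rmat, ir_tvec = load_pose("/home/chentao/kinect_calibration/ir_camera_pose.yaml")

# IR camera -> RGB camera: R = R_rgb @ R_ir.T, t = t_rgb - R @ t_ir
R_ir_to_rgb = rgb_rmat @ ir_rmat.T
t_ir_to_rgb = rgb_tvec - R_ir_to_rgb @ ir_tvec
print(R_ir_to_rgb)
print(t_ir_to_rgb)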
def get_start_points(image):
    cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    print("Press 'A' to use the default start and end points, or press 'S' to choose your own")
    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid key")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise IOError("Failed to open the camera")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if not ret:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    # Each frame is prefixed with its compressed size, packed as an unsigned long.
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC to quit
            break
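One caveat with this framing scheme: struct.calcsize("L") is platform-dependent (4 bytes with standard sizing, typically 8 bytes natively on 64-bit Linux), so sender and receiver must run on matching platforms. A hedged sketch of a portable alternative, not part of the original project, using a fixed network-order header:

import struct

HEADER = "!I"                          # 4-byte unsigned int, network byte order
HEADER_SIZE = struct.calcsize(HEADER)  # always 4, regardless of platform

def send_frame(sock, payload):
    # Prefix the payload with its length in a platform-independent header.
    sock.sendall(struct.pack(HEADER, len(payload)) + payload)

def recv_exact(sock, n):
    # Loop until exactly n bytes have arrived.
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        buf += chunk
    return buf

def recv_frame(sock):
    (size,) = struct.unpack(HEADER, recv_exact(sock, HEADER_SIZE))
    return recv_exact(sock, size)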
def evaluate_images():
    ssd = SSD()
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    global i2name
    i2name = loader.i2name
    while True:
        batch = next(test_batches)
        imgs, anns = loader.preprocess_batch(batch)
        pred_labels_f, pred_locs_f, step = ssd.sess.run([ssd.pred_labels, ssd.pred_locs, ssd.global_step],
                                                        feed_dict={ssd.imgs_ph: imgs, ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)
def show_webcam(address):
    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()
    ssd = SSD()
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    boxes_ = None
    confidences_ = None
    while True:
        sample = cam.image
        resized_img = skimage.transform.resize(sample, (image_size, image_size))
        pred_labels_f, pred_locs_f = ssd.sess.run([ssd.pred_labels, ssd.pred_locs],
                                                  feed_dict={ssd.imgs_ph: [resized_img], ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0], boxes_, confidences_)
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)
def run_main():
    # First initialize the camera capture object with the cv2.VideoCapture class.
    camera = cv2.VideoCapture(camera_port)
    # To create a new window for display.
    # Disabled for now, as we can't see the image on the cap:
    # cv2.namedWindow(display, cv2.WINDOW_NORMAL)
    # Discard the first few frames so the camera can adjust its light levels.
    for i in range(ramp_frames):
        temp = get_image(camera)
    # Now take an image and save it every second.
    while True:
        try:
            time.sleep(1)
            take_save_image(camera)
        except Exception as e:
            print("Exception occurred")
            print(e)
def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image, granularity)
    solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), solvedImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Source: tarfile_calibration.py (project: camera_calibration_frontend, author: groundmelon)
def display(win_name, img):
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    k = -1
    while k == -1:
        k = waitkey()
    cv2.destroyWindow(win_name)
    if k in [27, ord('q')]:
        rospy.signal_shutdown('Quit')
def cvShowImageColor(image_file):
    image_bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Display a grayscale image with OpenCV.
def cvShowImageGray(image_file):
    image_gray = cv2.imread(image_file, 0)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def run(self):
    print("VIDEO server starts...")
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except Exception:
            time.sleep(3)
            continue
    print("video server <-> remote server successfully connected...")
    # Wait for a one-byte go-ahead from the remote side.
    check = self.sock.recv(1)
    if check.decode("utf-8") != "S":
        return
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        while len(data) < payload_size:
            data += self.sock.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += self.sock.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        try:
            cv2.imshow('Remote', frame)
        except cv2.error:
            pass
        if cv2.waitKey(1) & 0xFF == 27:  # ESC to quit
            break
def run(self):
    print("VIDEO client starts...")
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except Exception:
            time.sleep(3)
            continue
    print("video client <-> remote server successfully connected...")
    # Wait for a one-byte go-ahead from the remote side.
    check = self.sock.recv(1)
    if check.decode("utf-8") != "S":
        return
    print("authentication confirmed")
    # self.cap = cv2.VideoCapture(0)
    self.cap = cv2.VideoCapture("test.mp4")
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("remote VIDEO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:  # ESC hides the local preview
                self.showme = False
                cv2.destroyWindow('You')
        if self.level > 0:
            frame = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(frame)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            print("video send ", len(zdata))
        except Exception:
            break
        # Skip frames to limit the frame rate.
        for i in range(self.interval):
            self.cap.read()
def run(self):
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except Exception:
            time.sleep(3)
            continue
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("VIDEO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:  # ESC hides the local preview
                self.showme = False
                cv2.destroyWindow('You')
        sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(sframe)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
        except Exception:
            break
        # Skip frames to limit the frame rate.
        for i in range(self.interval):
            self.cap.read()
def __init__(self, video_src=0, interactive=True, video='vss.avi', fallback='synth:bg=./data/hi.jpg:noise=0.05', save_frames=False, frame_dir='./data', frame_prefix='frame-'):
    cam = create_capture(video_src, fallback=fallback)
    vwriter = None
    if interactive:
        print("q to quit")
        window = 'video square search'
        cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    else:
        vwriter = VideoWriter(video)
    run = True
    t = clock()
    frameCounter = 0
    while run:
        ret, img = cam.read()
        if interactive:
            print("read next image")
        squares = find_squares(img, 0.2)
        nOfSquares = len(squares)
        cv2.drawContours(img, squares, -1, (0, 255, 0), 3)
        dt = clock() - t
        draw_str(img, (80, 80), 'time: %.1f ms found = %d' % (dt * 1000, nOfSquares), 2.0)
        if interactive:
            cv2.imshow(window, img)
            print('q to quit, n for next')
            ch = 0xff & cv2.waitKey(100)
            if ch == ord('q'):
                run = False
            elif ch == ord('n'):
                continue
        else:
            vwriter.addFrame(img)
        if save_frames:
            fn = os.path.join(frame_dir, '%s%04d.jpg' % (frame_prefix, frameCounter))
            print("Saving frame %d to %s" % (frameCounter, fn))
            cv2.imwrite(fn, img)
            frameCounter += 1
    if vwriter:
        vwriter.finalise()
def process_image(self, cv_image, header, tag):
    """ process the image """
    objects = self.cascade.detectMultiScale(cv_image)
    for obj in objects:
        cv2.rectangle(cv_image, (obj[0], obj[1]),
                      (obj[0] + obj[2], obj[1] + obj[3]), (0, 255, 0))
    cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(tag, 600, 600)
    cv2.imshow(tag, cv_image)
    cv2.waitKey(1)
def look_for_exit(self, image, debug=False, unwarped=True):
    """ Look for an exit from our position """
    for row in range(1, image.shape[0] // 10):
        img = image.copy()
        base_row = (row * 10) + 5
        image_dump = os.environ.get("ZARJ_IMAGE_DUMP")
        markup = debug or (image_dump is not None)
        output = self.figure_path(img, base_row, markup, True)
        if output is not None and output['right'] is not None and \
           output['left'] is not None:
            real_distance = self.unwarp_perspective((image.shape[1] // 2,
                                                     base_row))
            if unwarped:
                base_row = real_distance
            if markup:
                txt = "({0:.2f})".format(real_distance)
                cv2.putText(img, txt, (0, img.shape[0] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
            if debug:
                name = "_exit_decision_"
                cv2.namedWindow(name, cv2.WINDOW_NORMAL)
                cv2.resizeWindow(name, 500, 250)
                cv2.imshow(name, img)
                cv2.waitKey(1)
            if image_dump is not None:
                cv2.imwrite(image_dump + '/exit_{0}.png'.format(
                    debug_sequence()), img)
            if real_distance > 1.8:
                log('Wait a second! An exit {} away is too far away'.format(
                    real_distance))
                return None
            return base_row
    return None
def process_image(self, cv_image, header, tag):
    """ process the image """
    cv2.namedWindow("watching:" + tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow("watching:" + tag, 500, 250)
    cv2.imshow("watching:" + tag, cv_image)
    cv2.waitKey(1)
def process_image(self, cv_image, header, tag):
    """ process the image """
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    # mask for color range
    if self.color_range:
        mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
        count = cv2.countNonZero(mask)
        if count:
            kernel = np.ones((5, 5), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=2)
            # note: OpenCV 3 returns (image, contours, hierarchy) here
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
            for i, c in enumerate(contours):
                x, y, w, h = cv2.boundingRect(c)
                if self.prefix is not None:
                    name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                       tag,
                                                       header.seq, i)
                    print(name)
                    roi = cv_image[y:y + h, x:x + w]
                    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                    gray = cv2.equalizeHist(gray)
                    cv2.imwrite(name, gray)
            for c in contours:
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(cv_image, (x, y), (x + w, y + h), (0, 255, 0))
        elif self.prefix is not None:
            name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                    header.seq)
            cv2.imwrite(name, cv_image)
    cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(tag, 600, 600)
    cv2.imshow(tag, cv_image)
    cv2.waitKey(1)
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over the faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotion images.
    :param window_size: Size of the webcam image window.
    :param window_name: Name of the webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]
            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow(window_name)
def showImage(img, width, height):
    screen_res = width, height
    scale_width = screen_res[0] / img.shape[1]
    scale_height = screen_res[1] / img.shape[0]
    scale = min(scale_width, scale_height)
    window_width = int(img.shape[1] * scale)
    window_height = int(img.shape[0] * scale)
    cv2.namedWindow('dst_rt', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('dst_rt', window_width, window_height)
    cv2.imshow('dst_rt', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    assert len(sys.argv) > 1, "Pass an image file as argument."
    I = cv2.imread(sys.argv[-1])
    assert I is not None, "Failed to open image."
    cv2.namedWindow("cat", cv2.WINDOW_NORMAL)
    cv2.imshow("cat", I)
    cv2.waitKey(0)
    J = pyflipping.vertflip(I)
    cv2.imshow("cat", J)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    cv2.namedWindow("segment", cv2.WINDOW_NORMAL)
    db = speedlib.SpeedSegmentsDatabase.get_instance()
    for key in db.segments:
        seg = db.segments[key]
        cv2.imshow("segment", seg.get_image())
        cv2.waitKey(100)
        label = input("Enter label [current label: '%s']: " % (str(seg.label),))
        seg.label = label
    db.save()
    print("Finished.")
def main():
    cv2.namedWindow("speed", cv2.WINDOW_NORMAL)
    cv2.namedWindow("speed_bin", cv2.WINDOW_NORMAL)

    def on_route_advisor_visible(game, scr):
        speed_image = game.win.get_speed_image(scr)
        som = speedlib.SpeedOMeter(speed_image)
        speed_image_bin = som.get_postprocessed_image()
        cv2.imshow("speed", speed_image)
        cv2.imshow("speed_bin", speed_image_bin.astype(np.uint8) * 255)
        cv2.waitKey(1)
        segments = som.split_to_segments()
        print("Found %d segments" % (len(segments),), [s.arr.shape for s in segments])
        for seg in segments:
            if not seg.is_in_database():
                speedlib.SpeedSegmentsDatabase.get_instance().add(seg)
                print("Added new segment with key: %s" % (seg.get_key(),))
            else:
                print("Segment already in database.")

    game = ets2game.ETS2Game()
    game.on_route_advisor_visible = on_route_advisor_visible
    game.run()
def __init__(self, fn):
    self.img = cv2.imread(fn)
    if self.img is None:
        raise Exception('Failed to load image file: %s' % fn)
    h, w = self.img.shape[:2]
    self.markers = np.zeros((h, w), np.int32)
    self.markers_vis = self.img.copy()
    self.cur_marker = 1
    self.colors = np.int32(list(np.ndindex(2, 2, 2))) * 255
    self.auto_update = True
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.moveWindow('img', 300, 200)
    self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
    self.returnVar = self.markers.copy()
def watershed(self):
    m = self.markers.copy()
    cv2.watershed(self.img, m)
    self.returnVar = m.copy()
    overlay = self.colors[np.maximum(m, 0)]
    vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
    cv2.namedWindow('watershed', cv2.WINDOW_NORMAL)
    cv2.moveWindow('watershed', 780, 200)
    cv2.imshow('watershed', vis)