def image_callback(self, msg):
    image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([18, 120, 200])
    upper_yellow = numpy.array([28, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    h, w, d = image.shape
    # keep only a 20-pixel band just below 3/4 of the image height
    search_top = 3 * h // 4
    search_bot = search_top + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(image, (cx, cy), 20, (0, 0, 255), -1)
        # BEGIN CONTROL
        err = cx - w / 2
        self.twist.linear.x = 0.2
        self.twist.angular.z = -float(err) / 100
        self.cmd_vel_pub.publish(self.twist)
        # END CONTROL
    cv2.imshow("window", image)
    cv2.waitKey(3)
Python cv2.circle() example source code
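Before the collected snippets, here is a minimal, self-contained sketch of the call they all revolve around; the canvas size, colours, and output file name are illustrative placeholders, not taken from any of the projects below.

import numpy as np
import cv2

canvas = np.zeros((300, 300, 3), dtype=np.uint8)      # blank BGR canvas

# cv2.circle(img, center, radius, color, thickness)
# thickness > 0 draws an outline of that width; thickness = -1 fills the circle.
cv2.circle(canvas, (150, 150), 60, (0, 255, 0), 2)     # green outline
cv2.circle(canvas, (150, 150), 10, (0, 0, 255), -1)    # filled red dot

cv2.imwrite("circle_demo.png", canvas)                 # illustrative output path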
def draw_termites(self):
    """Draw termites on simulation.

    Args:
        None.
    Returns:
        None.
    """
    for termite in self.termites:
        # Draw each termite on the simulation background...
        cv2.circle(self.background, termite.trail[self.current_step],
                   self.params['termite_radius'], termite.color, 2)
        cv2.circle(self.background, termite.trail[self.current_step],
                   2, termite.color, -1)
        cv2.putText(self.background, termite.number, (termite.trail[self.current_step][0] - 4,
                    termite.trail[self.current_step][1] - self.params['termite_radius'] - 5), 2,
                    color=termite.color, fontScale=0.4)
        # ...and on the current video frame.
        cv2.circle(self.video_source.current_frame, termite.trail[self.current_step],
                   self.params['termite_radius'], termite.color, 2)
        cv2.circle(self.video_source.current_frame, termite.trail[self.current_step],
                   2, termite.color, -1)
        cv2.putText(self.video_source.current_frame, termite.number, (termite.trail[self.current_step][0] - 4,
                    termite.trail[self.current_step][1] - self.params['termite_radius'] - 5), 2,
                    color=termite.color, fontScale=0.4)
def draw_joints_15(test_image, joints, save_image):
    image = cv2.imread(test_image)
    # OpenCV drawing functions expect integer pixel coordinates
    joints = np.asarray(joints, dtype=int)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in range(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 1)
    # draw torso in yellow lines
    torso = [[0, 1], [0, 14], [5, 10]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 1)
    # draw left part in pink lines
    lpart = [[14, 13], [13, 12], [12, 11], [13, 10], [10, 9], [9, 8]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 1)
    # draw right part in blue lines
    rpart = [[1, 2], [2, 3], [3, 4], [2, 5], [5, 6], [6, 7]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 1)
    cv2.imwrite(save_image, image)
def draw_circle(img, radius, x, y):
    """
    Render circle
    :param img    : Canvas
    :param radius : Radius of the circle
    :param x      : (x, y) is the center of the circle graph on the canvas
    :param y      : (x, y) is the center of the circle graph on the canvas
    """
    ############################################################
    # Write your code here!                                    #
    ############################################################
    cv2.circle(img, (x, y), radius, (20, 215, 20), radius)
    ############################################################
    # End                                                      #
    ############################################################
def transparent_circle(img, center, radius, color, thickness):
    center = tuple(map(int, center))
    rgb = [255*c for c in color[:3]]  # convert to 0-255 scale for OpenCV
    alpha = color[-1]
    radius = int(radius)
    if thickness > 0:
        pad = radius + 2 + thickness
    else:
        pad = radius + 3
    roi = slice(center[1]-pad, center[1]+pad), slice(center[0]-pad, center[0]+pad)
    try:
        overlay = img[roi].copy()
        cv2.circle(img, center, radius, rgb, thickness=thickness, lineType=cv2.LINE_AA)
        opacity = alpha
        cv2.addWeighted(src1=img[roi], alpha=opacity, src2=overlay, beta=1. - opacity, gamma=0, dst=img[roi])
    except:
        logger.debug("transparent_circle would have been partially outside of img. Did not draw it.")
def brush_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode, r, g, b, radius
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True  # start to draw when L button down
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True and mode == True:
            cv2.circle(img, (x, y), radius, (b, g, r), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False  # end drawing when L button up
        if mode == True:
            cv2.circle(img, (x, y), radius, (b, g, r), -1)
# Create a black image, a window
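The snippet above ends where the original script would create its canvas and window. A minimal sketch of that wiring follows, assuming the global `img` and the `brush_circle` handler defined above; the window name, canvas size, initial colour values, and Esc-to-quit binding are illustrative assumptions, not the original project's code.

import numpy as np
import cv2

img = np.zeros((512, 512, 3), dtype=np.uint8)   # black canvas the callback draws on
drawing, mode = False, True
ix, iy = -1, -1
r, g, b, radius = 0, 255, 0, 5

cv2.namedWindow('image')
cv2.setMouseCallback('image', brush_circle)      # register the handler defined above
while True:
    cv2.imshow('image', img)
    if cv2.waitKey(1) & 0xFF == 27:              # Esc quits
        break
cv2.destroyAllWindows()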
def draw_flow(img, flow, step=8):
    h, w = img.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
#####################################################################
# define video capture object
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
def draw_circle(event, x, y, flags, param):
    global drawing, drawing1
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
    if event == cv2.EVENT_RBUTTONDOWN:
        drawing1 = True
    if event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
        if drawing1 == True:
            cv2.circle(img, (x, y), 5, (0, 255, 0), -1)
    if event == cv2.EVENT_LBUTTONUP:
        drawing = False
    if event == cv2.EVENT_RBUTTONUP:
        drawing1 = False
#print (drawing)
def find_center(self, name, frame, mask, min_radius):
    if name not in self.pts:
        self.pts[name] = deque(maxlen=self.params['tracking']['buffer_size'])
    # find contours in the mask and initialize the current (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use it to compute the
        # minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        # only proceed if the radius meets a minimum size
        if radius > min_radius:
            # draw the circle and centroid on the frame, then update the list of tracked points
            cv2.circle(frame, center, int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            self.pts[name].appendleft(center)
            smooth_points = 8
            return (int(np.mean([self.pts[name][i][0] for i in range(min(smooth_points, len(self.pts[name])))])),
                    int(np.mean([self.pts[name][i][1] for i in range(min(smooth_points, len(self.pts[name])))]))), radius
    return None, None
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
HandRecognition.py — from project hand-gesture-recognition-opencv, author mahaveerverma.
def mark_hand_center(frame_in, cont):
    max_d = 0
    pt = (0, 0)
    x, y, w, h = cv2.boundingRect(cont)
    for ind_y in range(int(y+0.3*h), int(y+0.8*h)):  # around 0.25 to 0.6 region of height (faster calculation with ok results)
        for ind_x in range(int(x+0.3*w), int(x+0.6*w)):  # around 0.3 to 0.6 region of width (faster calculation with ok results)
            dist = cv2.pointPolygonTest(cont, (ind_x, ind_y), True)
            if dist > max_d:
                max_d = dist
                pt = (ind_x, ind_y)
    if max_d > radius_thresh*frame_in.shape[1]:
        thresh_score = True
        cv2.circle(frame_in, pt, int(max_d), (255, 0, 0), 2)
    else:
        thresh_score = False
    return frame_in, pt, max_d, thresh_score
# 6. Find and display gesture
def draw_joints(test_image, joints, save_image):
    image = cv2.imread(test_image)
    joints = np.vstack((joints, (joints[8, :] + joints[11, :])/2))
    # OpenCV drawing functions expect integer pixel coordinates
    joints = np.asarray(joints, dtype=int)
    # bounding box
    bbox = [min(joints[:, 0]), min(joints[:, 1]), max(joints[:, 0]), max(joints[:, 1])]
    # draw bounding box in red rectangle
    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
    # draw joints in green spots
    for j in range(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw torso in yellow lines
    torso = [[0, 1], [1, 14], [2, 14], [5, 14]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (0, 255, 255), 2)
    # draw left part in pink lines
    lpart = [[1, 5], [5, 6], [6, 7], [5, 14], [14, 11], [11, 12], [12, 13]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (255, 0, 255), 2)
    # draw right part in blue lines
    rpart = [[1, 2], [2, 3], [3, 4], [2, 14], [14, 8], [8, 9], [9, 10]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    cv2.imwrite(save_image, image)
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1)
    fx, fy = flow[y, x].T
    # keep only flow vectors with finite components
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
def plot_points(image, points, color=(0, 0, 255)):
    """Draw circles at given locations on image.

    Arguments:
        image -- Image to draw on.
        points -- x,y pairs of points to plot.
    """
    # find best radius for image
    image_width = image.shape[1]
    radius = int(image_width / 400)
    # draw
    for point in points:
        point = tuple(map(int, point))
        cv2.circle(image, point, color=color, radius=radius,
                   thickness=max(1, radius // 2))  # thickness must be a positive int (or -1 to fill)
def _get_mask(self, scan, slide, series):
    img, s, o, origShape = scan
    mask = np.zeros((origShape[1], origShape[2]))
    nodules = self._nodule_info[series]
    for nodule in nodules:
        iid, z, edges = nodule
        z = int((z - o[2])/s[2])
        if z == slide:
            if edges.shape[0] > 1:
                cv.fillPoly(mask, [edges], 255)
            else:
                # It's a small nodule. Make a circle of radius 3mm
                edges = np.squeeze(edges)
                center = tuple(edges)
                radius = max(3.0/s[0], 3.0/s[1])
                cv.circle(mask, center, int(radius+1), 255, -1)
    if img.shape[1] != origShape[1] or img.shape[2] != origShape[2]:
        mask = imu.resize_2d(mask, (img.shape[1], img.shape[2]))
    return mask
def draw_circle(event, x, y, flags, param):
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)  # -1 for last argument like CV_FILLED
            else:
                cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if mode == True:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), 1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
def execute_BlobDetector(proxy, obj):
    try: img = obj.sourceObject.Proxy.img.copy()
    except: img = cv2.imread(__dir__+'/icons/freek.png')
    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im = 255 - im
    im2 = img
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = obj.Area
    params.filterByConvexity = True
    params.minConvexity = obj.Convexity/200
    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            # cv2.circle(im,(x,y),4,0,5)
            cv2.circle(im, (x, y), 4, 255, 5)
            cv2.circle(im, (x, y), 4, 0, 5)
            im[y, x] = 255
            im[y, x] = 0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    else:
        for k in keypoints:
            (x, y) = k.pt
            x = int(round(x))
            y = int(round(y))
            cv2.circle(im2, (x, y), 4, (255, 0, 0), 5)
            cv2.circle(im2, (x, y), 4, (0, 0, 0), 5)
            im2[y, x] = (255, 0, 0)
            im2[y, x] = (0, 0, 0)
        obj.Proxy.img = im2
def execute_GoodFeaturesToTrack(proxy, obj):
    '''
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
    '''
    try: img = obj.sourceObject.Proxy.img.copy()
    except: img = cv2.imread(__dir__+'/icons/freek.png')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(gray, obj.maxCorners, obj.qualityLevel, obj.minDistance)
    corners = np.int0(corners)
    for i in corners:
        x, y = i.ravel()
        cv2.circle(img, (x, y), 3, 255, -1)
    obj.Proxy.img = img
def detectmarker(image):
    grayscale = getgrayimage(image)
    mkradius = getapproxmarkerradius(grayscale)  # approximate marker radius
    marker = cv2.resize(MARKER, (mkradius*2, mkradius*2))  # resize the marker
    # template matching
    matched = cv2.matchTemplate(grayscale, marker, cv2.TM_CCORR_NORMED)  # returns float32
    # detect the 4 greatest values
    markerposarray = []
    for i in range(4):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
        markerposarray.append(tuple(map(lambda x: x+mkradius, maxloc)))
        cv2.circle(matched, maxloc, mkradius, (0.0), -1)  # suppress the neighbourhood of the current maximum
    return markerposarray
def init_mask(self, event, x, y, flags, param):
    self._thickness = 3            # The thickness in drawing;
    self._WHITE = [255, 255, 255]  # Pure white;
    # Draw a point on the image;
    if event == cv2.EVENT_RBUTTONDOWN:
        if self._drawing == True:
            cv2.circle(self.img, (x, y), self._thickness, self._WHITE, -1)
            self.mask[y-self._thickness:y+self._thickness, x-self._thickness:x+self._thickness] = self._SHADOW
            self._shadow_seed = self.img[y-self._thickness:y+self._thickness, x-self._thickness:x+self._thickness].copy()
    elif event == cv2.EVENT_RBUTTONUP:
        if self._drawing == True:
            self._drawing = False
            self._drawn = True
            cv2.circle(self.img, (x, y), self._thickness, self._WHITE, -1)
def mark_point(img, x, y):
    """
    Mark a point

    Args:
        - img(numpy): the source image
        - x, y(int): position
    """
    overlay = img.copy()
    output = img.copy()
    alpha = 0.5
    radius = max(5, min(img.shape[:2])//15)
    center = int(x), int(y)
    color = (0, 0, 255)
    cv2.circle(overlay, center, radius, color, -1)
    cv2.addWeighted(overlay, alpha, output, 1-alpha, 0, output)
    return output
def do_draw(self, event, x, y, flags, param):
    draw_vals = {1: 100, 2: 0}
    if event == cv2.EVENT_LBUTTONUP or event == cv2.EVENT_RBUTTONUP:
        self.drawing = 0
    elif event == cv2.EVENT_LBUTTONDOWN:
        self.drawing = 1
    elif event == cv2.EVENT_RBUTTONDOWN:
        self.drawing = 2
    elif self.drawing != 0:
        cv2.circle(self.img, (x, y), 5, draw_vals[self.drawing], -1)
Modules.py — from project apparent-age-gender-classification, author danielyou0230.
def facial_landmark_detection(image, detector, predictor, file):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape
    landmark_faces = detector(gray, 1)
    faces = list()
    area = 0
    face_idx = 0
    bItr = False
    for (idx, landmark_faces) in enumerate(landmark_faces):
        shape = predictor(gray, landmark_faces)
        shape = shape_to_np(shape)
        (x, y, w, h) = rect_to_bb(landmark_faces, img_size, file)
        if (w * h) > area:
            area = w * h
            faces = [x, y, w, h]
            bItr = True
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        #cv2.putText(image, "Face #{}".format(idx + 1), (x - 10, y - 10), \
        #            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        #for (x, y) in shape:
        #    cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
    return bItr, faces
def side_intersect(self, image, contours, row, markup=True):
    """ Find intersections to both sides along a row """
    if markup:
        cv2.line(image, (0, row), (image.shape[1], row), (0, 0, 255), 1)
    cnt_l, col_l = self.find_intersect(image, contours, row, -1)
    if markup and cnt_l is not None:
        cv2.drawContours(image, [contours[cnt_l]], -1, (0, 255, 255), -1)
        cv2.circle(image, (col_l, row), 4, (0, 255, 0), 2)
    cnt_r, col_r = self.find_intersect(image, contours, row, 1)
    if markup and cnt_r is not None:
        cv2.drawContours(image, [contours[cnt_r]], -1, (255, 255, 0), -1)
        cv2.circle(image, (col_r, row), 4, (0, 255, 0), 2)
    return (cnt_l, col_l), (cnt_r, col_r)
def draw_humans(img, human_list):
    img_copied = np.copy(img)
    image_h, image_w = img_copied.shape[:2]
    centers = {}
    for human in human_list:
        part_idxs = human.keys()
        # draw point
        for i in range(CocoPart.Background.value):
            if i not in part_idxs:
                continue
            part_coord = human[i][1]
            center = (int(part_coord[0] * image_w + 0.5), int(part_coord[1] * image_h + 0.5))
            centers[i] = center
            cv2.circle(img_copied, center, 3, CocoColors[i], thickness=3, lineType=8, shift=0)
        # draw line
        for pair_order, pair in enumerate(CocoPairsRender):
            if pair[0] not in part_idxs or pair[1] not in part_idxs:
                continue
            img_copied = cv2.line(img_copied, centers[pair[0]], centers[pair[1]], CocoColors[pair_order], 3)
    return img_copied
def _show(self, path, inpmat, heatmat, pafmat, humans):
    image = cv2.imread(path)
    # CocoPoseLMDB.display_image(inpmat, heatmat, pafmat)
    image_h, image_w = image.shape[:2]
    heat_h, heat_w = heatmat.shape[:2]
    for _, human in humans.items():
        for part in human:
            if part['partIdx'] not in common.CocoPairsRender:
                continue
            center1 = (int((part['c1'][0] + 0.5) * image_w / heat_w), int((part['c1'][1] + 0.5) * image_h / heat_h))
            center2 = (int((part['c2'][0] + 0.5) * image_w / heat_w), int((part['c2'][1] + 0.5) * image_h / heat_h))
            cv2.circle(image, center1, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.circle(image, center2, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.putText(image, str(part['partIdx'][1]), center2, cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0), 1)
            image = cv2.line(image, center1, center2, (255, 0, 0), 1)
    cv2.imshow('result', image)
    cv2.waitKey(0)
def draw(event, x, y, flags, param):
    global drawing, ix, iy, shape, canvas, brush
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            if shape == 1:
                cv2.circle(canvas, (x, y), pencil, color, -1)
            elif shape == 2:
                cv2.circle(canvas, (x, y), brush, color, -1)
            elif shape == 3:
                cv2.circle(canvas, (x, y), eraser, (255, 255, 255), -1)
            elif shape == 5:
                cv2.rectangle(canvas, (ix, iy), (x, y), color, -1)
            elif shape == 6:
                cv2.circle(canvas, (x, y), calc_radius(x, y), color, -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        if shape == 1:
            cv2.circle(canvas, (x, y), pencil, color, -1)
        elif shape == 2:
            cv2.circle(canvas, (x, y), brush, color, -1)
        elif shape == 3:
            cv2.circle(canvas, (x, y), eraser, (255, 255, 255), -1)
        elif shape == 4:
            cv2.line(canvas, (ix, iy), (x, y), color, pencil)
        elif shape == 5:
            cv2.rectangle(canvas, (ix, iy), (x, y), color, -1)
        elif shape == 6:
            cv2.circle(canvas, (x, y), calc_radius(x, y), color, -1)
def saveAnnotatedSample(self, path):
    skel2 = self.crop2D()
    skel2 = skel2.reshape(-1, 3)
    for i, pt in enumerate(skel2):
        skel2[i] = Camera.to2D(pt)
    print('current camera option={}'.format(Camera.focal_x))
    skel = self.skel
    skel.shape = (-1, 3)
    dm = self.norm_dm.copy()
    dm[dm == Camera.far_point] = 0
    ax = fig.add_subplot(121)
    img = dm.copy()
    img = img - img.min()
    img *= 255/img.max()
    for pt in skel2:
        cv2.circle(img, (pt[0], pt[1]), 2, (255, 0, 0), -1)
    cv2.imwrite(path, img)
def vis_pose(normed_vec):
    import depth
    origin_pt = np.array([0, 0, depth.DepthMap.invariant_depth])
    vec = normed_vec.copy()*50.0
    vec.shape = (-1, 3)
    offset_x = Camera.center_x - depth.DepthMap.size2[0]/2
    offset_y = Camera.center_y - depth.DepthMap.size2[1]/2
    img = np.ones((depth.DepthMap.size2[0], depth.DepthMap.size2[1]))*255
    img = cv2.cvtColor(img.astype('uint8'), cv2.COLOR_GRAY2BGR)
    for idx, pt3 in enumerate(vec):
        pt = Camera.to2D(pt3+origin_pt)
        pt = (pt[0]-offset_x, pt[1]-offset_y)
        cv2.circle(img, (int(pt[0]), int(pt[1])), 2, (255, 0, 0), -1)
    return img