def roi(img, vertices):
    # blank mask:
    mask = np.zeros_like(img)
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, 255)
    # returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
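# A minimal usage sketch for roi() above; the file name and the triangular
# region are placeholders, and the cv2/numpy imports below are assumptions.
import cv2
import numpy as np

img = cv2.imread("frame.jpg", cv2.IMREAD_GRAYSCALE)   # hypothetical input image
h, w = img.shape[:2]
# one polygon with three int32 vertices, wrapped in an extra leading axis
vertices = np.array([[(0, h - 1), (w // 2, h // 2), (w - 1, h - 1)]], dtype=np.int32)
cv2.imwrite("frame_roi.jpg", roi(img, vertices))       # keep only the triangular region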
def load_contour(contour, img_path):
    # build the DICOM file name from the SAX series number and the image number
    filename = "IM-%s-%04d.dcm" % (SAX_SERIES[contour.case], contour.img_no)
    full_path = os.path.join(img_path, contour.case, filename)
    f = dicom.read_file(full_path)
    img = f.pixel_array.astype(int)
    # the contour file holds one "x y" pair per line; fillPoly expects int32 points
    ctrs = np.loadtxt(contour.ctr_path, delimiter=" ").astype(np.int32)
    label = np.zeros_like(img, dtype="uint8")
    # rasterize the contour polygon into a binary label mask
    cv2.fillPoly(label, [ctrs], 1)
    return img, label
def maskit(fname):
    m = cv.imread(fname)
    # fill the region described by the module-level "poly" with the background color
    cv.fillPoly(m, [np.array(poly, dtype=np.int32)], BACK_PIX)
    nfname = 'masked-' + fname
    cv.imwrite(nfname, m)
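# A hedged setup sketch for maskit(): the snippet relies on module-level "poly"
# and "BACK_PIX" and an "import cv2 as cv" alias, so hypothetical values are
# shown here; "photo.jpg" is a placeholder file name.
import cv2 as cv
import numpy as np

poly = np.array([(10, 10), (200, 10), (200, 150), (10, 150)], dtype=np.int32)  # region to blank out
BACK_PIX = (0, 0, 0)                                                           # fill color (black)

maskit("photo.jpg")  # writes "masked-photo.jpg" next to the input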
faceWarp.py (project: DelaunayVisualization-FacialWarp, author: sneha-belkhale)
def drawColoredTriangles(img, triangleList, disp):
    # sort the triangle list by distance from the top left corner in order to get
    # a gradient effect when drawing triangles
    # (functools.cmp_to_key replaces the Python 2 "cmp=" argument)
    triangleList = sorted(triangleList, key=functools.cmp_to_key(triDistanceSort))
    h, w, c = img.shape
    # get the bounding rectangle of the image
    r = (0, 0, w, h)
    # iterate through and draw all triangles in the list
    for idx, t in enumerate(triangleList):
        # grab the individual vertex points
        pt1 = [t[0], t[1]]
        pt2 = [t[2], t[3]]
        pt3 = [t[4], t[5]]
        # select a position for displaying the enumerated triangle value
        pos = (int(t[2]), int(t[3]))
        # create the triangle
        triangle = np.array([pt1, pt2, pt3], np.int32)
        # select a color in HSV (manipulate idx for cool color gradients)
        color = np.uint8([[[idx, 100, 200]]])
        # color = np.uint8([[[0, 0, idx]]])
        # convert the color to BGR
        bgr_color = cv2.cvtColor(color, cv2.COLOR_HSV2BGR)
        color = (int(bgr_color[0, 0, 0]), int(bgr_color[0, 0, 1]), int(bgr_color[0, 0, 2]))
        # draw the triangle if it is within the image bounds
        if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):
            cv2.fillPoly(img, [triangle], color)
            # if displaying triangle numbers was selected, draw the number;
            # this helps with triangle manipulation later
            if disp == 1:
                cv2.putText(img, str(idx), pos, fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                            fontScale=0.3, color=(0, 0, 0))
######################################## example script ########################################
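# A hedged driver sketch: drawColoredTriangles() expects a Delaunay triangle list
# such as the one produced by cv2.Subdiv2D; "face.jpg", the sample points, and the
# rect_contains/triDistanceSort helpers from the original file are assumptions here.
import cv2
import numpy as np

img = cv2.imread("face.jpg")                 # hypothetical input image
h, w = img.shape[:2]
points = [(0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1), (w // 2, h // 2)]

subdiv = cv2.Subdiv2D((0, 0, w, h))          # Delaunay subdivision over the image rect
for p in points:
    subdiv.insert(p)

drawColoredTriangles(img, subdiv.getTriangleList(), disp=1)
cv2.imwrite("face_triangles.jpg", img)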
def define_roi(self, image, above=0.0, below=0.0, side=0.0):
    '''
    Blacks out the parts of the image that are not of interest.

    Parameters
    ----------
    image : the image to be masked
    above (optional) : float
        Fraction of the image to black out from the top.
        Default is 0.0; 1.0 corresponds to 100%.
    below (optional) : float
        Fraction of the image to black out from the bottom.
        Default is 0.0; 1.0 corresponds to 100%.
    side (optional) : float
        Fraction of the image to black out from the sides.
        The sides are not masked straight down but as a trapezoid towards
        the upper masked image border (above).
        Default is 0.0; 1.0 corresponds to 100%.

    Returns
    -------
    image : the masked image
    '''
    height, width, channels = image.shape
    color_black = (0, 0, 0)
    # mask the lower part of the image
    image[height - int(height * below):height, :] = color_black
    # define the polygon points and mask the upper part and the sides of the image
    pts = np.array([[0, 0],
                    [0, int(height * (above + 0.15))],
                    [int(width * side), int(height * above)],
                    [width - int(width * side), int(height * above)],
                    [width, int(height * (above + 0.15))],
                    [width, 0]], np.int32)
    cv2.fillPoly(image, [pts], color_black)
    return image
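# A hedged usage sketch for define_roi(): it is an instance method, so "detector"
# stands for whatever lane-detection object owns it, and "road.jpg" is a placeholder.
import cv2

frame = cv2.imread("road.jpg")
# black out the top 40% and bottom 10%, tapering 20% in from each side
masked = detector.define_roi(frame, above=0.4, below=0.1, side=0.2)
cv2.imwrite("road_roi.jpg", masked)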
def generate_rbox(im_size, polys, tags):
    h, w = im_size
    poly_mask = np.zeros((h, w), dtype=np.uint8)
    score_map = np.zeros((h, w), dtype=np.uint8)
    geo_map = np.zeros((h, w, 8), dtype=np.float32)
    # mask used during training, to ignore some hard areas
    training_mask = np.ones((h, w), dtype=np.uint8)
    for poly_idx, poly_tag in enumerate(zip(polys, tags)):
        poly = poly_tag[0]
        tag = poly_tag[1]
        r = [None, None, None, None]
        for i in range(4):
            r[i] = min(np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
                       np.linalg.norm(poly[i] - poly[(i - 1) % 4]))
        # score map
        # shrinked_poly = shrink_poly(poly.copy(), r).astype(np.int32)[np.newaxis, :, :]
        # shrink function disabled
        shrinked_poly = poly.astype(np.int32)[np.newaxis, :, :]
        cv2.fillPoly(score_map, shrinked_poly, 1)
        cv2.fillPoly(poly_mask, shrinked_poly, poly_idx + 1)
        # if the poly is too small, then ignore it during training
        poly_h = min(np.linalg.norm(poly[0] - poly[3]), np.linalg.norm(poly[1] - poly[2]))
        poly_w = min(np.linalg.norm(poly[0] - poly[1]), np.linalg.norm(poly[2] - poly[3]))
        if min(poly_h, poly_w) < FLAGS.min_text_size:
            cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
        if tag:
            cv2.fillPoly(training_mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
        xy_in_poly = np.argwhere(poly_mask == (poly_idx + 1))
        for y, x in xy_in_poly:
            point = np.array([x, y], dtype=np.int32)
            # one geo channel per link direction (the string names the direction)
            geo_map[y, x, 0] = valid_link(point, score_map, w, h, 'left')
            geo_map[y, x, 1] = valid_link(point, score_map, w, h, 'left_down')
            geo_map[y, x, 2] = valid_link(point, score_map, w, h, 'left_up')
            geo_map[y, x, 3] = valid_link(point, score_map, w, h, 'right')
            geo_map[y, x, 4] = valid_link(point, score_map, w, h, 'right_down')
            geo_map[y, x, 5] = valid_link(point, score_map, w, h, 'right_up')
            geo_map[y, x, 6] = valid_link(point, score_map, w, h, 'up')
            geo_map[y, x, 7] = valid_link(point, score_map, w, h, 'down')
    return score_map, geo_map, training_mask
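# generate_rbox() hands each quad to cv2.fillPoly as poly.astype(np.int32)[np.newaxis, :, :],
# i.e. a (1, 4, 2) batch of int32 corner points. A standalone sketch of that convention,
# with made-up coordinates:
import cv2
import numpy as np

poly = np.array([[10.0, 10.0], [90.0, 12.0], [88.0, 60.0], [12.0, 58.0]])  # (4, 2) float corners
score_map = np.zeros((100, 100), dtype=np.uint8)
cv2.fillPoly(score_map, poly.astype(np.int32)[np.newaxis, :, :], 1)        # fill the quad with 1s
print(score_map.sum())  # roughly the number of pixels inside the quad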
def project_on_road(self, image_input):
    image = image_input[self.remove_pixels:, :]
    image = self.trans_per(image)
    self.im_shape = image.shape
    self.get_fit(image)
    if self.detected_first & self.detected:
        # create fill image
        temp_filler = np.zeros((self.remove_pixels, self.im_shape[1])).astype(np.uint8)
        filler = np.dstack((temp_filler, temp_filler, temp_filler))
        # create an image to draw the lines on
        warp_zero = np.zeros_like(image).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
        ploty = np.linspace(0, image_input.shape[0] - 1, image_input.shape[0])
        left_fitx = self.best_fit_l[0] * ploty**2 + self.best_fit_l[1] * ploty + self.best_fit_l[2]
        right_fitx = self.best_fit_r[0] * ploty**2 + self.best_fit_r[1] * ploty + self.best_fit_r[2]
        # recast the x and y points into a usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))
        # draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
        # warp the blank back to original image space using the inverse perspective matrix (Minv)
        newwarp = cv2.warpPerspective(color_warp, self.Minv, color_warp.shape[-2:None:-1])
        left_right = cv2.warpPerspective(self.left_right, self.Minv, color_warp.shape[-2:None:-1])
        # combine the result with the original image
        left_right_fill = np.vstack((filler, left_right))
        result = cv2.addWeighted(left_right_fill, 1, image_input, 1, 0)
        result = cv2.addWeighted(result, 1, np.vstack((filler, newwarp)), 0.3, 0)
        # get curvature and offset
        self.calculate_curvature_offset()
        # plot text on the resulting image
        img_text = "radius of curvature: " + str(round((self.left_curverad + self.right_curverad) / 2, 2)) + ' (m)'
        if self.offset < 0:
            img_text2 = "vehicle is: " + str(round(np.abs(self.offset), 2)) + ' (m) left of center'
        else:
            img_text2 = "vehicle is: " + str(round(np.abs(self.offset), 2)) + ' (m) right of center'
        result2 = cv2.resize(result, (0, 0), fx=self.enlarge, fy=self.enlarge)
        cv2.putText(result2, img_text, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
        cv2.putText(result2, img_text2, (15, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
        return result2
    # if the lanes were not detected, output the source image
    else:
        return cv2.resize(image_input, (0, 0), fx=self.enlarge, fy=self.enlarge)