def boundingRects(scale, contours):
    for contour in contours:
        epsilon = 0.1 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        x, y, w, h = cv2.boundingRect(approx)
        yield [x * scale, y * scale, w * scale, h * scale]
Python cv2.approxPolyDP() usage examples
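Nearly every snippet on this page follows the same idiom: measure a closed contour's perimeter with cv2.arcLength, hand a fraction of that perimeter to cv2.approxPolyDP as the approximation tolerance, and inspect the vertex count of the result. A minimal, self-contained sketch of that idiom (the synthetic test image and the 2% tolerance are illustrative choices, not taken from the snippets below):

import cv2
import numpy as np

# Synthetic test image: one white rectangle on black.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (40, 40), (160, 120), 255, -1)

# [-2] works on OpenCV 2, 3 and 4, whose findContours return tuples differ.
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = max(contours, key=cv2.contourArea)
peri = cv2.arcLength(cnt, True)                    # perimeter of the closed contour
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)  # tolerance = 2% of the perimeter
print(len(approx))                                 # 4 for a clean rectangle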
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)  # finds the contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card
    # but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
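Several of the get_contours variants on this page call a helper _findContours that is not shown here. A plausible version-agnostic sketch of it (an assumption, not the original helper):

def _findContours(image, mode, method):
    # cv2.findContours returns (image, contours, hierarchy) on OpenCV 3 and
    # (contours, hierarchy) on OpenCV 2/4; the contour list is always second-to-last.
    return cv2.findContours(image.copy(), mode, method)[-2]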
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3:
        im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:  # OpenCV 2/4 return only (contours, hierarchy)
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)  # finds the contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card
    # but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.1, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)  # finds the contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card
    # but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def get_contours(image, polydb=0.03, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)  # finds the contour perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card
    # but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x, y, w, h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return screenCnt
def countVertices(contour):
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
    return len(approx)
# Classifies a contour as a generic polygon shape
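The trailing comment points at a polygon classifier built on countVertices. A plausible sketch of one (the function name and label set are assumptions, not the original code):

def classifyPolygon(contour):
    # Hypothetical: map the approximated vertex count to a generic shape label.
    names = {3: "triangle", 4: "quadrilateral", 5: "pentagon", 6: "hexagon"}
    return names.get(countVertices(contour), "circle")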
digital_display_ocr.py (project: digital-display-character-rec, author: upupnaway)
def find_display_contour(edge_img_arr):
    display_contour = None
    edge_copy = edge_img_arr.copy()
    contours, hierarchy = cv2.findContours(edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    top_cntrs = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    for cntr in top_cntrs:
        peri = cv2.arcLength(cntr, True)
        approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)
        if len(approx) == 4:
            display_contour = approx
            break
    return display_contour
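A typical call site might blur and edge-detect the frame first, then pass the edge map in. A hypothetical sketch (the file name and Canny thresholds are illustrative; note the two-value findContours unpacking above assumes OpenCV 2 or 4):

import cv2

gray = cv2.imread("display.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical input file
edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 200)
quad = find_display_contour(edges)
if quad is not None:
    print(quad.reshape(4, 2))  # the four corners of the display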
def FindField(self):
    # field: hue between 60 and 100 (the bounds below actually use 40 to 90)
    LowerGreen = np.array([40, 0, 0])
    UpperGreen = np.array([90, 255, 150])
    mask = cv2.inRange(self.ImgHSV, LowerGreen, UpperGreen)
    # plt.figure()
    # plt.imshow(mask, cmap='gray')
    mask = self.SmoothFieldMask(mask)
    # plt.figure()
    # plt.imshow(mask.copy(), cmap='gray')
    im2, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) <= 0:
        return
    contours_sorted = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    peri = cv2.arcLength(contours_sorted[0], True)
    approx = cv2.approxPolyDP(contours_sorted[0], 0.02 * peri, True)
    if len(approx) > -1:  # originally `== 4`; the four-point check was disabled
        self.FieldContours = approx
        cv2.rectangle(mask, (self.FieldContours[0][0][0], self.FieldContours[0][0][1]),
                      (self.FieldContours[2][0][0], self.FieldContours[2][0][1]), (128, 128, 128), 3)
    # plt.imshow(mask, cmap="gray")
    # plt.show()
def Quadrify(contour):
    epsilon = 10
    for i in range(1, 10):
        quad = cv2.approxPolyDP(contour, epsilon, True)
        length = len(quad)
        randomVar = np.random.random()
        # scale epsilon by (length + jitter) / (4 + jitter): grow it when the
        # approximation has more than 4 points, shrink it when it has fewer;
        # the random jitter breaks deterministic cycles
        epsilon = np.multiply(epsilon, np.true_divide(np.add(length, randomVar), np.add(4, randomVar)))
        # print(epsilon, length)
        if length == 4:
            return np.multiply(i, 0.01)
    return 1
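Despite its name, Quadrify returns a score rather than a quadrilateral: i * 0.01 if the approximation collapsed to four points on iteration i, else 1. That makes it usable as a ranking key. A self-contained sketch (the synthetic image is illustrative):

import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (30, 30), (170, 130), 255, -1)
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
best = min(contours, key=Quadrify)  # lowest score = most readily quadrified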
def detect_cnt_again(poly, base_img):
    """
    Re-detect the sheet contour inside the region bounded by `poly` and
    perspective-correct it. (The original comments were mojibake; these
    descriptions are reconstructed from the code itself.)
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # flag marks whether a valid contour was re-detected inside the ROI
    flag = False
    # crop the region of interest out of the base image using the four corners
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)
    # find the largest-area contour in the ROI
    cnt = get_max_area_cnt(img)
    # only accept it if it covers a large enough share of the ROI
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if not poly.shape[0] == 4:
            raise PolyNodeCountError
    # map the four detected corners onto the full output rectangle
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                             [base_img.shape[1], 0],
                             [0, base_img.shape[0]],
                             [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)
    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp
def get_polygonized_contours(self, distance):
    self.polygonized_contours = [cv2.approxPolyDP(cnt, distance, True) for cnt in self.simple_contours]
    # return ContoursFilter(self.polygonized_contours).get_filtered_contour()
    return self.polygonized_contours
def __init__(self, g_pool):
    super().__init__(g_pool)
    self.menu = None
    logger.error("This will be implemented as part of gaze mapper soon.")
    self.alive = False
    return  # the plugin disables itself above; everything below is unreachable
    width, height = self.g_pool.capture.frame_size
    if g_pool.app == 'capture':
        cal_pt_path = os.path.join(g_pool.user_dir, "user_calibration_data")
    else:
        cal_pt_path = os.path.join(g_pool.rec_dir, "user_calibration_data")
    try:
        user_calibration_data = load_object(cal_pt_path)
    except:
        logger.warning("Please calibrate first")
        self.close()
        return
    # NOTE: cal_pt_cloud, map_fn, fn_input_eye0 and inlier_map are presumably
    # unpacked from user_calibration_data in lines missing from this excerpt.
    if self.g_pool.binocular:
        fn_input_eye1 = cal_pt_cloud[:, 2:4].transpose()
        cal_pt_cloud[:, 0:2] = np.array(map_fn(fn_input_eye0, fn_input_eye1)).transpose()
        cal_pt_cloud[:, 2:4] = cal_pt_cloud[:, 4:6]
    else:
        fn_input = cal_pt_cloud[:, 0:2].transpose()
        cal_pt_cloud[:, 0:2] = np.array(map_fn(fn_input)).transpose()
    ref_pts = cal_pt_cloud[inlier_map][:, np.newaxis, 2:4]
    ref_pts = np.array(ref_pts, dtype=np.float32)
    logger.debug("calibration ref_pts %s" % ref_pts)
    if len(ref_pts) == 0:
        logger.warning("Calibration is bad. Please re-calibrate")
        self.close()
        return
    self.calib_bounds = cv2.convexHull(ref_pts)
    # create a list [[px1,py1],[wx1,wy1],[px2,py2],[wx2,wy2]...] of outliers and inliers for gl_lines
    self.outliers = np.concatenate((cal_pt_cloud[~inlier_map][:, 0:2], cal_pt_cloud[~inlier_map][:, 2:4])).reshape(-1, 2)
    self.inliers = np.concatenate((cal_pt_cloud[inlier_map][:, 0:2], cal_pt_cloud[inlier_map][:, 2:4]), axis=1).reshape(-1, 2)
    self.inlier_ratio = cal_pt_cloud[inlier_map].shape[0] / float(cal_pt_cloud.shape[0])
    self.inlier_count = cal_pt_cloud[inlier_map].shape[0]
    # hull = cv2.approxPolyDP(self.calib_bounds, 0.001, closed=True)
    full_screen_area = 1.
    logger.debug("calibration bounds %s" % self.calib_bounds)
    self.calib_area_ratio = cv2.contourArea(self.calib_bounds) / full_screen_area
def build_correspondance(self, visible_markers, camera_calibration, min_marker_perimeter, min_id_confidence):
    """
    - use all visible markers
    - fit a convex quadrangle around it
    - use quadrangle verts to establish perspective transform
    - map all markers into surface space
    - build up list of found markers and their uv coords
    """
    all_verts = [m['verts'] for m in visible_markers if m['perimeter'] >= min_marker_perimeter]
    if not all_verts:
        return
    all_verts = np.array(all_verts, dtype=np.float32)
    all_verts.shape = (-1, 1, 2)  # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
    # all_verts_undistorted_normalized centered in img center flipped in y and range [-1,1]
    all_verts_undistorted_normalized = cv2.undistortPoints(all_verts, camera_calibration['camera_matrix'], camera_calibration['dist_coefs'] * self.use_distortion)
    hull = cv2.convexHull(all_verts_undistorted_normalized, clockwise=False)
    # simplify until we have exactly 4 verts
    if hull.shape[0] > 4:
        new_hull = cv2.approxPolyDP(hull, epsilon=1, closed=True)
        if new_hull.shape[0] >= 4:
            hull = new_hull
    if hull.shape[0] > 4:
        curvature = abs(GetAnglesPolyline(hull, closed=True))
        most_acute_4_threshold = sorted(curvature)[3]
        hull = hull[curvature <= most_acute_4_threshold]
    # all_verts_undistorted_normalized space is flipped in y.
    # we need to change the order of the hull vertices
    hull = hull[[1, 0, 3, 2], :, :]
    # now we need to roll the hull verts until we have the right orientation:
    # all_verts_undistorted_normalized space has its origin at the image center.
    # adding 1 to the coordinates puts the origin at the top left.
    distance_to_top_left = np.sqrt((hull[:, :, 0] + 1) ** 2 + (hull[:, :, 1] + 1) ** 2)
    bot_left_idx = np.argmin(distance_to_top_left) + 1
    hull = np.roll(hull, -bot_left_idx, axis=0)
    # based on these 4 verts we calculate the transformations into a 0,0 1,1 square space
    m_from_undistored_norm_space = m_verts_from_screen(hull)
    self.detected = True
    # map the marker vertices into the surface space (one can think of these as texture coordinates u,v)
    marker_uv_coords = cv2.perspectiveTransform(all_verts_undistorted_normalized, m_from_undistored_norm_space)
    marker_uv_coords.shape = (-1, 4, 1, 2)  # [marker,marker...] marker = [ [[r,c]],[[r,c]] ]
    # build up a dict of discovered markers, each with a history of uv coordinates
    for m, uv in zip(visible_markers, marker_uv_coords):
        try:
            self.markers[m['id']].add_uv_coords(uv)
        except KeyError:
            self.markers[m['id']] = Support_Marker(m['id'])
            self.markers[m['id']].add_uv_coords(uv)
    # average collection of uv correspondences across detected markers
    self.build_up_status = sum([len(m.collected_uv_coords) for m in self.markers.values()]) / float(len(self.markers))
    if self.build_up_status >= self.required_build_up:
        self.finalize_correnspondance()
submission.py (project: Dstl-Satellite-Imagery-Feature-Detection, author: DeepVoltaire)
def mask_to_polygons(mask, epsilon=1, min_area=1.):
    """
    Create a MultiPolygon from a mask of 0-1 pixels.
    """
    # find contours of mask of pixels
    image, contours, hierarchy = cv2.findContours(
        ((mask == 1) * 255).astype(np.uint8),
        cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
    # create approximate contours to have reasonable submission size
    approx_contours = [cv2.approxPolyDP(cnt, epsilon, True)
                       for cnt in contours]
    if not contours:
        return MultiPolygon()
    # now messy stuff to associate parent and child contours
    cnt_children = defaultdict(list)
    child_contours = set()
    assert hierarchy.shape[0] == 1
    # http://docs.opencv.org/3.1.0/d9/d8b/tutorial_py_contours_hierarchy.html
    for idx, (_, _, _, parent_idx) in enumerate(hierarchy[0]):
        if parent_idx != -1:
            child_contours.add(idx)
            cnt_children[parent_idx].append(approx_contours[idx])
    # create actual polygons filtering by area (removes artifacts)
    all_polygons = []
    for idx, cnt in enumerate(approx_contours):
        if idx not in child_contours and cv2.contourArea(cnt) >= min_area:
            assert cnt.shape[1] == 1
            poly = Polygon(
                shell=cnt[:, 0, :],
                holes=[c[:, 0, :] for c in cnt_children.get(idx, [])
                       if cv2.contourArea(c) >= min_area])
            all_polygons.append(poly)
    # approximating polygons might have created invalid ones, fix them
    all_polygons = MultiPolygon(all_polygons)
    if not all_polygons.is_valid:
        all_polygons = all_polygons.buffer(0)
        # Sometimes buffer() converts a simple Multipolygon to just a Polygon,
        # need to keep it a Multi throughout
        if all_polygons.type == 'Polygon':
            all_polygons = MultiPolygon([all_polygons])
    return all_polygons
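A minimal call, assuming the imports the snippet already implies (shapely's Polygon/MultiPolygon, collections.defaultdict, numpy as np, and the OpenCV 3 three-value findContours unpacking); the toy mask is illustrative:

import numpy as np

mask = np.zeros((100, 100), dtype=np.uint8)
mask[20:60, 20:60] = 1   # a square region...
mask[30:50, 30:40] = 0   # ...with a rectangular hole
polys = mask_to_polygons(mask, epsilon=1, min_area=4.)
print(len(polys.geoms), polys.is_valid)  # one polygon carrying one hole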
def process_captcha(self, image):
    """
    Crop `image` (a PIL image) down to the captcha rectangle, found as the
    largest 4-point contour within an expected area range. Returns None if
    no suitable contour is found.
    """
    cv2_img = cv2.cvtColor(numpy.array(image), cv2.COLOR_BGR2GRAY)
    # Threshold the image so that we can identify contours. Note: the original
    # passed cv2.ADAPTIVE_THRESH_GAUSSIAN_C here, which cv2.threshold interprets
    # by value as cv2.THRESH_BINARY_INV; the intended flag is spelled out.
    ret, thresh = cv2.threshold(
        cv2_img,
        127,
        255,
        cv2.THRESH_BINARY_INV
    )
    # Find the contours of the image
    _, contours, hierarchy = cv2.findContours(
        thresh,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE
    )
    # Find the largest contour in the image with 4 points. This is the
    # rectangle that is required to crop to for the captcha.
    largest_contour = None
    for contour in contours:
        if (len(cv2.approxPolyDP(contour, 0.1 * cv2.arcLength(contour, True), True)) == 4) and (2500 < cv2.contourArea(contour) < 4000):
            if largest_contour is None:
                largest_contour = contour
                continue
            if cv2.contourArea(contour) > cv2.contourArea(largest_contour):
                largest_contour = contour
    # If we don't have a matching contour, don't try to crop and such
    if largest_contour is None:
        return None
    # If we do have a matching contour, build the rectangle
    crop_x, crop_y, crop_width, crop_height = cv2.boundingRect(largest_contour)
    # Crop down to the contour rectangle
    image = image.crop((crop_x, crop_y, crop_x + crop_width, crop_y + crop_height))
    return image
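A hypothetical call, with `solver` standing in for an instance of the owning class and an illustrative file name:

from PIL import Image

captcha = Image.open("captcha.png")        # hypothetical input file
cropped = solver.process_captcha(captcha)
if cropped is not None:
    cropped.save("captcha_cropped.png")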
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)
    # removing very small contours through area-based noise removal
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    # removing blobs of microaneurysms and unwanted bigger chunks, taking into consideration
    # that they are not straight lines like blood vessels and fall within an area interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"
        else:
            shape = "veins"
        if shape == "circle":
            cv2.drawContours(xmask, [cnt], -1, 0, -1)
    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)), iterations=1)  # note: an erosion, despite the name
    # dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
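extract_bv expects a single-channel image, conventionally the green channel of a fundus photograph (as the variable name contrast_enhanced_green_fundus suggests). A hypothetical call with an illustrative file name:

import cv2

fundus = cv2.imread("fundus.png")
green = fundus[:, :, 1]        # vessels contrast best in the green channel
vessels = extract_bv(green)
cv2.imwrite("vessels.png", vessels)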
def _find_hull_defects(self, segment):
    # Use cv2 findContours function to find all the contours in segmented img
    contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # assume largest contour is the one of interest
    max_contour = max(contours, key=cv2.contourArea)
    epsilon = 0.01 * cv2.arcLength(max_contour, True)
    max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
    # determine convex hull & convexity defects of the hull
    hull = cv2.convexHull(max_contour, returnPoints=False)
    defects = cv2.convexityDefects(max_contour, hull)
    return (max_contour, defects)
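Each row of the defects array returned above is [start_idx, end_idx, farthest_idx, depth*256]. A self-contained sketch of reading it (the star shape and the depth cutoff are illustrative choices):

import cv2
import numpy as np

# A five-pointed star: the notches between points produce deep convexity defects.
ang = np.linspace(0, 2 * np.pi, 10, endpoint=False)
radii = np.where(np.arange(10) % 2 == 0, 90, 35)
pts = np.stack([100 + radii * np.cos(ang), 100 + radii * np.sin(ang)], axis=1).astype(np.int32)
img = np.zeros((200, 200), dtype=np.uint8)
cv2.fillPoly(img, [pts], 255)

cnt = max(cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2], key=cv2.contourArea)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
for start, end, far, depth in defects[:, 0]:
    if depth / 256.0 > 10:                      # illustrative depth threshold, in pixels
        print("deep defect near", tuple(cnt[far][0]))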
def main():
    # gets screen size
    w, h = pyautogui.size()
    # takes a screenshot of the screen; returns an hsv-format image
    scrn_scrnshot = Screenshot.this(0, 0, w, h, 'hsv')
    # cv2.imshow('img', scrn_scrnshot)
    # cv2.waitKey(0)
    # find Grand Exchange window
    lower_hsv = np.array([12, 0, 7])
    upper_hsv = np.array([40, 62, 64])
    # mask of applied values
    mask = cv2.inRange(scrn_scrnshot, lower_hsv, upper_hsv)
    cv2.imshow('img', mask)
    cv2.waitKey(0)
    return  # NOTE: debugging early return; everything below is unreachable
    # find contours to get sides of rectangle
    _, contours, h = cv2.findContours(mask, 1, 2)  # 1 = cv2.RETR_LIST, 2 = cv2.CHAIN_APPROX_SIMPLE
    for cnt in contours:
        # looks for biggest square
        # if cv2.contourArea(cnt) <= 1695.0:
        #     continue
        # checks contour sides
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        # Square found here vvvv
        if len(approx) == 4:
            # print("square of {}".format(cv2.contourArea(cnt)))
            # cv2.drawContours(rs_window, [cnt], 0, (255, 255, 255), -1)
            # get geometry of approx
            # add rs coords
            x, y, w, h = cv2.boundingRect(cnt)
            print(cv2.contourArea(cnt))