def __bound_contours(roi):
"""
Returns a copy of roi (non-destructive) with the detected bounding boxes drawn, plus the rectangles found by the algorithm.
@roi region of interest in which to find contours
@return (roi_copy, rects)
"""
roi_copy = roi.copy()
roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
# filter black color
mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
mask1 = cv2.Canny(mask1, 100, 300)
mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
mask1 = cv2.Canny(mask1, 100, 300)
# mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
# Find contours for detected portion of the image
im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # get largest five contour area
rects = []
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
x, y, w, h = cv2.boundingRect(approx)
if h >= 15:
# if height is enough
# create rectangle for bounding
rect = (x, y, w, h)
rects.append(rect)
cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)
return (roi_copy, rects)
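# A hypothetical call site for __bound_contours (not part of the source): the
# function expects an RGB roi, so a BGR frame from cv2.imread is converted first.
# The file name 'frame.png' is an assumption used for illustration.
import cv2
frame_bgr = cv2.imread('frame.png')
roi_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
boxed, rects = __bound_contours(roi_rgb)
print(len(rects), 'boxes found')
cv2.imshow('boxes', cv2.cvtColor(boxed, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)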
Python cv2.inRange() examples: source code
def skin_calib(self, raw_yrb):
mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
cv2.imshow('YRB_calib', cal_skin)
k = cv2.waitKey(5) & 0xFF
if k == ord('s'):
self.calib_switch = False
cv2.destroyWindow('YRB_calib')
ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
self.mask_lower_yrb = np.array([ymin, rmin, bmin])
self.mask_upper_yrb = np.array([ymax, rmax, bmax])
def image_callback(self, msg):
image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower_yellow = numpy.array([18, 120, 200])
upper_yellow = numpy.array([28, 255, 255])
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
h, w, d = image.shape
search_top = 3 * h // 4
search_bot = 3 * h // 4 + 20
mask[0:search_top, 0:w] = 0
mask[search_bot:h, 0:w] = 0
M = cv2.moments(mask)
if M['m00'] > 0:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.circle(image, (cx, cy), 20, (0,0,255), -1)
# BEGIN CONTROL
err = cx - w/2
self.twist.linear.x = 0.2
self.twist.angular.z = -float(err) / 100
self.cmd_vel_pub.publish(self.twist)
# END CONTROL
cv2.imshow("window", image)
cv2.waitKey(3)
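# Wiring sketch for the callback above (an assumption, not shown in the source):
# it relies on self.bridge, self.twist and self.cmd_vel_pub, typically created in
# a small ROS node class. Topic names here are common defaults and may differ.
import rospy
from cv_bridge import CvBridge
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image

class Follower(object):
    image_callback = image_callback  # reuse the callback defined above as a method

    def __init__(self):
        rospy.init_node('follower')
        self.bridge = CvBridge()
        self.twist = Twist()
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image,
                                          self.image_callback, queue_size=1)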
def color_picker(rect):
global img,img_gray2,hsv
roi=img[rect[0][1]:rect[1][1],rect[0][0]:rect[1][0]]
b,g,r,_=np.uint8(cv2.mean(roi))
color=cv2.cvtColor(np.uint8([[[b,g,r]]]),cv2.COLOR_BGR2HSV)
h= color[0][0][0]
# define a hue range around the sampled color in HSV
# (no hue wrap-around handling, so colors near the red seam may be clipped)
lower = np.array([h-10,50,50])
upper = np.array([h+10,255,255])
# Threshold the HSV image to keep only the sampled color
mask = cv2.inRange(hsv, lower, upper)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
res2=cv2.bitwise_and(img_gray2,img_gray2, mask= cv2.bitwise_not(mask))
return res+res2
def add_blobs(crop_frame):
frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = np.array([70,50,50])
upper_green = np.array([85,255,255])
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = cv2.erode(mask, None, iterations=1)
mask = cv2.dilate(mask, None, iterations=1)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
reversemask=255-mask
keypoints = detector.detect(reversemask)
if keypoints:
print "found blobs"
if len(keypoints) > 4:
keypoints.sort(key=(lambda s: s.size))
keypoints=keypoints[0:3]
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
else:
print "no blobs"
im_with_keypoints=crop_frame
return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders
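# add_blobs reads a module-level `params` object that is not shown above. A
# minimal sketch of how it could be built (the attribute values are assumptions):
import cv2
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 10         # start of the internal threshold sweep
params.maxThreshold = 200        # end of the internal threshold sweep
params.filterByArea = True
params.minArea = 20              # ignore blobs smaller than 20 px
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False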
def print_img_array(self):
img = self.take_screenshot('array')
#converts image to HSV
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# gets the values from the sliders
low_hue = self.low_hue.get()
low_sat = self.low_sat.get()
low_val = self.low_val.get()
# gets upper values from sliders
high_hue = self.high_hue.get()
high_sat = self.high_sat.get()
high_val = self.high_val.get()
lower_color = np.array([low_hue,low_sat,low_val])
upper_color= np.array([high_hue,high_sat,high_val])
#creates the mask and result
mask = cv2.inRange(img, lower_color, upper_color)
# print the resulting binary mask array
print(np.array(mask))
def imgSeg_contour(img, b,g,r, per):
lower = np.array([0, 0, 0])
upper = np.array([b,g,r])
shapeMask = cv2.inRange(img, lower, upper)
#http://stackoverflow.com/questions/27746089/python-computer-vision-contours-too-many-values-to-unpack
_, cnts, hierarchy = cv2.findContours(shapeMask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:4]
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, per * peri, True) ### 0.04 ###
if (len(approx) >= 4) and (len(approx) < 6):
break
return approx
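# Hypothetical use of imgSeg_contour to find a roughly quadrilateral outline; the
# (b, g, r) ceiling, the 0.04 epsilon factor and the file names are assumptions.
import cv2
page = cv2.imread('card.png')
quad = imgSeg_contour(page, 100, 100, 100, 0.04)
cv2.drawContours(page, [quad], -1, (0, 255, 0), 2)
cv2.imwrite('card_outline.png', page)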
def __filterRedColor(image_hsv):
"""
Filters the red color from image_hsv and returns mask.
"""
mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
mask = mask1 + mask2
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
mask = cv2.Canny(mask, 50, 100)
mask = cv2.GaussianBlur(mask, (13, 13), 0)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
return mask
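# Red straddles the HSV hue seam (hue 0 and hue ~179), which is why the function
# above combines two inRange masks, one per end of the hue range. A hypothetical
# call site (the file names are assumptions):
import cv2
bgr = cv2.imread('sign.png')
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
red_edges = __filterRedColor(hsv)
cv2.imwrite('sign_red_edges.png', red_edges)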
def dif_gaus(image, lower, upper):
lower, upper = int(lower-1), int(upper-1)
lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0)
upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0)
# upper +=50
# lower +=50
dif = lower-upper
# dif *= .1
# dif = cv2.medianBlur(dif,3)
# dif = 255-dif
dif = cv2.inRange(dif, np.asarray(200),np.asarray(256))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
dif = cv2.dilate(dif, kernel, iterations=2)
dif = cv2.erode(dif, kernel, iterations=1)
# dif = cv2.max(image,dif)
# dif = cv2.dilate(dif, kernel, iterations=1)
return dif
def erase_specular(image,lower_threshold=0.0, upper_threshold=150.0):
"""erase_specular: removes specular reflections
within given threshold using a binary mask (hi_mask)
"""
thresh = cv2.inRange(image,
np.asarray(float(lower_threshold)),
np.asarray(256.0))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
hi_mask = cv2.dilate(thresh, kernel, iterations=2)
specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA)
# return cv2.max(hi_mask,image)
return specular
def skin_detect(self, raw_yrb, img_src):
# use median blurring to remove signal noise in YCRCB domain
raw_yrb = cv2.medianBlur(raw_yrb, 5)
mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
# morphological transform to remove unwanted part
kernel = np.ones((5, 5), np.uint8)
#mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)
res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
#res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7,21)
return res_skin
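# Sketch of the state skin_calib and skin_detect expect on `self` (the initial
# YCrCb bounds below are typical skin-tone values, not taken from the source):
import cv2
import numpy as np

class SkinTracker(object):
    skin_detect = skin_detect  # reuse the function defined above as a method

    def __init__(self):
        self.calib_switch = True
        self.mask_lower_yrb = np.array([44, 131, 80])
        self.mask_upper_yrb = np.array([163, 157, 155])

    def process(self, frame_bgr):
        raw_yrb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2YCR_CB)
        return self.skin_detect(raw_yrb, frame_bgr)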
def execute_ColorSpace(proxy,obj):
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lower = np.array([max(obj.h1-obj.h2,0),max(obj.s1-obj.s2,0),max(obj.v1-obj.v2,0)])
upper = np.array([min(obj.h1+obj.h2,255),min(obj.s1+obj.s2,255),min(obj.v1+obj.v2,255)])
say("ee")
say(lower)
say(upper)
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(img,img, mask= mask)
obj.Proxy.img=res
def execute_HSV(proxy,obj):
say("hsv ..")
try: img=obj.sourceObject.Proxy.img.copy()
except: img=cv2.imread(__dir__+'/icons/freek.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower=np.array([obj.valueColor-obj.deltaColor,0,0])
upper=np.array([obj.valueColor+obj.deltaColor,255,255])
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(hsv,hsv, mask= mask)
obj.Proxy.img=res
def equal_color(img: Image, color):
arr_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
arr_img = cv2.resize(arr_img, (img.size[0] * 10, img.size[1] * 10))
boundaries = []
boundaries.append(([max(color[2] - 15, 0), max(color[1] - 15, 0), max(color[0] - 15, 0)],
[min(color[2] + 15, 255), min(color[1] + 15, 255), min(color[0] + 15, 255)]))
for (lower, upper) in boundaries:
lower = np.array(lower, dtype="uint8")
upper = np.array(upper, dtype="uint8")
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(arr_img, lower, upper)
res = cv2.bitwise_and(arr_img, arr_img, mask=mask)
res = cv2.resize(res, (img.size[0], img.size[1]))
cv2_im = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
output_img = Image.fromarray(cv2_im)
return output_img
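# equal_color takes a PIL Image and an (R, G, B) tuple; a hypothetical call
# (the file name and target colour are assumptions):
from PIL import Image
sprite = Image.open('sprite.png')
red_only = equal_color(sprite, (200, 30, 30))
red_only.save('sprite_red_only.png')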
def get_hsv_mask(img, debug=False):
assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
assert img.ndim == 3, 'skin detection can only work on color images'
logger.debug('getting hsv mask')
lower_thresh = numpy.array([0, 50, 0], dtype=numpy.uint8)
upper_thresh = numpy.array([120, 150, 255], dtype=numpy.uint8)
img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
msk_hsv = cv2.inRange(img_hsv, lower_thresh, upper_thresh)
msk_hsv[msk_hsv < 128] = 0
msk_hsv[msk_hsv >= 128] = 1
if debug:
scripts.display('input', img)
scripts.display('mask_hsv', msk_hsv)
return msk_hsv.astype(float)
def get_rgb_mask(img, debug=False):
assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
assert img.ndim == 3, 'skin detection can only work on color images'
logger.debug('getting rgb mask')
lower_thresh = numpy.array([45, 52, 108], dtype=numpy.uint8)
upper_thresh = numpy.array([255, 255, 255], dtype=numpy.uint8)
mask_a = cv2.inRange(img, lower_thresh, upper_thresh)
mask_b = 255 * ((img[:, :, 2] - img[:, :, 1]) / 20)
mask_c = 255 * ((numpy.max(img, axis=2) - numpy.min(img, axis=2)) / 20)
mask_d = numpy.bitwise_and(numpy.uint64(mask_a), numpy.uint64(mask_b))
# mask = numpy.zeros_like(mask_d, dtype=numpy.uint8)
msk_rgb = numpy.bitwise_and(numpy.uint64(mask_c), numpy.uint64(mask_d))
# msk_rgb = cv2.fromarray(mask_rgb)
msk_rgb[msk_rgb < 128] = 0
msk_rgb[msk_rgb >= 128] = 1
if debug:
scripts.display('input', img)
scripts.display('mask_rgb', msk_rgb)
return msk_rgb.astype(float)
def get_ycrcb_mask(img, debug=False):
assert isinstance(img, numpy.ndarray), 'image must be a numpy array'
assert img.ndim == 3, 'skin detection can only work on color images'
logger.debug('getting ycrcb mask')
lower_thresh = numpy.array([90, 100, 130], dtype=numpy.uint8)
upper_thresh = numpy.array([230, 120, 180], dtype=numpy.uint8)
img_ycrcb = cv2.cvtColor(img, cv2.COLOR_RGB2YCR_CB)
msk_ycrcb = cv2.inRange(img_ycrcb, lower_thresh, upper_thresh)
msk_ycrcb[msk_ycrcb < 128] = 0
msk_ycrcb[msk_ycrcb >= 128] = 1
if debug:
scripts.display('input', img)
scripts.display('mask_ycrcb', msk_ycrcb)
return msk_ycrcb.astype(float)
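# One plausible way to fuse the three 0/1 masks above into a single skin mask;
# the majority-vote rule and the median blur are assumptions, not necessarily
# how the original library combines them.
import numpy
import cv2

def get_combined_mask(img):
    votes = get_hsv_mask(img) + get_rgb_mask(img) + get_ycrcb_mask(img)
    combined = numpy.where(votes >= 2, 255, 0).astype(numpy.uint8)  # keep pixels where at least 2 of 3 masks agree
    return cv2.medianBlur(combined, 5)  # remove isolated speckle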
def process_image(self, msg):
""" Process image messages from ROS and stash them in an attribute
called cv_image for subsequent processing """
self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
self.arc_image = np.zeros((480, 640, 3), np.uint8)
self.draw_arc()
# Transform the image of our arc from a top down image into the plane of our CV
self.transform_img()
# overlay the projected path onto cv_image
self.overlay_img()
if self.omega is not None and self.omega == 0.0:
self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)
self.binary_image = cv2.inRange(self.hsv_image, self.hsv_lb, self.hsv_ub)
self.spot_delineators = self.find_delineators()
if self.color != (0, 255, 0): # This logic makes it such that once the lines turn green, they stay green
self.color = (0,0,255) if not self.check_aligned() else (0,255,0)
def get_blob(im, lower, upper):
# Finds a blob, if one exists
# Create mask of green
try:
green_mask = cv2.inRange(im, lower, upper)
except cv2.error:
# Catches the case where there is no blob in range
return None, None
# Get largest blob
largest = get_largest(green_mask, 1)
second_largest = get_largest(green_mask, 2)
if largest is not None and second_largest is not None:
return [largest, second_largest], green_mask
else:
return None, None
def _detect_bot(self, hsv_image):
BOT_MIN = np.array([28,8,100], np.uint8)
BOT_MAX = np.array([32,255,255], np.uint8)
thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
thresholded_image = cv2.medianBlur(thresholded_image, 15)
_, contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
(bot_x, bot_y) = (-1000,-1000)
else:
bot = contours[0]
M = cv2.moments(bot)
if len(bot) > 2:
bot_x = int(M['m10']/M['m00'])
bot_y = int(M['m01']/M['m00'])
else:
(bot_x, bot_y) = (-1000,-1000)
return thresholded_image, (bot_x, bot_y)
def color_mask(image, color, tolerance=0):
"""Extract a mask of image according to color under a certain
tolerance level (defaults to 0)."""
if tolerance > 100:
tolerance = 100
elif tolerance < 0:
tolerance = 0
tolerance = int(tolerance * 255 / 100)
red, green, blue = color
bgr_color = np.uint8([[[blue, green, red]]])
hsv_color = cv2.cvtColor(bgr_color, cv2.COLOR_BGR2HSV)[0][0]
mask_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# do the +/- tolerance arithmetic on ints to avoid uint8 wrap-around
lower_range = hsv_color.astype(int) - np.array([tolerance, 0, 0])
lower_range[lower_range > 255] = 255
lower_range[lower_range < 0] = 0
upper_range = hsv_color.astype(int) + np.array([tolerance, 0, 0])
upper_range[upper_range > 255] = 255
upper_range[upper_range < 0] = 0
mask = cv2.inRange(mask_image, lower_range.astype(np.uint8), upper_range.astype(np.uint8))
return mask
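# Example use of color_mask (the input file and the pure-red target are assumptions):
import cv2
image = cv2.imread('scene.png')
red_mask = color_mask(image, (255, 0, 0), tolerance=10)  # color is given as (red, green, blue)
isolated = cv2.bitwise_and(image, image, mask=red_mask)
cv2.imwrite('scene_red.png', isolated)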
def binary_thresh( img, boundaries, filter):
if filter == 'RGB':
frame_to_thresh = img.copy()
else:
frame_to_thresh = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
for (lower, upper) in boundaries:
# create numpy arrays from the boundaries
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
# find the colors within the specified boundaries and apply the mask
mask = cv2.inRange(frame_to_thresh, lower, upper)
output = cv2.bitwise_and(frame_to_thresh, frame_to_thresh, mask = mask) # masked image in the working colour space (not returned)
return mask
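# Note that binary_thresh reassigns `mask` on every loop pass, so only the last
# (lower, upper) pair in `boundaries` determines the returned mask; passing a
# single range avoids that surprise. The HSV bounds below are illustrative
# values for green, not taken from the source:
import cv2
img = cv2.imread('scene.png')
green_mask = binary_thresh(img, [([40, 50, 50], [80, 255, 255])], 'HSV')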
def print_hsv(x):
if x == 1:
H_low = cv2.getTrackbarPos("H_low", "Segmented")
S_low = cv2.getTrackbarPos("S_low", "Segmented")
V_low = cv2.getTrackbarPos("V_low", "Segmented")
H_high = cv2.getTrackbarPos("H_high", "Segmented")
S_high = cv2.getTrackbarPos("S_high", "Segmented")
V_high = cv2.getTrackbarPos("V_high", "Segmented")
low = np.array([H_low, S_low, V_low])
high = np.array([H_high, S_high, V_high])
print "HSV Low: ", low, ", High: ", high
save_name = 'seg' + '_lh_' + str(low[0]) + '_ls_' + str(low[1]) + '_lv_' + str(low[2]) \
+ '_hh_' + str(high[0]) + '_hs_' + str(high[1]) + '_hv_' + str(high[2]) \
+ '_' + img_str
mask = cv2.inRange(birdsHSV, low, high)
result = cv2.bitwise_and(birdsImg, birdsImg, mask = mask)
res_name = '../results/' + save_name
print "Saving result as", res_name
cv2.imwrite(res_name, result)
def blob__Detec(image):
img = image.copy()
height, width, channels = img.shape
new_img=np.ones((height,width,channels), np.uint8)
HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
Yellow={'min':(20,100,100),'max':(30, 255, 255)}
Blue={'min':(50,100,100),'max':(100,255,255)}
Brown={'min':(0,100,0),'max':(20,255,255)}
mask_b=cv2.inRange(HSV,Blue['min'],Blue['max'])
mask_br=cv2.inRange(HSV,Brown['min'],Brown['max'])
mask_y=cv2.inRange(HSV,Yellow['min'],Yellow['max'])
blue=cv2.bitwise_and(img,img,mask=mask_b)
yellow=cv2.bitwise_and(img,img,mask=mask_y)
brown=cv2.bitwise_and(img,img,mask=mask_br)
new_img=cv2.add(blue,brown)
new_img=cv2.add(new_img,yellow)
return new_img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
img_BGR = self.img[index]
img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)
lower_red = np.array(hsv_value_B)
upper_red = np.array(hsv_valueT)
mask = cv2.inRange(img_HSV, lower_red, upper_red)
res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
if self.erosion:
kernel = np.ones((5, 5), np.uint8)
res = cv2.erode(res, kernel, iterations=1)
if self.dilate:
kernel = np.ones((9, 9), np.uint8)
res = cv2.dilate(res, kernel, iterations=1)
return res
# arch_light_track.py (project: Vision_Processing-2016, author: Sabercat-Robotics-4146-FRC)
def get_bounding_rect( cap, win_cap, win, upper, lower):
# HSV-convert and blur the capture, threshold to the given range, then clean up with erode/dilate
hsv = cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5))
msk = cv2.inRange(hsv, np.array(lower), np.array(upper))
msk = cv2.erode(msk, None, iterations=3)
msk = cv2.dilate(msk, None, iterations=3)
im2, contours, hierarchy = cv2.findContours( msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
if len(contours) > 0:
areas = [cv2.contourArea(c) for c in contours] # get the area of each contour
max_index = np.argmax(areas) # get the index of the largest contour by area
cnts = contours[max_index] # get the largest contour by area
cv2.drawContours(msk, [cnts], 0, (0,255,0), 3) # Draw the contours to the mask image
x,y,w,h = cv2.boundingRect(cnts) # get the bounding box information about the contour
cv2.rectangle(win_cap,(x,y),(x+w,y+h),(255,255,255),2) # Draw rectangle on the image to represent the bounding box
cv2.imshow( "debug.", win_cap )
try:
self.smt_dash.putNumber('vis_x', x)
self.smt_dash.putNumber('vis_y', y)
self.smt_dash.putNumber('vis_w', w)
self.smt_dash.putNumber('vis_h', h)
except Exception:
pass
def isInvEmpty():
bag, bagx,bagy = get_bag('bag and coords', 'hsv')
# looks for color of empty inv
low = np.array([10,46,58])
high= np.array([21,92,82])
# applies mask
mask = cv2.inRange(bag, low, high)
# removes any noise
kernel = np.ones((5,5), np.uint8)
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# if every pixel of the cleaned mask is white, the inventory is empty
# return True in that case, else False
if (closing.view() == 255).all():
return True
return False
def is_button_selected(button_name):
"""Returns true if button is selected, else False"""
x1, y1, x2, y2 = press_button(button_name, 'coords')
button_img = Screenshot.shoot(x1,y1,x2,y2, 'hsv')
lower_red = np.array([0,179,0])
upper_red = np.array([4,193,255])
mask = cv2.inRange(button_img, lower_red, upper_red)
for colors in mask:
for value in colors:
if value == 255:
#print('{} is selected'.format(button_name))
return 1
#print('{} is NOT selected'.format(button_name))
return 0
def find_yellow_birds():
ps, psx, psy = RS.getPlayingScreen()
lower_yellow = np.array([28,197,168])
upper_yellow = np.array([29,234,239])
mask = cv2.inRange(ps, lower_yellow, upper_yellow)
#cv2.imshow('img', mask)
#cv2.waitKey(0)
_, contours, _ = cv2.findContours(mask, 1,2)
# returns true if birds found
for cnt in contours:
if cv2.contourArea(cnt) > 0:
return 1
def check_prayer():
RSX, RSY = RS.position()
pc = (545,109,571,135)
prayer_level = Screenshot.shoot(pc[0],pc[1],pc[2],pc[3], 'hsv')
low = np.array([116,0,0])
high =np.array([141,255,255])
mask = cv2.inRange(prayer_level, low, high)
mask = np.array(mask)
percentage = 0
for color in mask:
for element in color:
if element == 255:
percentage += 1
else:
continue
# 363 is presumably the pixel count of a fully filled prayer indicator in this region
return percentage/363.0