def find_ham_guard():
import random
try:
ps, psx, psy = RS.getPlayingScreen()
lower_pink = np.array([154,0,0])
upper_pink = np.array([160,255,255])
mask = cv2.inRange(ps, lower_pink, upper_pink)
_, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
if cv2.contourArea(cnt) <= 1:
continue
#print("Area: {}".format(cv2.contourArea(cnt)))
M = cv2.moments(cnt)
if M['m00'] == 0:
    continue  # degenerate contour, avoid division by zero
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cx += psx
cy += psy
cx += random.randint(-20,20)
cy += random.randint(-20,20)
# Find bounding box coords
Mouse.moveClick(cx, cy, 3)
RS.findOptionClick(cx, cy, 'pickpocket')
break
except Exception as e:
print(e)
#cv2.imshow('img', mask)
#cv2.waitKey(0)
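All of these bot snippets repeat the same pattern: threshold an HSV image, find contours, and take the centroid from the spatial moments (cx = m10/m00, cy = m01/m00). A minimal, self-contained sketch of that shared pattern (the image path and HSV bounds here are illustrative, not taken from any of the projects above):

import cv2
import numpy as np

def centroid_of_largest_blob(bgr_img, lower_hsv, upper_hsv):
    """Return the (cx, cy) centroid of the largest blob in the HSV range, or None."""
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_hsv, upper_hsv)
    # [-2:] keeps this working across OpenCV 2.x/3.x/4.x return signatures
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    if not contours:
        return None
    cnt = max(contours, key=cv2.contourArea)
    M = cv2.moments(cnt)
    if M['m00'] == 0:  # zero-area contour, centroid undefined
        return None
    return int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])

# e.g. centroid_of_largest_blob(cv2.imread('frame.png'), np.array([154, 0, 0]), np.array([160, 255, 255]))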
def find_prayer_pot():
rs_bag, bagx, bagy = RS.get_bag('bag coords', 'hsv')
# prayer potion color ranges
low = np.array([78,140,0])
high= np.array([81,225,211])
mask = cv2.inRange(rs_bag, low, high)
kernel = np.ones((5,5), np.uint8)
dilation = cv2.dilate(mask, kernel, iterations=1)
_, contours, _ = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for con in contours:
x, y, w, h = cv2.boundingRect(con)
cv2.rectangle(mask,(x,y), (x+w, y+h), (255,255,255),-1)
_,contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for con in contours[::-1]:
M = cv2.moments(con)
if M["m00"] == 0:
    continue  # degenerate contour, avoid division by zero
mx, my = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
mx += RSX + bagx
my += RSY + bagy
mx += random.randint(-7,7)
my += random.randint(-12,5)
Mouse.moveClick(mx,my,1)
#Mouse.moveTo(mx,my)
break
def callback(self,data):
try:
imgOriginal = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
    print("==[CAMERA MANAGER]==", e)
    return
blurred = cv2.GaussianBlur(imgOriginal,(11,11),0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# lower = np.array([60,90,70]) # hsv range for green
# upper = np.array([90,175,255])
lower = np.array([60,70,70]) # hsv range for green
upper = np.array([90,255,255])
mask = cv2.inRange(hsv, lower, upper)
mask = cv2.erode(mask, None, iterations=7)
mask = cv2.dilate(mask, None, iterations=7)
output = cv2.bitwise_and(imgOriginal, imgOriginal, mask = mask)
outputGrayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
if major_ver == '3':
contours = cv2.findContours(outputGrayscale,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[1]
elif major_ver == '2':
contours = cv2.findContours(outputGrayscale,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[0]
if len(contours) > 0:
c = max(contours,key=cv2.contourArea)
((x,y),radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
treasureCenter = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
self.treasurePoint.x = treasureCenter[0]
self.treasurePoint.y = treasureCenter[1]
self.treasurePoint.flag = 1
self.pub.publish(self.treasurePoint)
else:
self.treasurePoint.flag = 0
self.pub.publish(self.treasurePoint)
cv2.imshow("TreasureFilter", output)
cv2.waitKey(3)
def findCenter(contour):
M = cv2.moments(contour)
x = int(M['m10'] / M['m00'])
y = int(M['m01'] / M['m00'])
return (x, y)
#Calculates angle by using the focal length and pixel position
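The trailing comment refers to an angle-calculation function that was not captured in this listing. As a rough sketch of the pinhole-camera math it alludes to (the image width and focal length in pixels are assumed values, not from the original project):

import math

def find_angle(pixel_x, image_width=640, focal_length_px=554.0):
    """Approximate horizontal angle (radians) from the image centre to a pixel column."""
    offset = pixel_x - image_width / 2.0
    return math.atan2(offset, focal_length_px)

# e.g. find_angle(480) -> ~0.28 rad to the right of centre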
def analysis(self):
self.Hull = cv2.convexHull(self.Contour)
M = cv2.moments(self.Hull)
try:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
self.centroid = (cx, cy)
except ZeroDivisionError as e:
pass
self.ContArea = cv2.contourArea(self.Hull)
def label_contour(image, c, i, color=(0, 255, 0), thickness=2):
# compute the center of the contour area and draw a circle
# representing the center
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# draw the contour and label number on the image
cv2.drawContours(image, [c], -1, color, thickness)
cv2.putText(image, "#{}".format(i + 1), (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX,
1.0, (255, 255, 255), 2)
# return the image with the contour number drawn on it
return image
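A short usage sketch for label_contour (the input path is hypothetical); it draws each contour with its 1-based index at the centroid:

import cv2

image = cv2.imread('shapes.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i, c in enumerate(contours):
    if cv2.moments(c)['m00'] == 0:
        continue  # label_contour divides by m00, so skip zero-area contours
    image = label_contour(image, c, i)
cv2.imwrite('shapes_labeled.png', image)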
def get_objects(self):
# bail out early if the low/high RGB ranges have not been configured
if self.__low_rgb is None or self.__high_rgb is None:
    return []
# find the contours (and hierarchy) within the configured colour range
image, contours, hierarchy = self.find_contours_in_range(self.__low_rgb, self.__high_rgb)
# get the moment for each contour
moments = []
for contour in contours:
moments.append(cv2.moments(contour, False))
# get the camera_object for each contour
camera_objects = []
for i, contour in enumerate(contours):
x, y, w, h = cv2.boundingRect(contour)
# get the moment for this contour
moment = moments[i]
if moment['m00'] == 0:
    continue  # zero-area contour: no centroid, and the confidence below would divide by zero
# confidence: fraction of the bounding rectangle the blob actually fills (m00 is the contour area)
confidence = moment['m00'] / (w * h)
# the XY point of the moment (the centroid)
point = (moment['m10'] / moment['m00'], moment['m01'] / moment['m00'])
# setup the rectangle
rectangle = (x, y, w, h)
# now get the camera_object
camera_object = (point, rectangle, confidence)
# save the camera_object
camera_objects.append(camera_object)
return camera_objects
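The confidence here is a fill ratio: m00 is the contour's area, so dividing by the bounding-box area w*h measures how completely the blob fills its rectangle (near 1.0 for a solid rectangle, about pi/4 ~ 0.785 for a circle). A quick check with a synthetic square:

import cv2
import numpy as np

canvas = np.zeros((100, 100), np.uint8)
cv2.rectangle(canvas, (20, 20), (59, 59), 255, -1)  # filled 40x40 square
contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
m = cv2.moments(contours[0])
x, y, w, h = cv2.boundingRect(contours[0])
print(m['m00'] / (w * h))  # ~0.95: contour area is the polygon area, slightly under the pixel count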
def classify_monitor_contour_set(contours):
'''Not a general purpose function : given the expectation of a set of strongly related contours for one monitor...'''
# First pass : compute the center of mass of every contour
classified = {}
for (i,c) in enumerate(contours):
classified[i] = {}
classified[i]['contour'] = c
M = cv2.moments(c)
classified[i]['com'] = (int(M['m10']/M['m00']), int(M['m01']/M['m00']))
rect = contour_to_monitor_coords(c)
(maxWidth, maxHeight, dest, Mwarp) = compute_warp(rect)
classified[i]['rect'] = rect
classified[i]['maxWidth'] = maxWidth
classified[i]['maxHeight'] = maxHeight
classified[i]['dest'] = dest
classified[i]['Mwarp'] = Mwarp
# Second pass : establish if c-o-m of every contour is within the first contour
reference_contour = contours[0]
for (i,c) in enumerate(contours):
classified[i]['coherent'] = cv2.pointPolygonTest(reference_contour, classified[i]['com'], False)
# Final pass : report on the set
print('$'*80)
for (i,c) in enumerate(contours):
print('%d : c-o-m %s : coherent : %d mw %d mh %d' % (i,
classified[i]['com'],
classified[i]['coherent'],
classified[i]['maxWidth'],
classified[i]['maxHeight'],
))
print('$'*80)
# From the contours coherent to the reference contour, build an average/best estimator
count = 0
rect = np.zeros((4, 2), dtype = "float32")
for (i,c) in enumerate(contours):
if classified[i]['coherent'] == 1:
count += 1
for j in range(0,4):
rect[j] += classified[i]['rect'][j]
#pdb.set_trace()
for j in range(0,4):
# BUG to show Alison
# rect[j] = (rect[j]/1.0*count).astype('uint8')
rect[j] = (rect[j]/(1.0*count)).astype('uint32')
time.sleep(2.5)
return rect
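For reference, cv2.pointPolygonTest with measureDist=False (as used above for the 'coherent' flag) returns +1.0 if the point is inside the contour, 0.0 on the edge, and -1.0 outside, so the == 1 check keeps only contours whose centre of mass falls strictly inside the reference contour. A tiny demonstration:

import cv2
import numpy as np

square = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], np.int32).reshape(-1, 1, 2)
print(cv2.pointPolygonTest(square, (50, 50), False))   #  1.0 (inside)
print(cv2.pointPolygonTest(square, (0, 50), False))    #  0.0 (on the edge)
print(cv2.pointPolygonTest(square, (150, 50), False))  # -1.0 (outside)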
# Source: erle_rover_followline.py, project gazebo_python_examples (author: erlerobot)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
    print(e)
    return
height, width, channels = cv_image.shape
# crop rows 200..height/2+150 and columns 1..width (the original chained [a:b][c:d] sliced rows twice)
crop_img = cv_image[200:height//2 + 150, 1:width]
lower = np.array([0, 0, 79], dtype = "uint8")
upper = np.array([40, 40, 191], dtype = "uint8")
mask = cv2.inRange(crop_img, lower, upper)
extraction = cv2.bitwise_and(crop_img, crop_img, mask = mask)
m = cv2.moments(mask, False)
try:
x, y = m['m10']/m['m00'], m['m01']/m['m00']
except ZeroDivisionError:
x, y = width/2, height/2  # fall back to the image centre (x is horizontal, so width/2)
cv2.circle(extraction,(int(x), int(y)), 2,(0,255,0),3)
cv2.imshow("Image window", np.hstack([crop_img,extraction]))
cv2.waitKey(1)
yaw = 1500 + (x - width/2) * 1.5
print "center=" + str(width/2) + "point=" + str(x) + "yaw=" + str(yaw)
throttle = 1900
if (yaw > 1900):
yaw = 1900
elif (yaw < 1100):
yaw = 1100
msg = OverrideRCIn()
msg.channels[0] = int(yaw)  # RC channels are integer pulse widths
msg.channels[1] = 0
msg.channels[2] = throttle
msg.channels[3] = 0
msg.channels[4] = 0
msg.channels[5] = 0
msg.channels[6] = 0
msg.channels[7] = 0
self.pub.publish(msg)
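The steering above is a simple proportional controller: the line centroid's offset from the image centre is scaled by a gain (1.5) onto an RC pulse width around the 1500 us neutral point, then clamped to the 1100-1900 us range. A distilled, testable version of just that mapping (gain and limits copied from the snippet):

def offset_to_yaw_pwm(x, width, gain=1.5, neutral=1500, lo=1100, hi=1900):
    """Map a pixel column to an RC yaw pulse width, clamped to [lo, hi]."""
    yaw = neutral + (x - width / 2.0) * gain
    return int(min(hi, max(lo, yaw)))

# e.g. offset_to_yaw_pwm(500, 640) -> 1770; offset_to_yaw_pwm(0, 640) -> 1100 (clamped)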
def count_fingers(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu's thresholding after Gaussian filtering
img = cv2.GaussianBlur(img, (5, 5), 0)
ret, mask = cv2.threshold(img, 0, 255,
cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow("Threshold", mask)
(_, cnts, _) = cv2.findContours(mask,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
list_far = []
list_end = []
hull1, cx, cy = None, 0, 0  # defaults so the return below is safe when no contour is found
if cnts:
areas = [cv2.contourArea(c) for c in cnts]
max_index = np.argmax(areas)
cnt = cnts[max_index]
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
hull1 = cv2.convexHull(cnt)
hull2 = cv2.convexHull(cnt, returnPoints=False)
try:
    defects = cv2.convexityDefects(cnt, hull2)
except Exception as e:
    defects = None
    print(e)
counter = 0
if defects is not None:
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
# start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
if d < 20000:
continue
if far[1] >= (cy+40):
continue
diff1 = abs(end[0]-far[0])
if diff1 > 100:
continue
cv2.line(img, end, far, (0, 0, 0), 2, 8)
cv2.imshow("hand", img)
cv2.waitKey(1)
list_far.append(far)
list_end.append(end)
counter += 1
return mask, counter, hull1, (cx, cy), list_far, list_end
def count_fingers(hand_frame):
hand_frame = cv2.cvtColor(hand_frame,cv2.COLOR_BGR2GRAY)
# Otsu's thresholding after Gaussian filtering
hand_frame = cv2.GaussianBlur(hand_frame,(5,5),0)
ret,mask = cv2.threshold(hand_frame,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
(cnts,_)=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
list_far=[]
list_end=[]
hull1, cx, cy = None, 0, 0  # defaults so the return below is safe when no contour is found
if cnts:
areas = [cv2.contourArea(c) for c in cnts]
max_index = np.argmax(areas)
cnt=cnts[max_index]
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
hull1 = cv2.convexHull(cnt)
hull2 = cv2.convexHull(cnt,returnPoints = False)
try:
    defects = cv2.convexityDefects(cnt,hull2)
except Exception as e:
    defects = None
    print(e)
counter = 0
if defects is not None:
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
if d<20000:
continue
if far[1] >= (cy+40):
continue
else:
pass
list_far.append(far)
list_end.append(end)
counter +=1
return mask,counter,hull1,(cx,cy),list_far,list_end
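Both count_fingers variants filter defects with d < 20000. The fourth value cv2.convexityDefects returns is the defect's depth as a fixed-point number: the pixel distance multiplied by 256, so d < 20000 discards valleys shallower than roughly 78 px, rejecting small notches between knuckles while keeping the deep gaps between extended fingers. A small runnable check on a synthetic "V" notch:

import cv2
import numpy as np

# a polygon with one deep notch: convexityDefects reports its depth as pixels * 256
v_shape = np.array([[0, 0], [200, 0], [100, 150], [200, 300], [0, 300]], np.int32).reshape(-1, 1, 2)
hull = cv2.convexHull(v_shape, returnPoints=False)
defects = cv2.convexityDefects(v_shape, hull)
s, e, f, d = defects[0, 0]
print(d / 256.0)  # depth of the notch in pixels (~100 here); 20000/256 = 78.125 px is the threshold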
def run(self):
bytes=''
while not self.thread_cancelled:
try:
bytes+=self.stream.raw.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
#lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
#upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, np.array([53,187,37]), np.array([97,244,153]))
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
#### blurred = cv2.GaussianBlur(mask, (5, 5), 0)
blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cv2.filterSpeckles(mask, 0, 100, 25)
## cv2.filterSpeckles(mask, 0, 50, 25)
## cv2.filterSpeckles(mask, 0, 100, 100)
for c in cnts:
M = cv2.moments(c)
if int(M["m00"]) != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
(cX, cY) = (0, 0)
print(cX, cY)
cv2.drawContours(res, [c], -1, (0, 255, 0), 2)
cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1)
# table.putNumber("center X", cX)
cv2.imshow('img',img)
cv2.imshow('mask',mask)
cv2.imshow('Final',res)
cv2.imshow('cam',img)
#sd.putNumber('Center X', cX) ##send the x value of the center
#sd.putNumber('Center Y', cY) ##send the y value of the center
## print(sd.getNumber('Center Y'), sd.getNumber('Center X'))
if cv2.waitKey(1) ==27:
exit(0)
except ThreadError:
self.thread_cancelled = True
def run(self):
bytes=''
while not self.thread_cancelled: ####see lines 18, 80, 88 ....
try:
bytes+=self.stream.raw.read(1024) ##limit max bytes read in 1 iteration? need to read more on this
a = bytes.find('\xff\xd8')##find start of stream of data
b = bytes.find('\xff\xd9')##find our end of data stream
if a!=-1 and b!=-1: ##so as long as we have a stream of data....do the following
jpg = bytes[a:b+2] ##converts to image or a specific variable...
bytes= bytes[b+2:]
img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR) ##decode the data
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) ##converting color format for easier processing/math
# define range of blue color in HSV
#lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
#upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, np.array([53,187,37]), np.array([97,244,153])) ##get colors in the range of these HSV values
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT) ##the next few lines create outlines and
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1] ##remove any noise
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #find contours
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cv2.filterSpeckles(mask, 0, 100, 25) ##remove speckles aka random dots and white noise
for c in cnts:
M = cv2.moments(c)
if int(M["m00"]) != 0: ##Checks for division by zero
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
(cX, cY) = (0, 0)
cv2.drawContours(res, [c], -1, (0, 255, 0), 2) ##draw box/highlighting
cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1) ##draw box/highlighting
##Try-Catch for appending cX to table
try:
self.table.putNumber('centerX', cX) ##Adds cX to the networktables
except KeyError:
print("centerX failed.")
cv2.imshow('img',img) ##display original image
cv2.imshow('mask',mask) ##display masked image
cv2.imshow('Final',res) ##show final image
cv2.imshow('cam',img) ##see line 71/comments
if cv2.waitKey(1) ==27: ##now we close if esc key is pressed
exit(0)
except ThreadError:
self.thread_cancelled = True
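Both run() loops carve MJPEG frames out of the raw byte stream by searching for the JPEG start-of-image (0xFFD8) and end-of-image (0xFFD9) markers. The snippets above are Python 2 (they mix str with raw socket data); a Python 3 sketch of just that framing logic, under the assumption of a file-like byte stream:

import cv2
import numpy as np

def frames_from_mjpeg(stream, chunk=1024):
    """Yield decoded BGR frames from a file-like MJPEG byte stream."""
    buf = b''
    while True:
        data = stream.read(chunk)
        if not data:
            return
        buf += data
        a = buf.find(b'\xff\xd8')  # JPEG start-of-image marker
        b = buf.find(b'\xff\xd9')  # JPEG end-of-image marker
        if a != -1 and b != -1:
            jpg, buf = buf[a:b + 2], buf[b + 2:]
            frame = cv2.imdecode(np.frombuffer(jpg, np.uint8), cv2.IMREAD_COLOR)
            if frame is not None:
                yield frame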
def find_target(img, lower=np.array([110//2, 10*255//100, 15*255//100]), upper=np.array([180//2, 100*255//100, 100*255//100]), area_threshold=0.025 ** 2):
"""Given an image and thresholds, find the centre of mass of the target.
All arguments must be np.arrays, except for area_threshold, and lower and upper must be a 3-array.
"""
#Converting from RGB to HSV.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#Making the mask.
mask = cv2.inRange(hsv, lower, upper)
#Combining the mask with the frame
res = cv2.bitwise_and(img, img, mask=mask)
height, width = mask.shape
# Get the information for the contours
_, contours, __ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# sort the contours into a list
areas = []
for idx, contour in enumerate(contours):
area = cv2.contourArea(contour)
if area/(height*width) > area_threshold:
heapq.heappush(areas, (cv2.contourArea(contour), idx))
areas = heapq.nlargest(2, areas)
areas_x = []
x_coord = 0
for _, idx in areas:
contour = contours[idx]
moments = cv2.moments(contour)
x_coord += moments['m10']/moments['m00'] / len(areas)
areas_x.append(moments['m10']/moments['m00'])
cv2.drawContours(res, (contour, ), -1, (255, 0, 0), 1)
if len(areas) > 0:
cv2.line(res, (int(x_coord), 60), (int(x_coord), 180), (255,255,0), thickness=2, lineType=8, shift=0)
target_sep = 0
if len(areas_x) > 1:
# target sep returned as a % of image width, not in vision coordinates
target_sep = abs(areas_x[0]-areas_x[1]) / width
pos = 2 * x_coord / width - 1
return pos, res, len(areas), target_sep
# Allow easy testing of captured sample images
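A minimal harness along the lines of that comment (the sample image path is hypothetical):

import cv2

if __name__ == '__main__':
    img = cv2.imread('sample.png')
    pos, res, num_targets, target_sep = find_target(img)
    print('pos=%.3f targets=%d sep=%.3f' % (pos, num_targets, target_sep))
    cv2.imshow('result', res)
    cv2.waitKey(0)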
def image_callback(self, msg):
# convert ROS image to OpenCV image
try:
image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
except CvBridgeError as e:
    print(e)
    return
# create hsv image of scene
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# find pink objects in the image
lower_pink = numpy.array([139, 0, 240], numpy.uint8)
upper_pink = numpy.array([159, 121, 255], numpy.uint8)
mask = cv2.inRange(hsv, lower_pink, upper_pink)
# dilate and erode with kernel size 11x11
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11,11)))
# find all of the contours in the mask image
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.contourLength = len(contours)
# Check for at least one target found
if self.contourLength < 1:
print "No target found"
else: # target found
## Loop through all of the contours, and get their areas
area = [0.0]*len(contours)
for i in range(self.contourLength):
area[i] = cv2.contourArea(contours[i])
#### Target #### the largest "pink" object
target_image = contours[area.index(max(area))]
# Using moments find the center of the object and draw a red outline around the object
target_m = cv2.moments(target_image)
self.target_u = int(target_m['m10']/target_m['m00'])
self.target_v = int(target_m['m01']/target_m['m00'])
points = cv2.minAreaRect(target_image)
box = cv2.cv.BoxPoints(points)
box = numpy.int0(box)
cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))
self.target_found = True # set flag for depth_callback processing
# show image with target outlined with a red rectangle
cv2.imshow ("Target", image)
cv2.waitKey(3)
# This callback function handles processing Kinect depth image, looking for the depth value
# at the location of the center of the pink target.
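A sketch of the depth lookup that comment describes, assuming a 32FC1 depth image and the (target_u, target_v) pixel saved above (the callback name and message handling here are assumptions, since the original depth_callback was not captured in this listing):

def depth_callback(self, msg):
    # convert the Kinect depth image and read the range at the target pixel
    depth_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='32FC1')
    if self.target_found:
        distance = depth_image[self.target_v, self.target_u]  # rows are v, columns are u
        rospy.loginfo("Distance to target: %.2f m", distance)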
def segment(self):
self.im_gray = cv2.medianBlur(self.im_gray, 5)
# Apply adaptive threshold with binary_inv
thresh = cv2.adaptiveThreshold(self.im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
# apply some dilation and erosion to join the gaps
thresh = cv2.dilate(thresh, None, iterations=3)
thresh = cv2.erode(thresh, None, iterations=2)
# finding contours
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
'''
cropped is a dictionary with (cx, cy) centroid tuples as keys, and cropped images as values
centroids is a list of the same centroid tuples, (cx, cy)
- This was done because it was not possible to sort the dictionary directly using tuples as keys using the sort(dict)
function.
- Instead, (cx, cy) was stored in the centroids list, and the list in turn was sorted using centroids.sort().
- The list is then iterated upon to get tuples in order...
- Each tuple iterated upon acts as a key for the dictionary, fetching the cropped images in order
'''
cropped = {(0, 0): '0'}
centroids = [(0, 0)]
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
# finding centroid coordinates, so that it can be the basis of sorting cropped images
M = cv2.moments(cnt)
if M['m00'] == 0:
    continue  # degenerate contour, avoid division by zero
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# storing centroid tuple and cropped image in dictionary
cropped[(cx, cy)] = self.im_gray[y:y + h, x:x + w]
# inserting centroid tuples to a list
centroids.append((cx, cy))
# since (0, 0) was only a placeholder
del cropped[(0, 0)]
centroids.remove((0, 0))
# sorting the centroid list
centroids.sort()
segments = []
for c in centroids:
segments.append(cropped[c])
return segments
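Because the centroids are plain (cx, cy) tuples, centroids.sort() orders them lexicographically: by x first, then by y. For characters laid out on a single line this yields the left-to-right reading order the segmenter needs. For example:

centroids = [(120, 15), (40, 18), (80, 12)]
centroids.sort()
print(centroids)  # [(40, 18), (80, 12), (120, 15)] -- left to right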
def open_cw_bank():
"""Finds the visiblest square of the chest in castle wars bank, wors better when viewing from above at shortest distance."""
# gets RS window's position
rsx,rsy = position()
# Takes screenshot, as Hue-saturated-value image
play_window,psx,psy = getPlayingScreen()
psx += rsx
psy += rsy
lower_gray = np.array([0,15,55])
upper_gray = np.array([10,25,125])
# Makes a black/white mask
mask = cv2.inRange(play_window, lower_gray, upper_gray)
# inverts selection
#res = cv2.bitwise_and(play_window, play_window, mask=mask)
kernel = np.ones((5,5), np.uint8)
dilation = cv2.dilate(mask, kernel, iterations = 1)
#cv2.imshow('img', dilation)
#cv2.waitKey(0)
# Finds contours
_,contours,_ = cv2.findContours(dilation.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
# looks for center of grey color with biggest area, > 3000
for con in contours:
if cv2.contourArea(con) > 3000:
M = cv2.moments(con)
# finds centroid
cx,cy = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
psx += cx
psy += cy
# adds randomness to coords
psx += random.randint(-17,17)
psy += random.randint(-17,17)
#move click chest
Mouse.moveClick(psx,psy,1)
RandTime.randTime(0,0,0,0,9,9)
break
except Exception as e:
print("Bank NOT found!\nMove camera around!")