def _get_tip_position(array, contour, verbose=False):
    # Approximate the contour and take its convex hull
    approx_contour = cv2.approxPolyDP(contour, 0.08 * cv2.arcLength(contour, True), True)
    convex_points = cv2.convexHull(approx_contour, returnPoints=True)
    # Find the topmost hull point (smallest y); it is treated as the fingertip
    cx, cy = 999, 999
    for point in convex_points:
        cur_cx, cur_cy = point[0][0], point[0][1]
        if verbose:
            cv2.circle(array, (cur_cx, cur_cy), 4, _COLOR_GREEN, 4)
        if cur_cy < cy:
            cx, cy = cur_cx, cur_cy
    # Map the fingertip from image coordinates to screen coordinates
    (screen_x, screen_y) = pyautogui.size()
    height, width, _ = array.shape
    x = _round_int(float(cx) / width * (screen_x + 1))
    y = _round_int(float(cy) / height * (screen_y + 1))
    return (array, (x, y))
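A minimal usage sketch for the function above: assuming a hypothetical detection loop that supplies a webcam frame and a hand contour, the returned screen coordinates can be handed straight to pyautogui.moveTo. The helper name below is an assumption, not part of the original snippet.

# Hedged sketch: frame and contour come from a hypothetical detection step.
import pyautogui

def _move_cursor_to_tip(frame, contour):
    # Map the detected fingertip to screen coordinates and move the cursor there.
    frame, (x, y) = _get_tip_position(frame, contour, verbose=False)
    pyautogui.moveTo(x, y, duration=0.1)  # small duration smooths the cursor jump
    return frame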
def prevent_logout(top_left_corner, bottom_right_corner, runescape_window):
    seed = random.random()
    x, y = pyautogui.size()
    if seed > 0.5:  # opens the sale history tab for a few seconds, then returns to the GE tab
        while True:
            realmouse.move_mouse_to(random.randint(0, x), random.randint(0, y))
            if len(list(pyautogui.locateAllOnScreen('Tools/screenshots/sale_history_button.png', region=(top_left_corner[0], top_left_corner[1], bottom_right_corner[0]-top_left_corner[0], bottom_right_corner[1]-top_left_corner[1])))) > 0:
                move_mouse_to_box('Tools/screenshots/sale_history_button.png', top_left_corner, bottom_right_corner)
                pyautogui.click()
                time.sleep(9*random.random() + 1)
                move_mouse_to_box('Tools/screenshots/grand_exchange_button.png', top_left_corner, bottom_right_corner)
                pyautogui.click()
                break
    else:  # examines the money pouch
        examine_money(bottom_right_corner)
# pass in an image and a search region
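move_mouse_to_box is called above but not included in this listing; per the comment, it takes an image and a search region. Below is a minimal sketch of what such a helper might look like, assuming pyautogui.locateOnScreen with a region tuple; the body is an assumption, not the original implementation (and depending on the pyautogui version a miss returns None or raises ImageNotFoundException).

# Hedged sketch of the helper referenced above; not the original implementation.
import random
import pyautogui

def move_mouse_to_box(image_path, top_left_corner, bottom_right_corner):
    # Search only inside the given rectangle (left, top, width, height).
    region = (top_left_corner[0], top_left_corner[1],
              bottom_right_corner[0] - top_left_corner[0],
              bottom_right_corner[1] - top_left_corner[1])
    box = pyautogui.locateOnScreen(image_path, region=region)
    if box is not None:
        # Pick a random point inside the match so clicks are not pixel-identical.
        x = box.left + random.randint(0, box.width - 1)
        y = box.top + random.randint(0, box.height - 1)
        pyautogui.moveTo(x, y)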
def basic_api():
    x, y = pg.size()
    print(x, y)
    '''
    pg.moveTo(300, 300, 2)
    pg.moveTo(300, 400, 2)
    pg.moveTo(500, 400, 2)
    pg.moveTo(500, 300, 2)
    pg.moveTo(300, 300, 2)
    # pg.moveTo(300, 500, 2)
    '''
    # pg.click(100, 100)
    word = [u'??', u'???']
    pos = [452, 321]
    pg.moveTo(pos[0], pos[1])
    pg.click()
    pg.typewrite(word[0])
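Note that pg.typewrite() only sends ASCII key presses, so the non-ASCII strings in word (mangled to '??' in this copy) cannot be typed directly. A common workaround is to paste through the clipboard; the sketch below assumes the pyperclip package is installed and is not part of the original snippet.

# Hedged workaround sketch: typewrite() handles only ASCII, so non-ASCII text
# is usually pasted via the clipboard instead (assumes pyperclip is installed).
import pyautogui as pg
import pyperclip

def type_unicode(text):
    pyperclip.copy(text)    # put the text on the system clipboard
    pg.hotkey('ctrl', 'v')  # paste it into the focused field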
def example():
    screenWidth, screenHeight = pg.size()
    currentMouseX, currentMouseY = pg.position()
    pg.moveTo(500, 550)
    pg.click()
    pg.moveRel(None, 10)  # move mouse 10 pixels down
    pg.doubleClick()
    # pg.moveTo(500, 500, duration=2, tween=pyautogui.tweens.easeInOutQuad)  # use tweening/easing function to move mouse over 2 seconds.
    pg.typewrite('Hello world!', interval=0.25)  # type with quarter-second pause in between each key
    pg.press('esc')
    pg.keyDown('shift')
    pg.press(['left', 'left', 'left', 'left', 'left', 'left'])
    pg.keyUp('shift')
    pg.hotkey('ctrl', 'c')

delta_y = 50
def handle_screen_res_intent(self, message):
    screen = pyautogui.size()
    resx = screen[0]
    resy = screen[1]
    responsex = num2words(resx)
    responsey = num2words(resy)
    self.speak("Your screen resolution is %s by %s" % (responsex, responsey))
def main():
    # gets screen size
    w, h = pyautogui.size()
    # takes screen screenshot. Returns hsv format image
    scrn_scrnshot = Screenshot.this(0, 0, w, h, 'hsv')
    # cv2.imshow('img', scrn_scrnshot)
    # cv2.waitKey(0)
    # find Grand Exchange window
    lower_hsv = np.array([12, 0, 7])
    upper_hsv = np.array([40, 62, 64])
    # mask of applied values
    mask = cv2.inRange(scrn_scrnshot, lower_hsv, upper_hsv)
    cv2.imshow('img', mask)
    cv2.waitKey(0)
    return
    # NOTE: the early return above leaves the contour-detection code below unreachable
    # find contours to get the sides of the rectangle
    _, contours, h = cv2.findContours(mask, 1, 2)
    for cnt in contours:
        # looks for the biggest square
        # if cv2.contourArea(cnt) <= 1695.0:
        #     continue
        # checks contour sides
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        # Square found here vvvv
        if len(approx) == 4:
            # print("square of {}".format(cv2.contourArea(cnt)))
            # cv2.drawContours(rs_window, [cnt], 0, (255, 255, 255), -1)
            # get geometry of approx and add rs coords
            x, y, w, h = cv2.boundingRect(cnt)
            print(cv2.contourArea(cnt))
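The Screenshot.this() helper used above is not part of this listing. A minimal sketch of an equivalent grab-and-convert step is shown below, assuming pyautogui.screenshot and OpenCV; the function name grab_hsv is hypothetical.

# Hedged sketch of a Screenshot.this()-style helper; the original module is not shown.
import numpy as np
import cv2
import pyautogui

def grab_hsv(left, top, width, height):
    shot = pyautogui.screenshot(region=(left, top, width, height))  # PIL image, RGB
    return cv2.cvtColor(np.array(shot), cv2.COLOR_RGB2HSV)          # convert to HSV for cv2.inRange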
def _scale_image(file_name, size_w, size_h):
    ''' This function calculates ratio and scales an image for comparison by _pyautogui() '''
    sModuleInfo = inspect.stack()[0][3] + " : " + inspect.getmoduleinfo(__file__).name
    # Only used by desktop, so only import here
    import pyautogui
    from PIL import Image
    from decimal import Decimal
    try:
        # Open image file
        file_name = open(file_name, 'rb')  # Read file into memory
        file_name = Image.open(file_name)  # Convert to PIL format
        # Read sizes
        screen_w, screen_h = pyautogui.size()  # Read screen resolution
        image_w, image_h = file_name.size  # Read the image element's actual size
        # Calculate new image size
        if size_w > screen_w:  # Make sure we create the scaling ratio in the proper direction
            ratio = Decimal(size_w) / Decimal(screen_w)  # Get ratio (assume same for height)
        else:
            ratio = Decimal(screen_w) / Decimal(size_w)  # Get ratio (assume same for height)
        CommonUtil.ExecLog(sModuleInfo, "Scaling ratio %s" % ratio, 0)
        size = (int(image_w * ratio), int(image_h * ratio))  # Calculate new resolution of image element
        # Scale image
        file_name.thumbnail(size, Image.ANTIALIAS)  # Resize image per calculation above
        return file_name  # Return the scaled image object
    except:
        return CommonUtil.Exception_Handler(sys.exc_info(), None, "Error scaling image")
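A possible usage sketch for the function above: pyautogui's locate functions accept a PIL Image object as the needle, so the scaled image can be matched on screen directly. The wrapper name and the confidence value below are assumptions (the confidence keyword also requires opencv-python to be installed).

# Hedged usage sketch; the wrapper name and threshold are assumptions.
import pyautogui

def _find_scaled(file_name, size_w, size_h):
    needle = _scale_image(file_name, size_w, size_h)  # scaled PIL image (or an error result)
    return pyautogui.locateOnScreen(needle, confidence=0.9)  # confidence needs opencv-python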
def display_screensize():
    print(pyautogui.size())
def __init__(self):
    pyautogui.PAUSE = 1        # pause one second between each PyAutoGUI call
    pyautogui.FAILSAFE = True  # enable the fail-safe (slam the mouse into a screen corner to abort)
    display_screensize()       # print the size of the display
    self.x, self.y = self.get_positions()
    print(self.x)
    print(self.y)
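get_positions() is referenced above but not included in the snippet. A minimal sketch, assuming it simply reports the current cursor position via pyautogui.position(); this is an assumption, not the original method.

# Hedged sketch of the get_positions() helper used above; the original is not shown.
def get_positions(self):
    pos = pyautogui.position()  # current cursor position as (x, y)
    return pos[0], pos[1]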