import pyautogui as pg

def example():
    screenWidth, screenHeight = pg.size()
    currentMouseX, currentMouseY = pg.position()
    pg.moveTo(500, 550)
    pg.click()
    pg.moveRel(None, 10)  # move mouse 10 pixels down
    pg.doubleClick()
    # pg.moveTo(500, 500, duration=2, tween=pg.easeInOutQuad)  # use a tweening/easing function to move the mouse over 2 seconds
    pg.typewrite('Hello world!', interval=0.25)  # type with a quarter-second pause between each key
    pg.press('esc')
    pg.keyDown('shift')
    pg.press(['left', 'left', 'left', 'left', 'left', 'left'])
    pg.keyUp('shift')
    pg.hotkey('ctrl', 'c')
    delta_y = 50  # scroll amount; left unused here, as in the original
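As a point of reference, here is a minimal sketch (an illustration, not part of the original example) contrasting absolute moveTo with relative moveRel, with PyAutoGUI's fail-safe and global pause enabled:

import pyautogui as pg

pg.FAILSAFE = True  # slamming the cursor into the top-left corner aborts the script
pg.PAUSE = 0.1      # pause 0.1 s after every PyAutoGUI call

pg.moveTo(100, 100, duration=0.5)  # absolute screen coordinates
pg.moveRel(50, 0, duration=0.5)    # 50 px to the right of wherever the cursor is now
pg.moveRel(0, 50, duration=0.5)    # 50 px further down from there
print(pg.position())               # should end near (150, 150)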
Example source code for Python's moveRel()
import pyautogui

def behaviour1():
    pyautogui.moveRel(56, 56, duration=0.27)
    pyautogui.moveRel(11, 11, duration=0.56)
    pyautogui.moveTo(21, 21, duration=0.11)
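behaviour1 simply chains a few short timed moves; a hedged sketch of how several such movement patterns might be picked at random in a loop (behaviour2, the behaviours list and the timing values are illustrative assumptions, not part of the original):

import random
import time
import pyautogui

def behaviour2():
    # hypothetical second behaviour: a couple of small relative nudges
    pyautogui.moveRel(-30, 10, duration=0.2)
    pyautogui.moveRel(5, -40, duration=0.3)

behaviours = [behaviour1, behaviour2]  # assumes behaviour1 above is in scope

for _ in range(3):
    random.choice(behaviours)()            # run a randomly chosen movement pattern
    time.sleep(random.uniform(0.5, 1.5))   # brief random pause between patterns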
import time
import pyautogui
from pynput.mouse import Button

def on_click(x, y, button, pressed):
    global msg
    global lasttime
    if pressed:
        if Button.left == button:
            msg += "."          # left click records a Morse dot
            lasttime = time.time()
        if Button.right == button:
            msg += "-"          # right click records a Morse dash
            lasttime = time.time()
    else:
        # print(msg)
        newtime = time.time()
        # print(newtime - lasttime)
        if (newtime - lasttime) > 0.5:
            print(msg)
            if msg in esrom:    # esrom: Morse-sequence -> character table defined elsewhere
                pyautogui.typewrite(esrom[msg])
                if esrom[msg] == "w":
                    pyautogui.moveRel(0, -50, duration=0.5)
                if esrom[msg] == "a":
                    pyautogui.moveRel(-50, 0, duration=0.5)
                if esrom[msg] == "s":
                    pyautogui.moveRel(0, 50, duration=0.5)
                if esrom[msg] == "d":
                    pyautogui.moveRel(50, 0, duration=0.5)
            msg = ""
    return True
# Collect events forever
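The function above is written as a pynput mouse-click callback and expects a module-level Morse lookup table named esrom; a minimal sketch of the missing wiring follows (the sample esrom contents are an assumption, the original table is not shown in the source):

import time
from pynput.mouse import Listener

# Hypothetical Morse table; the original esrom dict is not shown here.
esrom = {".--": "w", ".-": "a", "...": "s", "-..": "d"}

msg = ""
lasttime = time.time()

# Feed every click into on_click() defined above, forever.
with Listener(on_click=on_click) as listener:
    listener.join()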
import pyautogui as pa
# PressKey / ReleaseKey are low-level Windows key helpers defined elsewhere in the project.

def single_log_line(cls, log_string):
    cls.delay(PressKey, 0x27)
    ReleaseKey(0x27)
    cls.delay(pa.typewrite, log_string)
    cls.delay(cls.click_pic, 'submit.png')  # Pressing enter doesn't seem to work here?
    cls.delay(PressKey, 0x27)
    ReleaseKey(0x27)
    pa.moveRel(25, 25)  # Cannot recognise the submit button if the cursor is still over it
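single_log_line relies on a click_pic helper that is not shown; a minimal sketch of what such an image-based click could look like with PyAutoGUI's locate functions (the helper name and error handling are assumptions, not the project's actual implementation):

import pyautogui as pa

def click_pic(image_path):
    # Locate a reference image on screen and click its centre.
    # Needs Pillow; pass confidence=0.9 as well if OpenCV is installed.
    # Older PyAutoGUI versions return None when the image is not found,
    # newer ones raise pyautogui.ImageNotFoundException instead.
    pos = pa.locateCenterOnScreen(image_path)
    if pos is None:
        return False
    pa.click(pos.x, pos.y)
    return True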
import cv2
import pyautogui

def input_control(self, count_defects, img_src):
    # update position difference with the previous frame (for mouse movement)
    d_x, d_y = 0, 0
    if self.preCX is not None:
        d_x = self.ROIx - self.preCX
        d_y = self.ROIy - self.preCY
    # check the current command and filter out unstable hand gestures
    cur_cmd = 0
    if self.cmd_switch:
        if self.last_cmds.count(count_defects) >= self.last_cmds.n_maj:
            cur_cmd = count_defects
            # print('major command is', cur_cmd)
        else:
            cur_cmd = 0  # self.last_cmds.major()
    else:
        cur_cmd = count_defects
    # send a mouse input event depending on the hand gesture
    if cur_cmd == 1:
        str1 = '2, move mouse dx,dy = ' + str(d_x*3) + ', ' + str(d_y*3)
        cv2.putText(img_src, str1, (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.moveRel(d_x*3, d_y*3)
            self.last_cmds.push(count_defects)
            # pyautogui.mouseDown(button='left')
            # pyautogui.moveRel(d_x, d_y)
        # else:
        #     pyautogui.mouseUp(button='left')
    elif cur_cmd == 2:
        cv2.putText(img_src, '3 Left (rotate)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='left')
            self.last_cmds.push(count_defects)
            # pyautogui.scroll(d_y, pause=0.2)
    elif cur_cmd == 3:
        cv2.putText(img_src, '4 middle (zoom)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='middle')
            self.last_cmds.push(count_defects)
    elif cur_cmd == 4:
        cv2.putText(img_src, '5 right (pan)', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            pyautogui.dragRel(d_x, d_y, button='right')
            self.last_cmds.push(count_defects)
    elif cur_cmd == 5:
        cv2.putText(img_src, '1 fingertip shows up', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            self.last_cmds.push(count_defects)
    else:
        cv2.putText(img_src, 'No finger detected!', (50, 50), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 2)
        if self.cmd_switch:
            self.last_cmds.push(count_defects)  # no finger detected or wrong gesture
# testing pyautogui
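input_control leans on a self.last_cmds object exposing push(), count() and an n_maj threshold for majority filtering; below is a minimal sketch of such a fixed-length command history (the class name and implementation are assumptions inferred purely from how it is used above):

from collections import deque

class CommandHistory:
    """Fixed-length history of recent gesture codes with a simple majority test."""

    def __init__(self, maxlen=5):
        self._buf = deque(maxlen=maxlen)
        self.n_maj = maxlen // 2 + 1   # how many identical entries count as a majority

    def push(self, cmd):
        self._buf.append(cmd)          # oldest entry is dropped automatically

    def count(self, cmd):
        return self._buf.count(cmd)

    def major(self):
        # Most common command in the buffer, or 0 when the buffer is empty.
        return max(set(self._buf), key=self._buf.count) if self._buf else 0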