def process_request(self, request, spider):
    if spider.name == "jobbole":
        self.browser.get(request.url)
        import time
        time.sleep(3)
        print("Fetching: {0}".format(request.url))
        return HtmlResponse(url=self.browser.current_url, body=self.browser.page_source,
                            encoding="utf-8", request=request)

# On Linux, run the browser inside a virtual display:
# from pyvirtualdisplay import Display
# display = Display(visible=0, size=(800, 600))
# display.start()
#
# browser = webdriver.Chrome()
# browser.get()
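The snippet above only shows process_request; in a full Scrapy project the browser is normally created once (for example in the middleware's __init__) and the class is enabled through the DOWNLOADER_MIDDLEWARES setting. A minimal sketch of that wiring, with SeleniumMiddleware and the module path used as illustrative placeholders rather than names taken from the original project:

# middlewares.py (sketch; class and project names are hypothetical)
from selenium import webdriver

class SeleniumMiddleware(object):
    def __init__(self):
        # one long-lived browser shared by every request handled here
        self.browser = webdriver.Chrome()

# settings.py
# DOWNLOADER_MIDDLEWARES = {
#     "myproject.middlewares.SeleniumMiddleware": 543,
# }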
Python time.sleep() example source code
def test_worker_alarm(manager):
    called = []

    def handler(signal, frame):
        called.append(True)
    signal.signal(signal.SIGALRM, handler)

    @manager.task
    def foo(sleep):
        time.sleep(sleep)

    w = Worker(manager, task_timeout=1)
    w.process_one(make_task('foo', args=(0.1,)))
    assert not called

    w.process_one(make_task('foo', args=(1.1,)))
    assert called
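The test relies on the worker arming SIGALRM for tasks that run past task_timeout. As a standalone illustration of that mechanism (not tied to the Worker class above), signal.alarm() delivers SIGALRM while time.sleep() is in progress; note this only works on Unix and only in the main thread:

import signal
import time

fired = []

def on_alarm(signum, frame):
    fired.append(True)

signal.signal(signal.SIGALRM, on_alarm)
signal.alarm(1)   # ask the kernel to deliver SIGALRM in about 1 second
time.sleep(2)     # the handler runs mid-sleep; on Python 3.5+ the sleep then resumes
signal.alarm(0)   # cancel any pending alarm
assert fired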
def ShowOnePage(self, now_page_items, page):
    for idx, item in enumerate(now_page_items):
        print "\ndownload " + item[1]
        self.saveFile(item[0], page, idx)
    # print '========one page done.================='
    print '========Please hit the Enter.================='
    if self.unload_page_num == page:
        print '========all pages done. clean the repeated files.=========='
        self.CleanRepeatImage()  # at last, deal with the repeated images.
        print 'Nothing left. Now close this application.'
        # self.enable = False  # let the main thread know it's time to quit
        os._exit(0)  # can terminate the main thread.
    # pause for one second between pages
    time.sleep(1)
    print 'take a snap for 1s.'
    # myInput = raw_input()
    # if myInput == ":q":
    #     self.CleanRepeatImage()  # if stopped manually, the work dir must be cleaned.
    #     self.enable = False

# deal with the repeated images
def Start(self):
    self.enable = True
    page = self.page
    print u'Loading pages, please wait......'
    # start a new thread to load pages in the background
    thread.start_new_thread(self.LoadPage, ())
    time.sleep(2)  # wait for the sub thread to fetch some data.
    # ----------- show the downloaded pages -----------
    while self.enable:
        # if self.pages already holds a loaded page, show it
        if len(self.pages) > 0:
            now_page_items = self.pages[0]
            # del now page items
            del self.pages[0]
            print '---main thread --', page
            self.ShowOnePage(now_page_items, page)
            page += 1
    print self.enable
    # ----------- end of the main loop -----------
def blinking():
    if keepalive:
        threading.Timer(10.0, blinking).start()
    # Only blink when we are actually building
    if building or error:
        # If error, blink red.
        if error:
            color = "red"
        else:
            color = "yellow"
        alloff()
        pin = getcode(color)
        GPIO.output(pin, True)
        time.sleep(3)
        GPIO.output(pin, False)

# Check every 10s if we are building, if not or done get latest status
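The pattern above (a function that re-arms a threading.Timer on itself) is a simple way to run periodic work without a dedicated loop thread. A hardware-free sketch of the same idea, with keepalive standing in for whatever stop flag the surrounding module uses:

import threading
import time

keepalive = True

def tick():
    if keepalive:
        # re-arm before doing the work, so a slow iteration does not delay the schedule
        threading.Timer(10.0, tick).start()
    print("periodic work at", time.time())

tick()             # the first call starts the timer chain
time.sleep(25)     # let it fire a couple of times
keepalive = False  # the chain stops after the next pending timer runs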
def compute_task(task=None):
    import time
    client = yield task.receive()  # first message is client task
    result = 0
    while True:
        n = yield task.receive()
        if n is None:  # end of requests
            client.send(result)
            break
        # long-running computation (without 'yield') is simulated with
        # 'time.sleep'; during this time client may send messages to this task
        # (which will be received and put in this task's message queue) or this
        # task can send messages to client
        time.sleep(n)
        result += n

# client (local) task runs computations
def __discover_node(self, msg, task=None):
    for _ in range(10):
        node_task = yield Task.locate('dispycos_node', location=msg.location,
                                      timeout=MsgTimeout)
        if not isinstance(node_task, Task):
            yield task.sleep(0.1)
            continue
        self._disabled_nodes.pop(msg.location.addr, None)
        node = self._nodes.pop(msg.location.addr, None)
        if node:
            logger.warning('Rediscovered dispycosnode at %s; discarding previous incarnation!',
                           msg.location.addr)
            self._disabled_nodes.pop(node.addr, None)
            if self._cur_computation:
                status_task = self._cur_computation.status_task
            else:
                status_task = None
            if status_task:
                for server in node.servers.itervalues():
                    for rtask, job in server.rtasks.itervalues():
                        status = pycos.MonitorException(rtask, (Scheduler.TaskAbandoned, None))
                        status_task.send(status)
                    status_task.send(DispycosStatus(Scheduler.ServerAbandoned,
                                                    server.task.location))
                info = DispycosNodeInfo(node.name, node.addr, node.cpus, node.platform,
                                        node.avail_info)
                status_task.send(DispycosStatus(Scheduler.NodeAbandoned, info))
        node = self._disabled_nodes.get(msg.location.addr, None)
        if not node:
            node = Scheduler._Node(msg.name, msg.location.addr)
            self._disabled_nodes[msg.location.addr] = node
        node.task = node_task
        yield self.__get_node_info(node, task=task)
        raise StopIteration
def restart_pg():
    '''
    Stops and Starts PLUMgrid service after flushing iptables.
    '''
    stop_pg()
    service_start('plumgrid')
    time.sleep(3)
    if not service_running('plumgrid'):
        if service_running('libvirt-bin'):
            raise ValueError("plumgrid service couldn't be started")
        else:
            if service_start('libvirt-bin'):
                time.sleep(8)
                if not service_running('plumgrid') \
                        and not service_start('plumgrid'):
                    raise ValueError("plumgrid service couldn't be started")
            else:
                raise ValueError("libvirt-bin service couldn't be started")
    status_set('active', 'Unit is ready')
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    """Make a new filesystem on the specified block device."""
    count = 0
    e_noent = os.errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('Gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('Waiting for block device %s to appear' % blk_device,
            level=DEBUG)
        count += 1
        time.sleep(1)
    else:
        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
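Worth noting: the else clause above belongs to the while loop, not to an if. It runs only when the loop condition becomes false (here, once the block device exists) and is skipped when the loop is left by break or by the raised IOError. A tiny standalone illustration of that semantics:

import time

attempts = 0
while attempts < 3:       # the condition eventually becomes false
    attempts += 1
    time.sleep(0.1)
else:
    # runs because the loop ended normally (no break, no exception)
    print("loop finished after", attempts, "attempts")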
def create_instance(self, nova, image_name, instance_name, flavor):
    """Create the specified instance."""
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.images.find(name=image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)

    count = 1
    status = instance.status
    while status != 'ACTIVE' and count < 60:
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        count += 1

    if status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None

    return instance
def delete_resource(self, resource, resource_id,
                    msg="resource", max_wait=120):
    """Delete one openstack resource, such as one instance, keypair,
    image, volume, stack, etc., and confirm deletion within max wait time.

    :param resource: pointer to os resource type, ex:glance_client.images
    :param resource_id: unique name or id for the openstack resource
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, otherwise False
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)

    tries = 0
    num_after = len(list(resource.list()))
    while num_after != (num_before - 1) and tries < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        tries += 1

    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))

    if num_after == (num_before - 1):
        return True
    else:
        self.log.error('{} delete timed out'.format(msg))
        return False
def service_restarted(self, sentry_unit, service, filename,
                      pgrep_full=None, sleep_time=20):
    """Check if service was restarted.

    Compare a service's start time vs a file's last modification time
    (such as a config file for that service) to determine if the service
    has been restarted.
    """
    # /!\ DEPRECATION WARNING (beisner):
    # This method is prone to races in that no before-time is known.
    # Use validate_service_config_changed instead.

    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep. pgrep_full is still passed through to ensure
    # deprecation WARNS. lp1474030
    self.log.warn('DEPRECATION WARNING: use '
                  'validate_service_config_changed instead of '
                  'service_restarted due to known races.')

    time.sleep(sleep_time)
    if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
            self._get_file_mtime(sentry_unit, filename)):
        return True
    else:
        return False
def get_iam_credential_report(self):
    report = None
    while report is None:
        try:
            report = self.iam_client.get_credential_report()
        except botocore.exceptions.ClientError as e:
            if 'ReportNotPresent' in e.message:
                self.iam_client.generate_credential_report()
            else:
                raise e
            time.sleep(5)
    document = StringIO.StringIO(report['Content'])
    reader = csv.DictReader(document)
    report_rows = []
    for row in reader:
        report_rows.append(row)
    return report_rows
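The e.message attribute used above only exists on Python 2 era botocore; on current botocore the error code is read from e.response. A hedged sketch of the same retry-until-ready pattern written that way (the client construction and the function name are illustrative, not from the original project):

import csv
import io
import time

import boto3
import botocore.exceptions

def get_credential_report_rows(iam_client=None):
    iam_client = iam_client or boto3.client("iam")
    report = None
    while report is None:
        try:
            report = iam_client.get_credential_report()
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "ReportNotPresent":
                iam_client.generate_credential_report()
                time.sleep(5)   # report generation is asynchronous
            else:
                raise
    body = report["Content"].decode("utf-8")  # Content arrives as CSV bytes
    return list(csv.DictReader(io.StringIO(body)))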
def serial_clicks_by_control_file(tag):
    log('launch click process of ' + tag)
    p_open(Res.adb_start_launcher(tag))
    sleep(Res.pkg_init_delay)  # initialization takes time
    lines = get_usable_lines_from_file(Res.splash_file)
    for i in range(len(lines)):
        items = lines[i].split(Res.split_mark)
        if i == Res.action_cm_launcher_index:
            send_key_home(tag)
            sleep(1)  # invoking the default home takes time
        p_open(Res.adb_tap_with_tag(tag, items[1], items[2]))
        if len(items) > Res.delay_index_one_point:
            sleep(items[Res.delay_index_one_point])
        sleep(3 if i < Res.action_cm_launcher_index else Res.default_delay_in_click)
        if i >= Res.action_always_index:
            send_key_home(tag)
def start(self):
    while True:
        try:
            self.connect()
            while True:
                # time.sleep(0.01)  # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.client.check_msg()
                # time.sleep(0.01)  # attempt to reduce number of OSError: [Errno 104] ECONNRESET
                self.push()
                time.sleep(0.01)
        except OSError as e:
            Util.log(self, "failed to connect, retrying....", e)
            time.sleep(self.config["wait_to_reconnect"])
            self.client.disconnect()
def test_transform_service_heartbeat(self, coordinator):
    # mock coordinator
    fake_kazoo_driver = MagicMock(name="MagicKazooDriver",
                                  spec=KazooDriver)
    coordinator.return_value = fake_kazoo_driver

    # option 1
    serv_thread = transform_service.TransformService()
    serv_thread.daemon = True
    serv_thread.start()
    time.sleep(2)

    # option 2
    # mocks don't seem to work when spawning a service
    # pid = _spawn_transform_service()
    # time.sleep()
    # os.kill(pid, signal.SIGNAL_SIGTERM)

    fake_kazoo_driver.heartbeat.assert_called_with()
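The assertion style here comes from unittest.mock: a MagicMock created with spec= only allows attributes the real class has, and assert_called_with() checks the most recent call. A minimal self-contained illustration (the Driver class is invented for the example, standing in for KazooDriver):

from unittest.mock import MagicMock

class Driver(object):
    def heartbeat(self):
        pass

fake = MagicMock(name="MagicDriver", spec=Driver)
fake.heartbeat()                     # record a call
fake.heartbeat.assert_called_with()  # passes: heartbeat() was last called with no args
# fake.not_a_method()                # would raise AttributeError because of spec=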
def start(self):
    self.client.start()
    #~ if not self.disable_auto_login:
    #~     while self.client.status == 'offline':
    #~         time.sleep(1)
    #~     logger.info('Client: %s' % self.client.status)
    if self.server_id:
        already_added = False
        for f in self.client.get_friend_list():
            if self.client.friend_get_public_key(f) in self.server_id:
                already_added = True
                logger.info('Server already added')
                break
        if not already_added:
            self.client.friend_add_with_request(self.server_id, self.password)
            logger.info('Sent friend request to the server')
    else:
        logger.info('No Server ID given')
katello-publish-cvs.py (project: katello-publish-cvs, author: RedHatSatellite)
def wait_for_publish(seconds):
    """
    Wait for all publishing tasks to terminate. The search string is:
    label = Actions::Katello::ContentView::Publish and state = running
    """
    count = 0
    print "Waiting for publish tasks to finish..."
    # Make sure that publish tasks get the chance to appear before looking for them
    time.sleep(2)
    while get_json(URL + publish_tasks)["total"] != 0:
        time.sleep(seconds)
        count += 1
    print "Finished waiting after " + str(seconds * count) + " seconds"
def _run_scan(self, scanner):
    scanner.action(
        action="scans/" + str(scanner.scan_id) + "/launch", method="POST")
    scan_uuid = scanner.res["scan_uuid"]
    running = True
    counter = 0
    while running:
        scanner.action(
            action="scans?folder_id=" + str(scanner.tag_id), method="GET")
        for scan in scanner.res["scans"]:
            if (scan["uuid"] == scan_uuid
                    and (scan['status'] == "running" or scan['status'] == "pending")):
                time.sleep(2)
                counter += 2
            if (scan["uuid"] == scan_uuid
                    and scan['status'] != "running" and scan['status'] != "pending"):
                running = False
def WaitForFileServerToStart(port):
    """
    Wait for the Flask file server to start up. Test it by trying the
    PyUpdater update URL, e.g. http://127.0.0.1:12345. If we receive
    a ConnectionError, we continue waiting, but if we receive an HTTP
    response code (404), we return True. For a frozen app, e.g. a
    Mac .app bundle, the location of the updates must be supplied by
    an environment variable, whereas when running from the source repo,
    the location of the updates is likely to be ./pyu-data/deploy/
    """
    url = 'http://%s:%s/fileserver-is-ready' % (LOCALHOST, port)
    attempts = 0
    while True:
        try:
            attempts += 1
            requests.get(url, timeout=1)
            return True
        except requests.exceptions.ConnectionError:
            time.sleep(0.25)
            if attempts > 10:
                logger.warning("WaitForFileServerToStart: timeout")
                return
def login():
    # Try to log in; sleep and retry until logged in, or exit if the username/password is wrong
    NotLoggedIn = True
    while NotLoggedIn:
        try:
            reddit = praw.Reddit(
                user_agent=credsUserAgent,
                client_id=credsClientID,
                client_secret=credsClientSecret,
                username=credsUserName,
                password=credsPassword)
            print_and_log("Logged in")
            NotLoggedIn = False
        except praw.errors.InvalidUserPass:
            print_and_log("Wrong username or password", error=True)
            exit(1)
        except Exception as err:
            print_and_log(str(err), error=True)
            time.sleep(5)
    return reddit
def check_fd(self):
    '''Check fds for read timeouts.
    If a connection has stayed in the "read" state longer than its
    configured read timeout without any data arriving, close it.
    '''
    while True:
        for fd in self.conn_state.keys():
            sock_state = self.conn_state[fd]
            # the fd is in the "read" state, read_stime has been set, and the
            # time it has spent waiting in epoll exceeds the read timeout
            if sock_state.state == "read" and sock_state.read_stime \
                    and (time.time() - sock_state.read_stime) >= sock_state.read_itime:
                # timed out: close this fd
                sock_state.state = "closing"
                self.state_machine(fd)
        # check again in 60 seconds
        time.sleep(60)
#}}}
#{{{fork_processes
def _test(stdscr):
    import time
    colors_init()
    label_width = max([len(k) for k in CURSES_COLORPAIRS.keys()])
    cols = 4
    for idx, k in enumerate(CURSES_COLORPAIRS.keys()):
        label = "{{:<{}}}".format(label_width).format(k)
        x = (idx % cols) * (label_width + 1)
        y = idx // cols
        pair = curses.color_pair(CURSES_COLORPAIRS[k])
        stdscr.addstr(y, x, label, pair)
        time.sleep(0.1)
        stdscr.refresh()
    stdscr.getch()
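A helper like _test is normally run through curses.wrapper, which initializes the screen, passes it in as stdscr, and restores the terminal afterwards even on error. Presumably the module invokes it along these lines (a sketch, not taken from the original file):

import curses

if __name__ == "__main__":
    # wrapper() handles initscr()/endwin() and terminal cleanup
    curses.wrapper(_test)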
def forwarder(tasks, interval, batch_size, source, dest):
    '''Forward items from one storage to another.'''
    from .utils import RunFlag, load_manager, redis_client
    from .store import QueueStore
    log = logging.getLogger('dsq.forwarder')

    if not tasks and not source:
        print('--tasks or --source must be provided')
        sys.exit(1)

    s = QueueStore(redis_client(source)) if source else load_manager(tasks).queue
    d = QueueStore(redis_client(dest))
    run = RunFlag()
    while run:
        batch = s.take_many(batch_size)
        if batch['schedule'] or batch['queues']:
            try:
                d.put_many(batch)
            except Exception:
                s.put_many(batch)
                log.exception('Forward error')
                raise
        else:
            time.sleep(interval)
def acquire(self, waitflag=None, timeout=-1):
    """Dummy implementation of acquire().

    For blocking calls, self.locked_status is automatically set to
    True and returned appropriately based on value of
    ``waitflag``. If it is non-blocking, then the value is
    actually checked and not set if it is already acquired. This
    is all done so that threading.Condition's assert statements
    aren't triggered and throw a little fit.
    """
    if waitflag is None or waitflag:
        self.locked_status = True
        return True
    else:
        if not self.locked_status:
            self.locked_status = True
            return True
        else:
            if timeout > 0:
                import time
                time.sleep(timeout)
            return False
def _talk_to_chief(self, path, data=None, default=None):
    tries = 0
    while tries < FLAGS.coord_retries:
        tries += 1
        try:
            url = 'http://%s:%d%s' % (FLAGS.coord_host, FLAGS.coord_port, path)
            log_traffic('Contacting coordinator - url: %s, tries: %d ...' % (url, tries - 1))
            res = urllib.request.urlopen(urllib.request.Request(url, data, {'content-type': 'text/plain'}))
            str = res.read()
            status = res.getcode()
            log_traffic('Coordinator responded - url: %s, status: %s' % (url, status))
            if status == 200:
                return str
            log_traffic('Problem reaching coordinator - url: %s, status: %d' % (url, status))
        except Exception as ex:
            log_traffic('Problem reaching coordinator - url: %s, exception: %r' % (url, ex))
            pass
        time.sleep(10)
    return default