def detect_inactive_hosts(scan_hosts):
"""
    Scan the network to find whether the given scan_hosts are live or dead.
    scan_hosts can be given as a range, e.g. 10.0.2.2-4.
See Scapy docs for specifying targets.
"""
global scheduler
scheduler.enter(RUN_FREQUENCY, 1, detect_inactive_hosts, (scan_hosts, ))
inactive_hosts = []
try:
ans, unans = sr(IP(dst=scan_hosts)/ICMP(), retry=0, timeout=1)
        # summary() calls the callback with each (sent, received) pair.
        ans.summary(lambda s, r: r.sprintf("%IP.src% is alive"))
        for inactive in unans:
            print("%s is inactive" % inactive.dst)
            inactive_hosts.append(inactive.dst)
        print("Total %d hosts are inactive" % len(inactive_hosts))
except KeyboardInterrupt:
exit(0)
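# A minimal sketch of the module-level setup this recipe assumes: the names
# scheduler and RUN_FREQUENCY come from the snippet itself, while the values
# and the target range below are examples. Scapy's sr() requires root privileges.
import sched
import time

from scapy.all import sr, IP, ICMP

RUN_FREQUENCY = 10  # seconds between scans (assumed value)
scheduler = sched.scheduler(time.time, time.sleep)

if __name__ == '__main__':
    scheduler.enter(1, 1, detect_inactive_hosts, ('10.0.2.2-4',))
    scheduler.run()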
Source: 3_7_detect_inactive_machines.py (project: Python-Network-Programming-Cookbook-Second-Edition, author: PacktPublishing)
def upload(pv, inverters, scheduler, timestamp, boundary):
"""Retrieves and uploads inverter data, and schedules the next upload."""
values = [inverter.request_values() for inverter in inverters]
# Filter systems with normal operating mode
values = [val for val in values if val['operating_mode'] == 'normal']
if values:
data = {
'd': time.strftime('%Y%m%d'),
't': time.strftime('%H:%M'),
'v1': round(sum(value['energy_today'] for value in values) * 1000),
'v2': sum(value['output_power'] for value in values),
'v5': sum(value['internal_temp'] for value in values) / len(values),
'v6': sum(value['grid_voltage'] for value in values) / len(values)
}
logger.info('Uploading: %s', data)
pv.add_status(data)
else:
        logger.info('Not uploading: no inverter is in normal operating mode')
sched_args = (pv, inverters, scheduler, timestamp + boundary, boundary)
scheduler.enterabs(timestamp + boundary, 1, upload, sched_args)
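# A possible bootstrap for the self-rescheduling upload() above, aligning the
# first run to the next reporting boundary (300 s matches PVOutput's 5-minute
# statuses). pv and inverters are whatever client objects the snippet already
# uses; the function name and boundary default are assumptions.
import sched
import time

def start_upload_loop(pv, inverters, boundary=300):
    scheduler = sched.scheduler(time.time, time.sleep)
    first = (int(time.time()) // boundary + 1) * boundary  # next boundary
    scheduler.enterabs(first, 1, upload,
                       (pv, inverters, scheduler, first, boundary))
    scheduler.run()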
def run_update(args):
logging.info('Getting updates...')
db = init_db(args.db)
cur = db.cursor()
    with open(args.chores, 'r', encoding='utf-8') as f:
        cfg = next(yaml.safe_load_all(f))
chores_avail = []
tasks = sched.scheduler(time.time)
for name, config in cfg.items():
result = cur.execute('SELECT updated, last_result FROM chore_status WHERE name = ?', (name,)).fetchone()
if result:
result = chores.ChoreStatus(*result)
chorename = config.pop('chore')
chore = chores.CHORE_HANDLERS[chorename](name, status=result, **config)
chores_avail.append((chorename, chore))
try:
while 1:
for chorename, chore in chores_avail:
tasks.enterabs(
chore.status.updated + args.keep * 60,
chores.CHORE_PRIO[chorename],
wrap_fetch, (chore, cur)
)
tasks.run()
db.commit()
if args.keep:
logging.info('A round of updating completed.')
else:
break
except KeyboardInterrupt:
logging.warning('Interrupted.')
finally:
db.commit()
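# The parsing above implies a chores file of roughly this shape once loaded:
# each top-level key names a chore, 'chore' selects the handler from
# chores.CHORE_HANDLERS, and the remaining keys become keyword arguments for
# that handler. Handler names and options here are made up for illustration.
example_cfg = {
    'security-news': {'chore': 'feed', 'url': 'https://example.org/rss'},
    'mirror-sync': {'chore': 'cmd', 'command': 'rsync -a src/ dst/'},
}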
def __init__(self, collection=None, **kwargs):
self.logger = logging.getLogger('executor')
self.logger.setLevel(logging.INFO)
self.sche = sched.scheduler(time.time, time.sleep)
self.sessions_queue = {} # {ID: {delay: cmd, delay2: cmd2, ....}}
self.setCollection(collection)
self.reset_prof = kwargs.get('reset_profiling', False)
self.profile_size = int(kwargs.get('profile_size', 1)) # 1 MB by default
self.drop_coll = kwargs.get('drop_collection', False)
self.creat_coll = kwargs.get('create_collection', True)
self.bins = int(kwargs.get('bins', 20))
self.time_scale_factor = float(kwargs.get('time_scale_factor', 1.0))
self.histtype = kwargs.get('histtype', 'step')
self.exec_time_cache = {} # for display execution result
self.type_cache = { # caching for display
'find' : [], # [ID(str), ...]
'insert' : [],
'update' : [],
'delete' : []
}
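# Hypothetical instantiation showing the kwargs the constructor reads; the
# class name Executor is an assumption (only the logger name 'executor'
# appears in the snippet).
executor = Executor(collection='profiling_test',
                    reset_profiling=True,
                    profile_size=4,        # MB
                    drop_collection=False,
                    create_collection=True,
                    bins=40,
                    time_scale_factor=0.5,
                    histtype='bar')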
def _send(self):
"""
Send a batch of randomly selected packets from the sending pool, then ensure the sending pool gets refilled if
necessary. The packets are encapsulated in an Ethernet frame of type 0xcafe and removed from the sending pool,
and finally broadcast in a batch.
This function reschedules itself to occur every sending_freq seconds.
"""
self._scheduler.enter(self._sending_freq, 1, self._send)
log_debug("Sending scheduler queue length: {}".format(len(self._scheduler.queue)))
if self._sending:
batch = []
s = sample(self._sending_pool, self._batch_size)
for pkt in s:
batch.append(Ether(dst="ff:ff:ff:ff:ff:ff", src=self._mac_address, type=ETHERTYPE) / pkt)
self._sending_pool.remove(pkt)
t_before = time()
_gen_send_repeatable(self._sending_socket, batch, iface=self._wireless_interface, verbose=False)
t_after = time()
with open(self._stats_file_name, 'a') as stats_file:
stats_file.write('{},{},{}\n'.format(t_before, t_after, len(batch)))
self._sent_pkt_counter += len(batch)
log_network("snt {} in {}s".format(len(batch), t_after - t_before))
self._prepare_sending_pool()
def start(self):
"""
Run the aDTN network functionality in two threads, one for sending and the other for receiving. Received
Ethernet frames are filtered for ethertype and processed if they match the 0xcafe type. The sending thread runs
a scheduler for periodic sending of aDTN packets.
"""
self._start_time = time()
self._sent_pkt_counter = 0
self._received_pkt_counter = 0
self._decrypted_pkt_counter = 0
self._prepare_sending_pool()
self._scheduler.enter(self._sending_freq, 1, self._send)
self._sniffing = True
self._thread_receive = Thread(target=self._sniff, name="ReceivingThread")
self._sending = True
self._thread_send = Thread(target=self._scheduler.run, name="SendingThread", kwargs={"blocking": True})
log_network("start-{}-{}".format(self._batch_size, self._sending_freq))
self._thread_receive.start()
sleep(5)
self._thread_send.start()
def stop(self):
"""
Stop aDTN. Make sure the two threads created at start are finished properly.
"""
self._sending = False
try:
while not self._scheduler.empty():
event = self._scheduler.queue.pop()
self._scheduler.cancel(event)
# By now the scheduler has run empty, so join the thread:
self._thread_send.join()
sleep(5)
# Now we just have to join the receiving thread to stop aDTN completely:
self._sniffing = False
self._thread_receive.join()
log_network("stop")
except ValueError: # In case the popped event started running in the meantime...
log_debug("Scheduler is not empty, retry stopping.")
self.stop() # ...call the stop function once more.
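# Illustrative lifecycle for the start()/stop() pair above; the class name
# aDTN and the constructor arguments are assumptions, not shown in the snippet.
node = aDTN(batch_size=10, sending_freq=30, wireless_interface="wlan0")
node.start()
sleep(600)  # let it send and sniff for ten minutes
node.stop()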
def autoBlock(config):
stdLog(u'???????', 'info')
stdLog(u'???...', 'info')
if adminLogin(config['user'], config['configFilename'][:-5] + '.co'):
stdLog(u'????', 'success')
while(True):
s = sched.scheduler(time.time, time.sleep)
tomorrow = datetime.datetime.replace(datetime.datetime.now() +
datetime.timedelta(days = 1),
hour = 0,
minute = 0,
second = 0,
microsecond = 0)
s.enter((tomorrow - datetime.datetime.now()).seconds,
1,
_block,
(config,))
s.run()
else:
stdLog(u'????', 'error')
sys.exit(1)
def run(self, debug=False):
try:
        self.conn = self.__lirc_conn()
while True:
if self.active and self.__find_sync():
schedule = sched.scheduler(time.time, time.sleep)
for _, p in self.players.items():
if p.moving():
schedule.enter(Race.DELAY * p.nth, 1, self.__send, (p,))
schedule.run()
# Apply state changes as per requests from TCP server.
while not self.q.empty():
self.__handle_message(self.q.get(False))
if debug:
break
except KeyboardInterrupt:
        logging.warning("Terminating Race")
finally:
if self.conn:
self.conn.close()
def __init__(self, num, pool_size):
self.pool_size = pool_size
res = self.db().find()
for one in res:
self.db().remove(one)
if num == 0:
self.run_num = 100000
else:
self.run_num = num * 2
self.passed = []
self.hot = []
self.cold = []
self.pending = []
self.count = 0
self.s2 = sched.scheduler(time.time, time.sleep)
self.s2.enter(0, 1, self.get_freeproxy_in_xdaili)
self.s2.enter(3, 1, self.grasp_proxy)
self.s2.run()
self.s1 = sched.scheduler(time.time, time.sleep)
self.s1.enter(3600, 2, self.regular.clean)
self.s1.run()
def notify_new_loans(sleep_time):
global loans_provided
try:
new_provided = api.return_active_loans()['provided']
if loans_provided:
get_id_set = lambda loans: set([x['id'] for x in loans]) # lambda to return a set of ids from the api result
loans_amount = {}
loans_info = {}
for loan_id in get_id_set(new_provided) - get_id_set(loans_provided):
loan = [x for x in new_provided if x['id'] == loan_id][0]
# combine loans with the same rate
k = 'c'+loan['currency']+'r'+loan['rate']+'d'+str(loan['duration'])
loans_amount[k] = float(loan['amount']) + (loans_amount[k] if k in loans_amount else 0)
loans_info[k] = loan
# send notifications with the grouped info
for k, amount in loans_amount.iteritems():
loan = loans_info[k]
t = "{0} {1} loan filled for {2} days at a rate of {3:.4f}%"
text = t.format(amount, loan['currency'], loan['duration'], float(loan['rate']) * 100)
log.notify(text, notify_conf)
loans_provided = new_provided
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print("Error during new loans notification: {0}".format(ex.message))
scheduler.enter(sleep_time, 1, notify_new_loans, (sleep_time, ))
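# Probable bootstrap for the self-rescheduling notifier above, matching its
# use of a module-level scheduler; the 60-second sleep_time is an example.
import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)
scheduler.enter(0, 1, notify_new_loans, (60,))
scheduler.run()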
def __init__(self):
super().__init__()
self.id_it = ID_NOT_SET
self.scheduler = sched.scheduler(time.time, time.sleep)
vbox = QtWidgets.QVBoxLayout()
self.setLayout(vbox)
vbox.setAlignment(QtCore.Qt.AlignTop)
# ..for details
### self.details_ll = QtWidgets.QLabel("-----")
### self.details_ll.setWordWrap(True)
self.question_ll = QtWidgets.QLabel("<h4>Question</h4>")
vbox.addWidget(self.question_ll)
self.question_le = QtWidgets.QLineEdit()
self.question_le.textChanged.connect(self.on_question_text_changed)
vbox.addWidget(self.question_le)
def handle_gathering_event(cls, gather_obj):
'''
    This method is called by the scheduler when an event expires.
    It takes the gatherer instance passed to the scheduler as an argument
    and calls its measurement function. To avoid timing issues while
    measuring, the event for the next call is created before the
    measurement call.
'''
delay_in_sec = ms_to_sec(gather_obj.delayms)
new_gather_event = cls.scheduler.enter(delay_in_sec, GATHERING_EVENT_PRIORITY,
cls.handle_gathering_event,
kwargs={
"gather_obj": gather_obj
})
gather_obj.set_event(new_gather_event)
gather_obj.measure()
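# Hypothetical setup for the classmethod above: one shared scheduler plus an
# initial event per gatherer, then a blocking run(). The class name Gatherers
# and the gather_objects iterable are assumptions; ms_to_sec and
# GATHERING_EVENT_PRIORITY are the helpers the snippet already uses.
import sched
import time

Gatherers.scheduler = sched.scheduler(time.time, time.sleep)
for gather_obj in gather_objects:
    event = Gatherers.scheduler.enter(ms_to_sec(gather_obj.delayms),
                                      GATHERING_EVENT_PRIORITY,
                                      Gatherers.handle_gathering_event,
                                      kwargs={"gather_obj": gather_obj})
    gather_obj.set_event(event)
Gatherers.scheduler.run()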
Source: digitalocean_flocker_plugin.py (project: digitalocean_flocker_plugin, author: ngrewe)
def _iterations_until(self, completed, update_state, argument):
""" Poll for a state change to complete callable-s
:param completed: A callable accepting argument, returning true if the
state change has successfully completed.
:param update_state: The action to execute in order to poll for a state
change
:param argument: The arguments on which to execute both the check and
the action. Probably a tuple.
:return: The number of iterations taken
:rtype: int
"""
if completed(*argument):
return 0
s = scheduler(time.time, time.sleep)
i = 0
started_at = time.time()
while not completed(*argument) and not self._has_timed_out(started_at):
delta = max(0, min(self._poll,
self._timeout - (time.time() - started_at)))
s.enter(delta, 0, update_state, argument)
s.run()
i += 1
return i
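# Illustrative call from another method of the same class: poll a hypothetical
# python-digitalocean droplet until it reports 'active', refreshing its state
# from the API on each iteration.
iterations = self._iterations_until(
    lambda droplet: droplet.status == 'active',  # completed
    lambda droplet: droplet.load(),              # update_state
    (droplet,))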
Source: 03_07_detect_inactive_machines.py (project: 011_python_network_programming_cookbook_demo, author: jerry-0824)
def detect_inactive_hosts(scan_hosts):
"""
    Scan the network to find whether the given scan_hosts are live or dead.
    scan_hosts can be given as a range, e.g. 10.0.2.2-4.
    See the Scapy docs for specifying targets.
"""
global scheduler
scheduler.enter(RUN_FREQUENCY, 1, detect_inactive_hosts, (scan_hosts, ))
inactive_hosts = []
try:
        ans, unans = sr(IP(dst=scan_hosts)/ICMP(), retry=0, timeout=1)
        ans.summary(lambda (s, r): r.sprintf("%IP.src% is alive"))
        for inactive in unans:
            print "%s is inactive" % inactive.dst
            inactive_hosts.append(inactive.dst)
        print "Total %d hosts are inactive" % len(inactive_hosts)
except KeyboardInterrupt:
exit(0)
def run(self):
s = sched.scheduler(time.time, time.sleep)
logger.info('Hard cron daemon started')
while not _cron_stopping:
now = time.time()
            s.enter(60 - now % 60, 1, self.launch, ())  # fire at the next whole-minute boundary
s.run()
def broadlink_rm_temperature_timer(scheduler, delay, device):
scheduler.enter(delay, 1, broadlink_rm_temperature_timer, [scheduler, delay, device])
try:
temperature = str(device.check_temperature())
topic = topic_prefix + "temperature"
logging.debug("Sending RM temperature " + temperature + " to topic " + topic)
mqttc.publish(topic, temperature, qos=qos, retain=retain)
    except Exception:
logging.exception("Error")
def broadlink_sp_energy_timer(scheduler, delay, device):
scheduler.enter(delay, 1, broadlink_sp_energy_timer, [scheduler, delay, device])
try:
energy = str(device.get_energy())
topic = topic_prefix + "energy"
logging.debug("Sending SP energy " + energy + " to topic " + topic)
mqttc.publish(topic, energy, qos=qos, retain=retain)
    except Exception:
logging.exception("Error")
def __init__(self, scheduler):
Thread.__init__(self)
self.scheduler = scheduler
def run(self):
try:
self.scheduler.run()
        except Exception:
logging.exception("Error")
def schedule(timer, priority, command, conn):
s = sched.scheduler(time.time, time.sleep)
s.enter(timer, priority, execute, (command, conn))
s.run()
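# Illustrative call: schedule() blocks in s.run() until the delay elapses and
# then invokes execute(command, conn). The command string and conn object are
# examples in the spirit of the scratch notes below.
schedule(180, 1, 'MODE #uguubot -b nick!*@*', conn)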
#from datetime import datetime, timedelta
# def db_init(db):
# db.execute("create table if not exists scheduler(id primary key, time, action)")
# db.commit()
#split = inp.split(' ')
#timer = int(inp[0])
#action = " ".join(inp[1:])
#command = 'MODE {} -b {}'.format('#uguubot',action)
#run_at = now + timedelta(hours=3)
#delay = (run_at - now).total_seconds()
# now = datetime.now()
# print now
# change = timedelta(weeks=0, days=0, hours=0, minutes=1, seconds=0)
# print change
# future = now + change
# print future
# now = datetime.now()
# run_at = now + timedelta(minutes=1)
# delay = (run_at - now).total_seconds()
# threading.Timer(delay, action('test')).start()
#command = 'PRIVMSG {} :{}'.format('#uguubot',inp)
def _refresh_flows_thread_inner(self):
def refresh_flows(scheduler):
self._register_flows(self.flow_definitions, True)
scheduler.enter(60, 1, refresh_flows, (scheduler,))
scheduler = sched.scheduler(time.time, time.sleep)
scheduler.enter(60, 1, refresh_flows, (scheduler,))
scheduler.run()
def addSession(self, ID, time_table, priority=1):
"""Add a session into executor.
Args:
ID (str): session ID
time_table::
{
time(float): cmd(SON),
time(float): cmd(SON),
time(float): cmd(SON),
.....
}
priority (int): the priority of execution of this session
Note:
"time" in time_table represent the delay of execution after executor begin.
When duration of certain operation is too long, whole execution will delay.
Read more about scheduler in: https://docs.python.org/2/library/sched.html
"""
if ID in self.sessions_queue:
# raise KeyError('ID [%s] already exist!' % ID)
logger.warning('ID [%s] already exist in executor\'s session queue!' % ID)
logger.warning('New operation will overwrite old one')
time_table = {t*self.time_scale_factor: time_table[t] for t in time_table}
self.exec_time_cache[ID] = []
self.sessions_queue[ID] = time_table
cmd_type = time_table.values()[0].keys()[0]
if cmd_type in self.type_cache:
self.type_cache[cmd_type].append(ID)
for t in time_table:
self.sche.enter(t+3, priority, self.runCommand, [ID, time_table[t]])
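# Hypothetical session matching the docstring's shape: delays in seconds
# mapped to SON command documents (collection names and values are examples).
from bson.son import SON

executor.addSession('session-1', {
    0.0: SON([('find', 'testbed'), ('filter', {'active': True})]),
    2.5: SON([('insert', 'testbed'), ('documents', [{'x': 1}])]),
}, priority=1)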
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def __init__(self, timef=time.time, delayf=time.sleep):
    self.__sched_obj = sched.scheduler(timef, delayf)