def handle(self, *args, **options):
    logger.info("Getting ready to trawl Poloniex...")
    schedule.every(1).minutes.do(pull_poloniex_data)

    # @Alex
    # run resampling for all periods and calculate indicator values
    for hor_period in PERIODS_LIST:
        schedule.every(hor_period / time_speed).minutes.do(_resample_then_metrics, {'period': hor_period})

    keep_going = True
    while keep_going:
        try:
            schedule.run_pending()
            time.sleep(1)
        except Exception as e:
            logger.debug(str(e))
            logger.info("Poloniex Trawl shut down.")
            keep_going = False

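# The handler above passes an extra positional argument through do();
# schedule forwards any extra *args/**kwargs to the job when it fires.
# A minimal self-contained sketch of that mechanism (all names below are
# illustrative, not taken from the project above):
import schedule

def resample(params):
    print("resampling period:", params["period"])

for period in (15, 60, 240):
    schedule.every(period).minutes.do(resample, {"period": period})

schedule.run_all()  # fire each registered job once, ignoring its schedule
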
def mine(self, query, minePeriod, requestFrequency, analyzeFrequency, requestAmount=50, similarityCutoff=90):
    try:
        self.query = query
        self.cutoff = similarityCutoff
        self.amount = requestAmount
        startStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        schedule.every(requestFrequency).seconds.do(self.requestTweets)
        schedule.every(analyzeFrequency).seconds.do(self.analyzeGroup)
        end = time() + minePeriod
        while time() <= end:
            schedule.run_pending()
        endStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        print("Mine complete from\n" + startStr + " - " + endStr + "\n")
    except Exception as e:
        print(e)

def handle(self, *args, **kwargs):
    logger.info("%s - starting jobs schedule" % (__name__))
    try:
        '''
        schedule.every().hour.do(job_update_entities)
        schedule.every().hour.do(job_update_clients)
        schedule.every().hour.do(job_update_checks)
        schedule.every().hour.do(job_update_trends)
        #schedule.every(10).minutes.do(job_update_events)
        '''
        schedule.every(settings.CACHE_ENTITY_TTL).seconds.do(job_update_entities)
        schedule.every(settings.CACHE_CLIENT_TTL).seconds.do(job_update_clients)
        schedule.every(settings.CACHE_TRENDS_TTL).seconds.do(job_update_trends)
        while True:
            schedule.run_pending()
            sleep(1)
    except KeyboardInterrupt:
        logger.info("%s - user signal exit!" % (__name__))
        exit(0)

def schedule_thread_worker(schedule, logger):
    ''' schedule thread, takes care of running processes in the future '''
    global CTRL_C
    logLine = 'starting thread_worker'
    logger.debug(logLine)
    while not CTRL_C['STOP']:
        #print('looping', CTRL_C)
        sys.stdout.flush()
        schedule.run_pending()
        logLine = 'scheduler woke {0}'.format(
            threading.current_thread().getName())
        time.sleep(1)
        logger.debug(logLine)
    logger.debug('Threading stop:{0}'.format(
        threading.current_thread().getName()))
    sys.exit()

def madness(self):
    def fast():
        self.pyEfi.redisDb.incr('pyefi:eye:fast')

    def pollRateTest():
        self.pollRateRedis('pyefi:eye:fast')

    schedule.every(1).seconds.do(pollRateTest)
    schedule.every(0.2).seconds.do(fast)
    while 1:
        schedule.run_pending()
        # This is the scheduler tick rate,
        # nothing will poll faster than this.
        sleep(0.005)

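# A self-contained sketch of the tick-rate point made in the comment
# above: a job can never fire more often than the loop's sleep() tick,
# however short its scheduled interval is. Only the schedule library is
# assumed; the names are illustrative.
import time
import schedule

runs = []
schedule.every(0.2).seconds.do(lambda: runs.append(time.time()))

end = time.time() + 2
while time.time() < end:
    schedule.run_pending()
    time.sleep(0.5)  # 0.5 s tick caps the 0.2 s job at one run per tick

print([round(b - a, 2) for a, b in zip(runs, runs[1:])])  # gaps are ~0.5 s
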
def main(greeting=True):
    bot = Fanbot.create_from_modules(secrets, compliments)
    if greeting:
        bot.hello_world()
    try:
        # Initially hardcoding a schedule. Could be swapped out later
        # PLACE YOUR CUSTOM SCHEDULES HERE. SEE SCHEDULE MODULE DOCUMENTATION.
        schedule.every(30).minutes.do(bot.respond_to_tweets)
        schedule.every().day.at("10:30").do(bot.post_compliment)
        while True:
            schedule.run_pending()
            # You can tune the sleep length too. Should be roughly the same
            # as the most frequently scheduled job above
            time.sleep(30*60)  # seconds
    except KeyboardInterrupt:
        if greeting:
            bot.goodbye()

def polling():
    for m in sys.modules['thunderbolt100k.widgets'].modules:
        if m.endswith('__init__.py'):
            continue
        widget_name = os.path.basename(m).replace('.py', '')
        widget = getattr(sys.modules['thunderbolt100k.widgets'], widget_name)
        if constants.CONFIG.get('{0}_INTERVAL'.format(widget.__name__.upper())):
            interval = int(constants.CONFIG.get('{0}_INTERVAL'.format(widget_name.upper())))
        else:
            interval = 5  # Default is 5 min if not specified
        schedule.every(interval).minutes.do(widget_main, widget)
        widget_main(widget)  # Run the function instantly and then schedule
    while True:
        schedule.run_pending()
        time.sleep(60)

def run_schedule(interval=1):
    """
    Continuously run scheduled jobs. Taken from
    https://github.com/mrhwick/schedule/blob/8e1d5f806d34d9ecde3c068490c8d1513ed774c3/schedule/__init__.py#L63
    """
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        def __init__(self, app):
            super().__init__()
            self.app = app

        def run(self):
            with self.app.app_context():
                while not cease_continuous_run.is_set():
                    schedule.run_pending()
                    time.sleep(interval)

    continuous_thread = ScheduleThread(app)
    continuous_thread.start()
    return cease_continuous_run

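# Hypothetical usage of the helper above, assuming a Flask-style `app`
# object (the free variable the thread is constructed with): start the
# background scheduler, then stop it cleanly by setting the returned event.
stop_event = run_schedule(interval=1)
try:
    app.run()         # serve requests while jobs run in the background
finally:
    stop_event.set()  # ScheduleThread exits its loop on the next tick
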
def check_forever(checkers):
    global reload_conf_pending, interrupted, open_backdoor
    schedule_checks(checkers)
    logger.info("Starting infinite loop")
    while not reload_conf_pending:
        if interrupted:
            break
        if open_backdoor:
            open_backdoor = False
            code.interact(
                banner="Kibitzr debug shell",
                local=locals(),
            )
        schedule.run_pending()
        if interrupted:
            break
        time.sleep(1)

def dynamically_scrape_data(filename, link_list, interval, num_retries=10):
    """
    Repeatedly runs the scraper every time the specified interval has passed
    and continuously appends the data to a file.
    """
    def job():
        scrape_all_data_from_all_featured_products(filename,
                                                   link_list,
                                                   num_retries)

    job()
    schedule.every(interval).minutes.do(job)
    while True:
        schedule.run_pending()
        time.sleep(30)
    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def clean_links_and_dynamically_scrape(filename, interval, num_retries=10):
    """
    Repeatedly updates the link list and runs the scraper every time the
    specified interval has passed and continuously appends the data to a file.
    """
    def job():
        clean_links_and_scrape(filename, num_retries)

    job()
    schedule.every(interval).minutes.do(job)
    while True:
        schedule.run_pending()
        time.sleep(30)
    print("Dynamic scraping finished")  # unreachable: the loop above never exits

def main():
    try:
        smart_module = SmartModule()
        smart_module.asset.load_asset_info()
        smart_module.load_site_data()
        smart_module.discover()
        smart_module.load_influx_settings()
    except Exception as excpt:
        Log.exception("Error initializing Smart Module. %s.", excpt)

    while 1:
        try:
            time.sleep(0.5)
            schedule.run_pending()
        except Exception as excpt:
            Log.exception("Error in Smart Module main loop. %s.", excpt)
            break

def run(slack_client):
    '''
    Main event loop.
    '''
    if slack_client.rtm_connect():
        while True:
            events = slack_client.rtm_read()
            for event in events:
                try:
                    process_event(slack_client, event)
                except ApiCallException as api_call_exception:
                    logging.warning(api_call_exception)
            schedule.run_pending()
            time.sleep(SLEEP_TIME)
    else:
        raise ConnectionError()

def start(self):
    try:
        self.modem.check()
        self.set_os_time()
        Thread(target=self.track_detectors).start()
        Thread(target=self.arming_alarm).start()
        Thread(target=self.start_photos_scheduler).start()
        while True:
            schedule.run_pending()
            time.sleep(1)
            # Tests
            # self.sms.send('519585106', 'test')
    except Exception as ex:
        self.logger.error('Raptor: {0}'.format(ex))

def main():
    config.process_args("HEALTH",
                        default_config_path=cfg.DEFAULT_CONF_PATH,
                        defaults=cfg.DEFAULT,
                        validation_schema=cfg.SCHEMA)

    # Init Elastic index in backend
    for src in CONF["sources"]:
        es.ensure_index_exists(CONF["backend"]["elastic"], src["region"])

    # Setup periodic job that does aggregation magic
    run_every_min = CONF["config"]["run_every_minutes"]
    schedule.every(run_every_min).minutes.do(job)
    job()

    while True:
        schedule.run_pending()
        time.sleep(1)

def enable_housekeeping(run_interval=3600):
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        @staticmethod
        def run():
            while not cease_continuous_run.is_set():
                schedule.run_pending()
                time.sleep(run_interval)

    continuous_thread = ScheduleThread()
    continuous_thread.start()
    schedule.every(6).hours.do(LoggingNight.garbage_collect_cache)
    # expose the event so callers can actually stop the thread
    return cease_continuous_run

def worker():
    while True:
        schedule.run_pending()
        # idle_seconds() is None when no jobs are scheduled and can be
        # negative when a job is overdue; guard both before sleeping
        time.sleep(max(schedule.idle_seconds() or 1, 0))

def run_schedule():
    while True:
        sleep_time = schedule.next_run() - datetime.now()
        print('Next job to run at {}, which is {} from now'
              .format(str(schedule.next_run()), str(sleep_time)))
        # Sleep an extra second to make up for microseconds; total_seconds()
        # (unlike .seconds) is also correct for gaps of a day or more
        time.sleep(max(1, sleep_time.total_seconds() + 1))
        schedule.run_pending()

def validate_loop():
    """
    Schedule loop, needed by the schedule module
    """
    while True:
        schedule.run_pending()
        time.sleep(1)

def __start_schedule_deamon():
    def schedule_run():
        while True:
            schedule.run_pending()
            time.sleep(1)

    t = threading.Thread(target=schedule_run)
    t.daemon = True  # the scheduler thread dies with the main program
    t.start()

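# Hypothetical usage of the starter above: register jobs first, then
# launch the daemon thread once; refresh_cache is an illustrative job
# name, not from the source above.
schedule.every(10).minutes.do(refresh_cache)
__start_schedule_deamon()
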
def run_scheduler():
    global scheduler_running
    global scheduler_paused
    global scheduler_done
    while scheduler_running:
        if not scheduler_paused:
            schedule.run_pending()
        sleep(1)

def deferred(self):
    """Called from the thread, schedules pending tasks."""
    schedule.every(10).minutes.do(self._add_rssfeedparsertask)
    schedule.every().minute.do(self._add_episodedownloadtask)
    # Add the RSSFeedParserTask immediately so we don't waste 10 minutes
    self._add_rssfeedparsertask()
    while not self._stop_event.is_set():
        schedule.run_pending()
        sleep(1)

def main(period=None):
    """Constantly check services availability.

    This runs an infinite availability check with the given period.

    :param period: period in seconds
    """
    if not config.get_config().get("regions"):
        LOG.error("No regions configured. Quitting.")
        return 1
    period = period or config.get_config().get("period", 60)
    if SERVICE_CONN_TIMEOUT + SERVICE_READ_TIMEOUT > period:
        LOG.error("Period can not be less than timeout, "
                  "otherwise threads could pile up.")
        return 1
    backend = config.get_config().get("backend")
    if backend["type"] != "elastic":
        LOG.error("Unexpected backend: %(type)s" % backend)
        return 1
    if not storage.get_elasticsearch(check_availability=True):
        LOG.error("Failed to set up Elasticsearch")
        return 1
    LOG.info("Start watching with period %s seconds" % period)
    schedule.every(period).seconds.do(watch_services)
    while True:
        time.sleep(1)
        schedule.run_pending()

async def _run_motd(self):
    # "await" requires a coroutine, so this method must be declared async
    now = datetime.datetime.now()
    await asyncio.sleep(60 - now.second)  # align to the top of the minute
    while self.run_timer:
        schedule.run_pending()
        await asyncio.sleep(60)

def main():
    '''with open('log.csv', 'r+') as logfile:
        get_filing_list(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)'''
    # run the scraping job every 6 hours
    # TODO: move scraping interval to environment var
    schedule.every(6).hours.do(get_filing_list, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5],
                               sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)
    while True:
        schedule.run_pending()
        time.sleep(1)

def refresher_run(url=None, fetcher=None, database=None, checker=None):
    refresher = Refresher(url_prefix=url, fetcher=fetcher, database=database, checker=checker)
    schedule.every(5).minutes.do(refresher.main)
    while True:
        schedule.run_pending()
        time.sleep(5)

def start(self):
    self.agent.start()
    self.logger.info('Starting monitor agent...')
    while not self.stop:
        schedule.run_pending()
        time.sleep(self.configuration.data['frequency'])

def backgroundThread():
    schedule.every().day.at("12:00").do(update)
    #schedule.every(5).minutes.do(update)
    kill_update = threading.Event()

    class SearchUpdateThread(threading.Thread):
        def run(self):
            while not kill_update.is_set():
                schedule.run_pending()
                time.sleep(1*60*60)  # Every hour.

    searchThread = SearchUpdateThread()
    searchThread.daemon = True
    searchThread.start()

async def send_home_forecast(self) -> None:
    # "await" in the loop below requires a coroutine, so this method must be async
    def __periodically_send_morning_forecast__() -> None:
        __send_home_forecast__("sending periodic morning weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date == datetime.date.today(), weathers)))

    def __periodically_send_evening_forecast__() -> None:
        __send_home_forecast__("sending periodic evening weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date != datetime.date.today(), weathers)))

    def __send_home_forecast__(logging_header: str, weathers_predicate) -> None:
        logging.debug(logging_header)
        forecast = self._weather_service.get_forecast(self._home_settings.full_name, self._measurement_system)
        forecast.weathers = weathers_predicate(forecast.weathers)
        for channel in self._discord_client.get_all_channels():
            if self.__should_send_forecast__(channel):
                try:
                    asyncio.get_event_loop().run_until_complete(
                        SendForecastDiscordCommand(self._discord_client, channel, forecast).execute())
                except discord.HTTPException:
                    msg_template = "failed to send home weather forecast (@{}) to channel {}.{}"
                    msg = msg_template.format(self._home_settings.full_name, channel.server.name, channel.name)
                    logging.exception(msg)

    if self._home_settings.morning_forecast_time is not None:
        schedule.every().day.at(self._home_settings.morning_forecast_time)\
            .do(__periodically_send_morning_forecast__)
    if self._home_settings.evening_forecast_time is not None:
        schedule.every().day.at(self._home_settings.evening_forecast_time)\
            .do(__periodically_send_evening_forecast__)
    while True:
        schedule.run_pending()
        await asyncio.sleep(1)

def job_monitor():
    while 1:
        #print('checking jobs')
        schedule.run_pending()
        time.sleep(5)

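# Every example above is a variation on the canonical loop from the
# schedule documentation: register jobs, then call run_pending() in a
# loop whose sleep() sets the scheduling resolution. A minimal
# self-contained version:
import time
import schedule

def job():
    print("working...")

schedule.every(10).seconds.do(job)

while True:
    schedule.run_pending()  # runs any job whose next-run time has passed
    time.sleep(1)           # jobs may fire up to 1 s late at this tick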