def handle(self, *args, **options):
    logger.info("Getting ready to trawl Poloniex...")
    schedule.every(1).minutes.do(pull_poloniex_data)

    # @Alex
    # run resampling for all periods and calculate indicator values
    for hor_period in PERIODS_LIST:
        schedule.every(hor_period / time_speed).minutes.do(
            _resample_then_metrics, {'period': hor_period})

    keep_going = True
    while keep_going:
        try:
            schedule.run_pending()
            time.sleep(1)
        except Exception as e:
            logger.debug(str(e))
            logger.info("Poloniex Trawl shut down.")
            keep_going = False
def mine(self, query, minePeriod, requestFrequency, analyzeFrequency,
         requestAmount=50, similarityCutoff=90):
    try:
        self.query = query
        self.cutoff = similarityCutoff
        self.amount = requestAmount

        startStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        schedule.every(requestFrequency).seconds.do(self.requestTweets)
        schedule.every(analyzeFrequency).seconds.do(self.analyzeGroup)

        end = time() + minePeriod
        while time() <= end:
            schedule.run_pending()

        endStr = strftime("[%Y-%m-%d %H:%M:%S]", localtime())
        print("Mine complete from\n" + startStr + " - " + endStr + "\n")
    except Exception as e:
        print(e)
def activate(self):
    self.run_timer = True
    try:
        with open(self.plugin_config.motd_file, "r", encoding="utf8") as f:
            self.motds = json.load(f)
        schedule.every().day.at("00:00").do(self._display_motd)
        asyncio.ensure_future(self._run_motd())
    except FileNotFoundError:
        with open(self.plugin_config.motd_file, "w", encoding="utf8") as f:
            self.motds = {}
            f.write("{}")
    except json.decoder.JSONDecodeError:
        self.logger.exception(f"Could not decode {self.plugin_config.motd_file}! ", exc_info=True)

    # This is stupid
    self.valid_months = {
        "January", "February", "March", "April", "May", "June", "July",
        "August", "September", "October", "November", "December", "Any"
    }
    self.valid_days = {
        "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", "Any",
        "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
        "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"
    }
def handle(self, *args, **kwargs):
    logger.info("%s - starting jobs schedule" % (__name__))
    try:
        '''
        schedule.every().hour.do(job_update_entities)
        schedule.every().hour.do(job_update_clients)
        schedule.every().hour.do(job_update_checks)
        schedule.every().hour.do(job_update_trends)
        #schedule.every(10).minutes.do(job_update_events)
        '''
        schedule.every(settings.CACHE_ENTITY_TTL).seconds.do(job_update_entities)
        schedule.every(settings.CACHE_CLIENT_TTL).seconds.do(job_update_clients)
        schedule.every(settings.CACHE_TRENDS_TTL).seconds.do(job_update_trends)

        while True:
            schedule.run_pending()
            sleep(1)
    except KeyboardInterrupt:
        logger.info("%s - user signal exit!" % (__name__))
        exit(0)
def auto_delete_delivery():
    def delete_job():
        from xiaodi.api.mysql import session_scope, Task
        now = datetime.utcnow()
        for deadline, id_ in TaskDepot.scan_delete_task():
            deadline, id_ = tranform_utctime(deadline), int(id_)
            if now < deadline:
                break
            with session_scope() as session:
                delivery = session.query(Task).filter(Task.id == id_).first()
                user = gen_temp_object(id=delivery.puser_id)
                DeliveryManager.delete(user, delivery, session=session)
        LOG.info('auto delete task finished')

    schedule.every(5).minutes.do(delete_job)
def auto_finish_delivery():
    def finish_job():
        from xiaodi.api.mysql import session_scope, Task
        now = datetime.utcnow()
        for id_, deadline in TaskDepot.scan_finish_task():
            deadline, id_ = tranform_utctime(deadline), int(id_)
            if now < deadline:
                break
            with session_scope() as session:
                delivery = session.query(Task).filter(Task.id == id_).first()
                user = gen_temp_object(id=delivery.puser_id)
                DeliveryManager.finish(user, delivery, session=session)
        LOG.info('auto finish task finished')

    schedule.every(30).minutes.do(finish_job)
def madness(self):
    def fast():
        self.pyEfi.redisDb.incr('pyefi:eye:fast')

    def pollRateTest():
        self.pollRateRedis('pyefi:eye:fast')

    schedule.every(1).seconds.do(pollRateTest)
    schedule.every(0.2).seconds.do(fast)
    while 1:
        schedule.run_pending()
        # This is the scheduler tick rate,
        # nothing will poll faster than this.
        sleep(0.005)
def main(greeting=True):
    bot = Fanbot.create_from_modules(secrets, compliments)
    if greeting:
        bot.hello_world()
    try:
        # Initially hardcoding a schedule. Could be swapped out later.
        # PLACE YOUR CUSTOM SCHEDULES HERE. SEE SCHEDULE MODULE DOCUMENTATION.
        schedule.every(30).minutes.do(bot.respond_to_tweets)
        schedule.every().day.at("10:30").do(bot.post_compliment)
        while True:
            schedule.run_pending()
            # You can tune the sleep length too. It should be roughly the same
            # as the most frequently scheduled job above.
            time.sleep(30 * 60)  # seconds
    except KeyboardInterrupt:
        if greeting:
            bot.goodbye()
def polling():
    for m in sys.modules['thunderbolt100k.widgets'].modules:
        if m.endswith('__init__.py'):
            continue
        widget_name = os.path.basename(m).replace('.py', '')
        widget = getattr(sys.modules['thunderbolt100k.widgets'], widget_name)
        if constants.CONFIG.get('{0}_INTERVAL'.format(widget_name.upper())):
            interval = int(constants.CONFIG.get('{0}_INTERVAL'.format(widget_name.upper())))
        else:
            interval = 5  # Default is 5 minutes if not specified
        schedule.every(interval).minutes.do(widget_main, widget)
        widget_main(widget)  # Run the function immediately once, then on the schedule
    while True:
        schedule.run_pending()
        time.sleep(60)
def dynamically_scrape_data(filename, link_list, interval, num_retries=10):
    """
    Repeatedly runs the scraper every time the specified interval has passed
    and continuously appends the data to a file.
    """
    def job():
        scrape_all_data_from_all_featured_products(filename,
                                                   link_list,
                                                   num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")
def clean_links_and_dynamically_scrape(filename, interval, num_retries=10):
    """
    Repeatedly updates the link list and runs the scraper every time the
    specified interval has passed and continuously appends the data to a file.
    """
    def job():
        clean_links_and_scrape(filename, num_retries)

    job()
    schedule.every(interval).minutes.do(job)

    while True:
        schedule.run_pending()
        time.sleep(30)

    print("Dynamic scraping finished")
def prepare_jobs(self, jobs):
    suffixed_names = {
        'week': 'weekly',
        'day': 'daily',
        'hour': 'hourly',
        'minute': 'minutes',
        'second': 'seconds',
    }
    for job in jobs:
        if not job.enabled:
            continue

        interval_name = job.time_unit.lower()
        if job.interval > 0:  # There can't be a job less than 0 (0 minutes? 0 seconds?)
            plural_interval_name = interval_name + 's'
            d = getattr(schedule.every(job.interval), plural_interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s.", suffixed_names[interval_name], job.name)
        elif interval_name == 'day':
            schedule.every().day.at(job.at_time).do(self.run_job, job)
            Log.info(" Loading time-based job: " + job.name)
        else:
            d = getattr(schedule.every(), interval_name)
            d.do(self.run_job, job)
            Log.info(" Loading %s job: %s", interval_name, job.name)
def display_weather(self):
    if not self._scheduled:
        # Start a fresh thread for weather updates
        cg.send('Starting update_weather()')
        self.update_weather()
        self._checkSchedule = True
        self._scheduled = True
        schedule.every(5).minutes.do(self.update_weather)
        cg.thread(self.run_sched)  # Start a separate thread
    elif self._checkSchedule:
        cg.send('Error: update_weather() is already running')
    elif not self._checkSchedule:
        # Allow the weather updates to continue
        cg.send('Toggling weather updates back on')
        self._checkSchedule = True
        cg.thread(self.run_sched)  # Start a separate thread
    else:
        cg.send('No appropriate display_weather() action...')
def main():
    config.process_args("HEALTH",
                        default_config_path=cfg.DEFAULT_CONF_PATH,
                        defaults=cfg.DEFAULT,
                        validation_schema=cfg.SCHEMA)

    # Init Elastic index in backend
    for src in CONF["sources"]:
        es.ensure_index_exists(CONF["backend"]["elastic"], src["region"])

    # Setup periodic job that does aggregation magic
    run_every_min = CONF["config"]["run_every_minutes"]
    schedule.every(run_every_min).minutes.do(job)

    job()
    while True:
        schedule.run_pending()
        time.sleep(1)
def enable_housekeeping(run_interval=3600):
    cease_continuous_run = threading.Event()

    class ScheduleThread(threading.Thread):
        @staticmethod
        def run():
            while not cease_continuous_run.is_set():
                schedule.run_pending()
                time.sleep(run_interval)

    continuous_thread = ScheduleThread()
    continuous_thread.start()

    schedule.every(6).hours.do(LoggingNight.garbage_collect_cache)
def start(job):
    schedule.every(30).minutes.do(job)
    threading.Thread(target=worker).start()
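# The `worker` target referenced above is not defined in this snippet. A
# minimal sketch of what such a thread target typically looks like (the body
# below is an assumption, not code from the original project):
import time
import schedule

def worker():
    # Keep servicing whatever jobs have been registered with the scheduler.
    while True:
        schedule.run_pending()
        time.sleep(1)  # tick interval is an assumption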
def build_schedule():
    print('Starting sqlcron. Current time: {}'
          .format(str(datetime.now())))
    interval = int(os.getenv('INTERVAL', '1'))
    unit = os.getenv('UNIT', 'day')
    time_of_day = os.getenv('TIME')

    evaluation_string = 'schedule.every(interval).' + unit
    if time_of_day:
        evaluation_string += '.at(time_of_day)'
    evaluation_string += '.do(run)'
    eval(evaluation_string)
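# A sketch of an eval()-free equivalent of build_schedule() above: it builds
# the job with getattr on the schedule chain instead of evaluating a code
# string. Same environment variables; `run` stands for the job function the
# original module defines elsewhere (assumed here and passed in explicitly).
import os
import schedule

def build_schedule_without_eval(run):
    interval = int(os.getenv('INTERVAL', '1'))
    unit = os.getenv('UNIT', 'day')          # e.g. 'day', 'hours', 'minutes'
    time_of_day = os.getenv('TIME')

    job = getattr(schedule.every(interval), unit)  # schedule.every(1).day, etc.
    if time_of_day:
        job = job.at(time_of_day)                  # .at() returns the job, so chaining works
    job.do(run)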
def download_elections():
    call_command('download_elections')
## check if there's an Election today; if so, start checking every minute whether to set live and start import
def start_update_engagement():
    def update_job():
        with DBSession() as session:
            for engagement in session.query(Engagement).all():
                en = json.loads(engagement.engagement)
                en.append(0)
                engagement.engagement = json.dumps(en)
                session.add(engagement)
            session.commit()

    schedule.every().day.at('00:00').do(update_job)
def deferred(self):
    """Called from the thread, schedules pending tasks."""
    schedule.every(10).minutes.do(self._add_rssfeedparsertask)
    schedule.every().minute.do(self._add_episodedownloadtask)

    # Add the RSSFeedParserTask immediately so we don't waste 10 minutes
    self._add_rssfeedparsertask()

    while not self._stop_event.is_set():
        schedule.run_pending()
        sleep(1)
def main(period=None):
    """Constantly check services availability.

    This runs an infinite availability check with the given period.

    :param period: period in seconds
    """
    if not config.get_config().get("regions"):
        LOG.error("No regions configured. Quitting.")
        return 1

    period = period or config.get_config().get("period", 60)
    if SERVICE_CONN_TIMEOUT + SERVICE_READ_TIMEOUT > period:
        LOG.error("Period can not be lesser than timeout, "
                  "otherwise threads could crowd round.")
        return 1

    backend = config.get_config().get("backend")
    if backend["type"] != "elastic":
        LOG.error("Unexpected backend: %(type)s" % backend)
        return 1

    if not storage.get_elasticsearch(check_availability=True):
        LOG.error("Failed to set up Elasticsearch")
        return 1

    LOG.info("Start watching with period %s seconds" % period)
    schedule.every(period).seconds.do(watch_services)

    while True:
        time.sleep(1)
        schedule.run_pending()
def main():
    '''with open('log.csv', 'r+') as logfile:
        get_filing_list(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5],
                        sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)'''

    # run the scraping job every 6 hours
    # TODO: move scraping interval to environment var
    schedule.every(6).hours.do(get_filing_list, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
                               sys.argv[5], sys.argv[6].lower() == 'true', sys.argv[7].lower() == 'true', None)

    while True:
        schedule.run_pending()
        time.sleep(1)
def schedule_jobs(api, api_twitter):
    schedule.every(settings.PULL_REQUEST_POLLING_INTERVAL_SECONDS).seconds.do(
        lambda: poll_pull_requests(api, api_twitter))
    schedule.every(settings.ISSUE_COMMENT_POLLING_INTERVAL_SECONDS).seconds.do(
        lambda: poll_read_issue_comments(api))
    schedule.every(settings.ISSUE_CLOSE_STALE_INTERVAL_SECONDS).seconds.do(
        lambda: poll_issue_close_stale(api))

    # Call manually the first time, so that we are guaranteed this will run
    # at least once in the interval...
    poll_issue_close_stale(api)
# light_scheduler.py (project: Python-Programming-with-Raspberry-Pi, author: PacktPublishing)
def init_schedule(self):
    # set the schedule
    schedule.every().day.at(self.start_time).do(self.on)
    schedule.every().day.at(self.stop_time).do(self.off)
def refresher_run(url=None, fetcher=None, database=None, checker=None):
    refresher = Refresher(url_prefix=url, fetcher=fetcher, database=database, checker=checker)
    schedule.every(5).minutes.do(refresher.main)
    while True:
        schedule.run_pending()
        time.sleep(5)
def job():
    trello = TrelloApi('API-KEY-HERE')

    # get cards from trello
    cards = trello.lists.get_card('LIST-ID-HERE')

    # open premade card_names main file in append mode (a+) to keep track of card names
    card_names = open('./card_names.txt', 'a+')
    # open premade tasks file in append mode (a+) to use for printer
    tasks = open('./tasks.txt', 'a+')

    # iterate over each card and check that its id is NOT already in the card_names file
    for obj in cards:
        # if the id is already in the card_names file, do nothing
        if obj['id'] in open('card_names.txt').read():
            print('already in the master list')
        else:
            # add the id to the card_names file and the card name to the tasks file
            card_names.write(obj['id'] + '\n')
            tasks.write('\n\n\n\n\n' + obj['name'] + '\n\n\n\n\n:)')
            print("yo i'm printing")

    # print the tasks file if there's content
    if os.stat('tasks.txt').st_size > 0:
        call(['lpr', '-o', 'fit-to-page', 'tasks.txt'])

    # clear the tasks file
    f = open('tasks.txt', 'r+')
    f.truncate()
# schedule job to run the code every 2 seconds
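# The snippet ends at the comment above, before the scheduling call it
# describes. Based on that comment, the driver presumably looks something like
# this (a sketch, not the original code):
import time
import schedule

schedule.every(2).seconds.do(job)  # "every 2 seconds", per the comment above

while True:
    schedule.run_pending()
    time.sleep(1)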
def start(self):
    """Sets up the schedule based on the configuration file."""
    schedule.every(self.configuration.data['frequency']).seconds.do(self.execute)
def backgroundThread():
    schedule.every().day.at("12:00").do(update)
    # schedule.every(5).minutes.do(update)
    kill_update = threading.Event()

    class SearchUpdateThread(threading.Thread):
        def run(self):
            while not kill_update.is_set():
                schedule.run_pending()
                time.sleep(1 * 60 * 60)  # every hour

    searchThread = SearchUpdateThread()
    searchThread.setDaemon(True)
    searchThread.start()
async def send_home_forecast(self) -> None:
    def __periodically_send_morning_forecast__() -> None:
        __send_home_forecast__("sending periodic morning weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date == datetime.date.today(), weathers)))

    def __periodically_send_evening_forecast__() -> None:
        __send_home_forecast__("sending periodic evening weather forecast...",
                               lambda weathers: list(filter(lambda w: w.date != datetime.date.today(), weathers)))

    def __send_home_forecast__(logging_header: str, weathers_predicate) -> None:
        logging.debug(logging_header)
        forecast = self._weather_service.get_forecast(self._home_settings.full_name, self._measurement_system)
        forecast.weathers = weathers_predicate(forecast.weathers)
        for channel in self._discord_client.get_all_channels():
            if self.__should_send_forecast__(channel):
                try:
                    asyncio.get_event_loop().run_until_complete(
                        SendForecastDiscordCommand(self._discord_client, channel, forecast).execute())
                except discord.HTTPException:
                    msg_template = "failed to send home weather forecast (@{}) to channel {}.{}"
                    msg = msg_template.format(self._home_settings.full_name, channel.server.name, channel.name)
                    logging.exception(msg)

    if self._home_settings.morning_forecast_time is not None:
        schedule.every().day.at(self._home_settings.morning_forecast_time)\
            .do(__periodically_send_morning_forecast__)
    if self._home_settings.evening_forecast_time is not None:
        schedule.every().day.at(self._home_settings.evening_forecast_time)\
            .do(__periodically_send_evening_forecast__)

    while True:
        schedule.run_pending()
        await asyncio.sleep(1)
def update():
    global bike_info
    try:
        bike_info = update_data(to_update)
    except Exception as e:
        print('something bad happened: ' + str(e))
# schedule execution of update every minute
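# As above, the snippet stops at the comment before the scheduling call it
# describes. A sketch of what presumably follows (not the original code):
import time
import schedule

schedule.every(1).minutes.do(update)  # "every minute", per the comment above

while True:
    schedule.run_pending()
    time.sleep(30)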