def after_return(self, status, retval, task_id, args, kwargs, einfo):
self.logger.debug('Task %s("%s") returned %s. Result: """%s"""', self.name, args, status, retval)
meta = kwargs.get('meta', {})
nolog = meta.get('nolog', False)
    # In case of emergency, log this task
if not nolog and not self.all_done:
if isinstance(retval, dict):
result = retval.copy()
else:
if einfo:
result = {'detail': str(einfo.exception)}
else:
result = {'detail': str(retval)}
if 'meta' not in result:
result['meta'] = meta
result['meta']['cb_name'] = LOGTASK
meta['task_status'] = status
meta['cleanup'] = True
t = send_task_forever(task_id, LOGTASK, nolog=nolog, args=(result, task_id), kwargs=meta,
queue=Q_MGMT, expires=None, task_id=task_id_from_task_id(task_id))
        self.logger.warning('Created emergency log task %s', t.id)
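# A hedged sketch of the task_id_from_task_id() helper referenced above; the
# real implementation lives elsewhere in this codebase. Assumption: task ids
# are UUID strings, and the follow-up log task needs a stable, distinct id so
# that repeated resends stay idempotent.
from uuid import UUID, uuid5

def task_id_from_task_id(task_id, name='logtask'):
    # uuid5 is deterministic: the same parent id always maps to the same child id
    return str(uuid5(UUID(task_id), name))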
def __call__(self, *args, **kwargs):
try:
_t_args = _walk_obj(args, self._maybe_transform_argument)
_t_kwargs = _walk_obj(kwargs, self._maybe_transform_argument)
results = super(Task, self).__call__(*_t_args, **_t_kwargs)
if hasattr(self.request, 'girder_result_hooks'):
if not isinstance(results, tuple):
results = (results, )
results = tuple([self._maybe_transform_result(i, r)
for i, r in enumerate(results)])
return results
finally:
_walk_obj(args, self._maybe_cleanup)
_walk_obj(kwargs, self._maybe_cleanup)
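# A minimal sketch of the _walk_obj() helper used above (an assumption: the
# real girder_worker version handles more container types). It applies func to
# every leaf of a nested structure of dicts, lists and tuples.
def _walk_obj(obj, func):
    if isinstance(obj, dict):
        return {k: _walk_obj(v, func) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(_walk_obj(v, func) for v in obj)
    return func(obj)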
def test_patch_task(self):
"""
When celery.Task is patched
we patch the __init__, apply, apply_async, and run methods
"""
# Assert base class methods are patched
self.assertIsInstance(celery.Task.__init__, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.apply, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.run, wrapt.BoundFunctionWrapper)
# Create an instance of a Task
task = celery.Task()
# Assert instance methods are patched
self.assertIsInstance(task.__init__, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.apply, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.apply_async, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.run, wrapt.BoundFunctionWrapper)
def test_unpatch_task(self):
"""
When unpatch_task is called on a patched task
we unpatch the __init__, apply, apply_async, and run methods
"""
# Assert base class methods are patched
self.assertIsInstance(celery.Task.__init__, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.apply, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper)
self.assertIsInstance(celery.Task.run, wrapt.BoundFunctionWrapper)
# Unpatch the base class
unpatch_task(celery.Task)
    # Assert the methods are no longer wrapped
self.assertFalse(isinstance(celery.Task.__init__, wrapt.BoundFunctionWrapper))
self.assertFalse(isinstance(celery.Task.apply, wrapt.BoundFunctionWrapper))
self.assertFalse(isinstance(celery.Task.apply_async, wrapt.BoundFunctionWrapper))
self.assertFalse(isinstance(celery.Task.run, wrapt.BoundFunctionWrapper))
def on_failure(self,
exc: Exception,
               task_id: str,
args: list,
kwargs: dict,
einfo: ExceptionInfo): # pragma: no cover
logger = get_logger('celery.worker')
warning = ('Task {task}[{t_id}] had unexpected failure:\n'
'\nargs: {args}\n\nkwargs: {kwargs}\n'
'\n{einfo}').format(task=self.name,
t_id=task_id,
args=args,
kwargs=kwargs,
einfo=einfo)
logger.warning(warning)
super().on_failure(exc, task_id, args, kwargs, einfo)
def make_task(self):
"""Create an celery task responsible of running reporter covers
asynchronously.
:returns: An celery task.
:rtype: :class:`~celery.Task`
"""
class CallbackTask(Task):
def on_success(task, retval, task_id, args, kwargs):
schedule = self.backend.get_schedule(args[0])
self.on_cover_success(schedule, retval)
def on_failure(task, exc, task_id, args, kwargs, einfo):
schedule = self.backend.get_schedule(args[0])
self.on_cover_failure(schedule, exc)
# make `run_cover` method into a celery task
run_cover = self._make_run_cover()
return self.celery.task(bind=True, base=CallbackTask)(run_cover)
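# Hypothetical usage sketch (`reporter` and `schedule_id` are assumed names):
# the first positional task argument is a schedule id, which is why both
# callbacks above look it up via args[0]. Naming the callback's first parameter
# `task` instead of `self` keeps the enclosing reporter instance visible as
# `self` in the closure.
run_cover = reporter.make_task()
run_cover.apply_async(args=(schedule_id,))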
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
Handler called after the task returns.
:param status: Current task state.
:param retval: Task return value/exception.
:param task_id: Unique id of the task.
:param args: Original arguments for the task that returned.
:param kwargs: Original keyword arguments for the task that returned.
:param einfo: ExceptionInfo instance, containing the traceback (if any).
:return: None
"""
logger.debug(
"In %s.after_return: %s, %s, %s, %s."
% (self.__class__.__name__, status, retval, task_id, einfo)
)
self.__decrement_request_tags()
super(WebSightBaseTask, self).after_return(status, retval, task_id, args, kwargs, einfo)
def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
link=None, link_error=None, shadow=None, **options):
# Pass girder related job information through to
# the signals by adding this information to options['headers']
# This sets defaults for reserved_options based on the class defaults,
    # or values defined by the girder_job() decorator
headers = {
'girder_job_title': self._girder_job_title,
'girder_job_type': self._girder_job_type,
'girder_job_public': self._girder_job_public,
'girder_job_handler': self._girder_job_handler,
'girder_job_other_fields': self._girder_job_other_fields,
}
# Certain keys may show up in either kwargs (e.g. via
    # .delay(girder_token='foo')) or in options (e.g.
    # .apply_async(args=(), kwargs={}, girder_token='foo')). For
# those special headers, pop them out of kwargs or options and
# put them in headers so they can be picked up by the
# before_task_publish signal.
for key in self.reserved_headers + self.reserved_options:
if kwargs is not None and key in kwargs:
headers[key] = kwargs.pop(key)
if key in options:
headers[key] = options.pop(key)
if 'headers' in options:
options['headers'].update(headers)
else:
options['headers'] = headers
return super(Task, self).apply_async(
args=args, kwargs=kwargs, task_id=task_id, producer=producer,
link=link, link_error=link_error, shadow=shadow, **options)
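# A sketch of how the girder_* headers assembled above can be consumed on the
# publishing side; hedged, since girder_worker's real signal handler does much
# more than this.
from celery.signals import before_task_publish

@before_task_publish.connect
def on_publish(sender=None, headers=None, **kwargs):
    if headers and 'girder_job_title' in headers:
        # e.g. create the Girder job document before the task is sent
        print(headers['girder_job_title'])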
def is_revoked(task):
"""
    Utility function to check if a task has been revoked.
:param task: The task.
:type task: celery.app.task.Task
    :return: True if this task is in the revoked list for this worker, False
        otherwise.
"""
return task.request.id in _revoked_tasks(task)
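# One plausible implementation of the _revoked_tasks() helper referenced above
# (an assumption, not the project's actual code): inside a worker process,
# Celery tracks revoked ids in celery.worker.state.revoked, a LimitedSet.
def _revoked_tasks(task):
    from celery.worker import state as worker_state
    return worker_state.revoked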
def task_id_sent_handler(sender=None, result=None, **kwargs):
    # information about the task is located in headers for task messages
    # using the task protocol version 2.
print(type(result))
print(result)
print('\n ***Task Success***')
url = 'http://localhost:8000'
requests.post(url)
# @task_revoked.connect
# def my_task_revoked_handler(sender=None, body=None, *args, **kwargs):
# print(kwargs['request'].args)
def setUp(self):
self.broker_url = 'redis://127.0.0.1:{port}/0'.format(port=REDIS_CONFIG['port'])
self.tracer = get_dummy_tracer()
self.pin = Pin(service='celery-test', tracer=self.tracer)
patch_app(celery.Celery, pin=self.pin)
patch_task(celery.Task, pin=self.pin)
def tearDown(self):
unpatch_app(celery.Celery)
unpatch_task(celery.Task)
def test_task_init(self):
"""
Creating an instance of a patched celery.Task
will yield a patched instance
"""
task = celery.Task()
# Assert instance methods are patched
self.assertIsInstance(task.__init__, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.apply, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.apply_async, wrapt.BoundFunctionWrapper)
self.assertIsInstance(task.run, wrapt.BoundFunctionWrapper)
def __call__(self, cmd, *args, **kwargs):
self.logger = get_task_logger('que.tasks')
self.all_done = False
task = 'Task %s("%s")' % (self.name, cmd)
lock = kwargs.pop('lock', False)
block = kwargs.pop('block', None)
check_user_tasks = kwargs.pop('check_user_tasks', False)
tid = self.request.id
blocked = False
if lock:
task_lock = TaskLock(lock, desc=task, logger=self.logger)
else:
task_lock = NoLock()
try:
if check_user_tasks: # Wait for task to appear in UserTasks - bug #chili-618
UserTasks.check(tid, logger=self.logger) # Will raise an exception in case the task does not show up
task_lock.task_check() # Will raise an exception in case the lock does not exist
if block and redis.exists(block):
blocked = True
self.retry(exc=TaskRetry(None)) # Will raise special exception
return super(MetaTask, self).__call__(cmd, *args, **kwargs) # run()
finally:
if not blocked: # Lock must _not_ be deleted when failing on retry
task_lock.delete()
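# A minimal sketch of the NoLock/TaskLock pair used above, assuming a module
# level redis client named `redis` (the real implementation also records the
# lock description and owner, and supports timeouts).
class NoLock(object):
    def task_check(self):
        pass

    def delete(self):
        pass

class TaskLock(NoLock):
    def __init__(self, key, desc='', logger=None):
        self.key = key

    def task_check(self):
        # The lock is expected to exist for the whole task run
        if not redis.exists(self.key):
            raise RuntimeError('Task lock %s vanished' % self.key)

    def delete(self):
        redis.delete(self.key)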
def test_run(django_scheduler, django_schedule):
django_scheduler.set_task()
    assert isinstance(django_scheduler.celery_task, celery.Task)
def after_return(self, *args, **kwargs):
logger.debug('Task returned: {0!r}'.format(self.request))
def send_email(self: celery.Task, to_name: str, to_addr: str, subject: str, text: str, html: str):
"""Send an email to a single address."""
# WARNING: when changing the signature of this function, also change the
# self.retry() call below.
cfg = current_app.config
# Construct the message
msg = EmailMessage()
msg['Subject'] = subject
msg['From'] = Address(cfg['MAIL_DEFAULT_FROM_NAME'], addr_spec=cfg['MAIL_DEFAULT_FROM_ADDR'])
msg['To'] = (Address(to_name, addr_spec=to_addr),)
msg.set_content(text)
msg.add_alternative(html, subtype='html')
# Refuse to send mail when we're testing.
if cfg['TESTING']:
log.warning('not sending mail to %s <%s> because we are TESTING', to_name, to_addr)
return
log.info('sending email to %s <%s>', to_name, to_addr)
# Send the message via local SMTP server.
try:
with smtplib.SMTP(cfg['SMTP_HOST'], cfg['SMTP_PORT'], timeout=cfg['SMTP_TIMEOUT']) as smtp:
if cfg.get('SMTP_USERNAME') and cfg.get('SMTP_PASSWORD'):
smtp.login(cfg['SMTP_USERNAME'], cfg['SMTP_PASSWORD'])
smtp.send_message(msg)
except (IOError, OSError) as ex:
log.exception('error sending email to %s <%s>, will retry later: %s',
to_name, to_addr, ex)
self.retry((to_name, to_addr, subject, text, html), countdown=cfg['MAIL_RETRY'])
else:
log.info('mail to %s <%s> successfully sent', to_name, to_addr)
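# Hypothetical usage sketch: queue the mail from request-handling code.
send_email.delay('Jane Doe', 'jane@example.com', 'Welcome',
                 'Hello Jane!', '<p>Hello Jane!</p>')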
def _config_celery(self):
"""Disables Celery by entirely mocking it.
Without this, actual Celery tasks will be created while the tests are running.
"""
from celery import Celery, Task
self.celery = unittest.mock.MagicMock(Celery)
def fake_task(*task_args, bind=False, **task_kwargs):
def decorator(f):
def delay(*args, **kwargs):
if bind:
return f(decorator.sender, *args, **kwargs)
else:
return f(*args, **kwargs)
f.delay = delay
f.si = unittest.mock.MagicMock()
f.s = unittest.mock.MagicMock()
return f
if bind:
decorator.sender = unittest.mock.MagicMock(Task)
return decorator
self.celery.task = fake_task
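# With _config_celery() in place, functions decorated through self.celery.task
# run synchronously; a test body can then do something like this (a sketch,
# with `ping` an assumed name):
@self.celery.task(bind=True)
def ping(self):
    return 'pong'

assert ping.delay() == 'pong'   # executes inline, no broker involved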
def apply_async(self, *args, **kwargs):
"""
Override the default Celery Task apply_async to allow for the passing of
tags to tasks.
:param args: Positional arguments.
:param kwargs: Keyword arguments.
:return: The results of calling super.apply_async.
"""
tags = kwargs.get("tags", [])
headers = kwargs.get("headers", {})
retries = kwargs.get("retries", 0)
task_args, task_kwargs = args
tags.extend(self.__get_tags_from_task_kwargs(task_kwargs))
try:
del kwargs["chord"]["options"]["producer"]
except (TypeError, KeyError):
pass
if tags is not None and not isinstance(tags, list):
raise ValueError(
"Got an unexpected value for the tags keyword argument to apply_async: %s."
% (tags,)
)
if len(tags) > 0:
if retries > 0:
logger.debug(
"Not incrementing tags %s as apply_async resulted from retry."
% (tags,)
)
else:
self.__increment_tags(tags)
headers["tags"] = tags
kwargs["headers"] = headers
return super(WebSightBaseTask, self).apply_async(*args, **kwargs)
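# Hypothetical call sketch: the extra `tags` keyword ends up in the message
# headers, where worker-side hooks can read it back (names are examples).
some_websight_task.apply_async((target_uuid,), {}, tags=['order-123'])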
def start_time(self):
"""
    Get the time at which this task started. Note that this relies on the
    task_prerun_handler signal hook in app.py.
    :return: the time at which this task started.
"""
if self._start_time is None:
logger.warning(
"Start time not set! Task was %s (ID %s)."
% (self.name, self.request.id)
)
return self._start_time
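# A sketch of the task_prerun_handler hook in app.py that the docstring refers
# to; an assumption about its shape, not the project's verbatim code.
from datetime import datetime

from celery.signals import task_prerun

@task_prerun.connect
def task_prerun_handler(task=None, **kwargs):
    task._start_time = datetime.now()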
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
Ensures that any database session opened by the Task is closed.
:param status: Current task state.
:param retval: Task return value/exception.
:param task_id: Unique id of the task.
:param args: Original arguments for the task that returned.
:param kwargs: Original keyword arguments for the task that returned.
:param einfo: ExceptionInfo instance, containing the traceback (if any).
:return: None
"""
super(DatabaseTask, self).after_return(status, retval, task_id, args, kwargs, einfo)
def on_failure(self, exc, task_id, args, kwargs, einfo):
extra = {
'task name': self.name,
'task id': task_id,
'task args': args,
'task kwargs': kwargs,
}
_logger.error("Celery Task Failed", exc_info=einfo, extra=extra)
def make_celery(app=None):
if app is None:
app = create_app('kubedock', os.path.dirname(__file__))
if SENTRY_ENABLE:
import socket
import celery
import raven
from raven.contrib.celery import register_signal
from raven.contrib.celery import register_logger_signal
from kubedock.settings import MASTER_IP
from kubedock.settings import SENTRY_DSN, SENTRY_EXCLUDE_PATHS
from kubedock.settings import SENTRY_PROCESSORS
from kubedock.utils import get_version
from kubedock.kapi.licensing import get_license_info
authkey = get_license_info().get('auth_key', 'no installation id')
from celery.utils import log
class Celery(celery.Celery):
def on_configure(self):
hostname = "{}({})".format(socket.gethostname(), MASTER_IP)
tags = {'installation_id': authkey}
client = raven.Client(SENTRY_DSN, name=hostname,
release=get_version('kuberdock'),
tags=tags, processors=SENTRY_PROCESSORS,
exclude_paths=SENTRY_EXCLUDE_PATHS)
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
else:
from celery import Celery
celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
flask_app = app
def __call__(self, *args, **kwargs):
with app.app_context():
env.user = 'root'
env.key_filename = SSH_KEY_FILENAME
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
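# Typical usage sketch (task name is an example): build the Celery app once,
# then register tasks on it so they execute inside the Flask application
# context set up by ContextTask.
celery = make_celery()

@celery.task()
def ping_node(node_id):
    return node_id   # runs under app.app_context() with the fabric env configured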
def update(self):
self.sqli_obj.scan_status = json.loads(get('{}/scan/{}/status'.format(self.api_url, self.task_id)).text)[
'status']
try:
self.sqli_obj.scan_log = json.loads(get('{}/scan/{}/log'.format(self.api_url, self.task_id)).text)['log'][
-1]
self.sqli_obj.scan_data = json.loads(get('{}/scan/{}/data'.format(self.api_url, self.task_id)).text)['data']
    except Exception:  # the scan may not have produced log/data yet
pass
if self.sqli_obj.scan_status != 'terminated':
self.update.apply_async((self,), countdown=60)
else:
get('{}/task/{}/delete'.format(self.api_url, self.task_id))
self.sqli_obj.vulnerable = bool(self.sqli_obj.scan_data)
if self.sqli_obj.vulnerable:
            send_mail('SQL injection found',
                      "Url:\t{}\nParameter:\t{}".format(self.sqli_obj.target_url,
                                                        self.sqli_obj.scan_data[0]['value'][0]['parameter']),
self.mail_from,
self.mail_to, fail_silently=False)
self.sqli_obj.save()
def cron_add_pcbaby_tasks_weekly():
"""
???pcbaby??????????????
"""
now = datetime.utcnow()
seeds = CrawlerSeeds.objects.filter()
queue = settings.TASK_QUEUE_MAPPER.get("seed", {}).get("pcbaby")
for seed in seeds:
seed.modified_on = now
seed.last_crawl_on = now
seed.status = "crawling"
seed.save()
print "Going to put task: %s to queue: %s" % (seed.id, queue)
rop.add_task_queue(queue, str(seed.id))
def task_autoretry(*args_task, **kwargs_task):
# https://github.com/celery/celery/pull/2112
def real_decorator(func):
@task(*args_task, **kwargs_task)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except kwargs_task.get('autoretry_on', Exception) as exc:
for exclude in kwargs_task.get('autoretry_exclude', []):
if isinstance(exc, exclude):
log.info(
                            'Will not retry this task because exception '
                            '"{}" is excluded'.format(str(exc)))
return
if kwargs_task.get('retry_message', False):
log.error(kwargs_task.get('retry_message'), exc_info=True)
wrapper.retry(exc=exc)
return wrapper
return real_decorator
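# Usage sketch for the decorator above (exception names and `download` are
# examples): retry on IOError, but give up immediately on PermissionError.
@task_autoretry(autoretry_on=IOError,
                autoretry_exclude=[PermissionError],
                retry_message='fetch failed, retrying')
def fetch(url):
    return download(url)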
def task_sche_run():
try:
print "starting scheduler task"
task = Task.objects.filter(status='appointed').filter(sche_time__lte=datetime.datetime.now())
if len(task)>0:
for mytask in task:
print "mytask_id"
print mytask.id
hosttag = mytask.dbtag
status = 'running'
sql = mytask.sqltext
mycreatetime = mytask.create_time
mytask.status = status
mytask.update_time = datetime.datetime.now()
mytask.save()
log_incep_op(sql, hosttag, mycreatetime)
process_runtask.delay(hosttag, sql, mytask)
#Process(target=process_runtask, args=).start()
    except Exception as e:
        print(e)
def do_some_async_task(var_1, var_2, *args, **kwargs):
"""
Do some async task via Celery
Usage:
do_some_async_task.apply_async(
args=[
'variable 1',
'variable 2',
'arguments 1', 'arguments 2', 'arguments 3'
],
kwargs={
'kwargs_1': 'foo',
'kwargs_2': 'bar'
}
)
"""
return None
def odt_template(fn, ctx, page_size="A4"):
inp = zipfile.ZipFile(fn, "r" )
outs = StringIO.StringIO()
output = zipfile.ZipFile(outs, "a" )
for zi in inp.filelist:
out = inp.read(zi.filename)
        if zi.filename == 'content.xml': # the only file we need to transform
            # un-escape the quotes (in filters etc.)
            t = Template(out.replace('&quot;', '"'))
out = t.render(ctx).encode('utf8')
if page_size=="US" and zi.filename == 'styles.xml' :
t = Template(out.replace( 'style:page-layout-properties fo:page-width="297mm" fo:page-height="210.01mm"', 'style:page-layout-properties fo:page-width="279.4mm" fo:page-height="215.9mm"' ))
out = t.render(ctx).encode('utf8')
output.writestr(zi.filename, out)
output.close()
    content = outs.getvalue()
return content
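# Usage sketch (file names are examples): render an ODT template and write the
# result back out as a new document.
content = odt_template('invoice_template.odt', {'customer': 'ACME'}, page_size='US')
with open('invoice.odt', 'wb') as fh:
    fh.write(content)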
#from celery.task.control import inspect
#i = inspect()
#i.scheduled()
#i.active()