def _initialize(self, worker_id):
    """One-time per-worker setup: record identity and set the process title.

    Safe to call repeatedly; only the first call has any effect.
    """
    if getattr(self, '_initialized', False):
        return
    self._initialized = True
    if self.name is None:
        # Fall back to the class name when no explicit name was given.
        self.name = self.__class__.__name__
    self.worker_id = worker_id
    self.pid = os.getpid()
    script = os.path.basename(sys.argv[0])
    self._title = "%s(%d) [%d]" % (self.name, self.worker_id, self.pid)
    # Advertise this worker in the OS process listing.
    setproctitle.setproctitle(
        "%s - %s(%d)" % (script, self.name, self.worker_id))
# Example source code for the setproctitle() API (collected snippets)
def data_munging(self):
    """Consume entries from the replication queue and write them to Mongo.

    See Also:
        :meth:`.replicator`
    """
    self.write_pid(str(os.getpid()))
    if self.setproctitle:
        import setproctitle
        setproctitle.setproctitle('mymongo_datamunging')
    parser = ParseData()
    mongo_db = MyMongoDB(config['mongodb'])
    DataMunging(mongo_db, self.queues['replicator_out']).run(parser)
def __ping(self, title):
    """Ping www.google.com every 15 seconds until ``self.exit`` is set,
    logging the transitions between reachable and unreachable.

    Args:
        title: process title to advertise for this monitor process.
    """
    hostname = "www.google.com"
    was_down = False
    setproctitle.setproctitle(title)
    while not self.exit.is_set():
        # Single ping with a 2-second deadline; all output discarded.
        response = os.system("ping -c 1 -w2 " + hostname + " > /dev/null 2>&1")
        if response != 0:
            self._log.error(hostname + ' is unreachable!')
            # BUG FIX: remember the outage so the recovery branch below can
            # fire (the original set was_down = False here, which made the
            # "is up again" message unreachable).
            was_down = True
        elif was_down:
            self._log.error(hostname + ' is up again!')
            was_down = False  # report the recovery only once
        # Heartbeat blink each cycle.  NOTE(review): the original source was
        # flattened and the nesting of this GPIO block was ambiguous; assumed
        # loop-level heartbeat — confirm against the original indentation.
        if self._gpio is not None:
            self._gpio.led(1, True)   # LED 1 on
            time.sleep(0.2)
            self._gpio.led(1, False)  # LED 1 off
        time.sleep(15)
def run(self, max_loops=-1):
    """Loop over event structures, raising events and executing tasks.

    Args:
        max_loops: number of loops to run.  If positive, limits the number
            of cycles; defaults to negative, which keeps looping until
            there are no events to raise and no tasks to run.
    """
    if setproctitle is not None:
        prefix = "%s." % self.run_id if self.run_id else ''
        setproctitle("eventor: %s" % (prefix,))

    if max_loops < 0:
        result = self.loop_session()
    else:
        result = None
        for _ in range(max_loops):
            result = self.loop_cycle()

    human_result = "success" if result else 'failure'
    total_todo, _ = self.count_todos(with_delayeds=True)
    module_logger.info('Processing finished with: %s; outstanding tasks: %s' % (human_result, total_todo))
    return result
def run(self):
    """Run one backup job: rsync, rotate and create snapshots, then record
    the resulting backup size.

    The process title is temporarily replaced for the duration of the job
    and always restored in the ``finally`` block.  Success or failure is
    reported via ``signal_done()``; failures are re-raised.
    """
    if setproctitle:
        # Save the current title so it can be restored when done.
        oldproctitle = getproctitle()
        setproctitle('[backing up %d: %s]' % (self.pk, self.friendly_name))
    try:
        self.run_rsync()
        self.snapshot_rotate()
        self.snapshot_create()
        # Atomic update of size.
        size = bfs.parse_backup_sizes(
            self.dest_pool, self.hostgroup.name, self.friendly_name,
            self.date_complete)['size']
        # Drop the last 6 digits to scale down to MB.  NOTE(review):
        # presumably 'size' is a decimal byte-count string — confirm.
        size_mb = size[0:-6] or '0'  # empty slice result means < 1 MB
        HostConfig.objects.filter(pk=self.pk).update(
            backup_size_mb=size_mb)
        # Send signal that we're done.
        self.signal_done(True)
    except:
        # Send signal that we've failed.
        self.signal_done(False)
        # Propagate.  (Bare except is deliberate: everything, including
        # KeyboardInterrupt, must be reported before re-raising.)
        raise
    finally:
        if setproctitle:
            setproctitle(oldproctitle)
def setproctitle(*args, **kwargs):
    """No-op stand-in used when the optional setproctitle package is absent."""
    return None
def setproctitle(name):
    """Fallback no-op: accept and ignore the desired process title."""
    return None
def _start(self, foreground = True):
    """Start the daemon: resolve the run-as user, set the process title,
    then either daemonize (dropping privileges, writing the pidfile and
    opening syslog) or run in the foreground with debug logging.

    Args:
        foreground: when True, skip daemonization and log to the console.
    """
    if self.username is None:
        # No user configured: refuse to run as root, run as ourselves.
        if os.getuid() == 0:
            sys.stderr.write("Refusing to run as superuser\n")
            sys.exit(1)
        self.pw = pwd.getpwuid(os.getuid())
    else:
        self.pw = pwd.getpwnam(self.username)
        # Only root (who can switch) or the target user itself may start.
        if os.getuid() not in (0, self.pw.pw_uid):
            sys.stderr.write("Cannot run as user \"%s\"\n" % (self.username, ))
            sys.exit(1)
    setproctitle(self.procname)
    if not foreground:
        # Order matters: drop privileges before forking, write the pidfile
        # only after daemonizing so it holds the final PID.
        self._drop_priv()
        self.pre_daemonize()
        self._daemonize()
        if self.pidfile:
            self._write_pid()
        self._open_log(syslog = self.syslog)
    else:
        self._drop_priv()
        self.pre_daemonize()
        self._open_log(syslog = False, debug = True)
    self.run()
def run(self):
    """Blink the LED at ~2.5 Hz forever, advertising as 'testdaemon'."""
    setproctitle.setproctitle('testdaemon')
    self.out = 0
    while True:
        GPIO.output(LED, self.out)
        self.out ^= 1  # toggle between 0 and 1
        time.sleep(0.2)
def post_worker_init(dummy_worker):
    """Gunicorn hook: prefix the worker's process title to mark it ready."""
    current_title = setproctitle.getproctitle()
    setproctitle.setproctitle(settings.GUNICORN_WORKER_READY_PREFIX + current_title)
def run(self):
    '''Main execute of the class: repeatedly scan the "tocompress"
    directory and compress any ".mak" files found, using a thread pool.
    Exits cleanly on SIGINT/SIGTERM.'''
    def cb_exit_gracefully(signum, frame):
        '''Callback to exit gracefully: terminate any live workers, then exit.'''
        self.logger.info("Grace exit command received signum %d" % (signum))
        for proc in self.current_subprocs:
            # poll() is None means the subprocess is still running.
            if proc.poll() is None:
                # Switching to a kill -9 as the nice option seems to require it.
                # proc.send_signal(signal.SIGINT)
                proc.terminate()
                #subprocess.check_call("kill -9 " + proc.pid())
        sys.exit(0)
    compressor_workers = int(self.config.get("compression", "compressor_workers"))
    self.logger.info("Compressor process starting up")
    self.pool = ThreadPool(compressor_workers)
    # Mark this process in the OS process list, keeping the old title.
    setproctitle("[compress] " + getproctitle())
    signal.signal(signal.SIGINT, cb_exit_gracefully)
    signal.signal(signal.SIGTERM, cb_exit_gracefully)
    while True:
        tocompress_dir = os.path.join(self.config.get(
            "main", "working_directory"), "tocompress")
        files = self.get_files(tocompress_dir, ".mak")
        if files:
            # Compress all pending files in parallel before sleeping.
            self.pool.map(self.compress_filename, files)
        time.sleep(float(self.config.get(
            "compression", "compression_check_interval")))
    # NOTE(review): unreachable — the loop above never breaks; exit happens
    # via the signal handler's sys.exit(0).
    sys.exit(0)
def run(self):
    """
    Repeatedly call :meth:`loop` method every :attribute:`interval`
    seconds. In case of *separate_process* is :const:`True` exit
    when parent process has exited.
    """
    if self.separate_process:
        # Child process: give it its own title, logging and SIGINT handler.
        setproctitle.setproctitle(self.name)
        self.context.config.configure_logging()
        # Register SIGINT handler which will exit service process
        def sigint_handler(dummy_signum, dummy_frame):
            """
            Exit service process when SIGINT is reached.
            """
            self.stop()
        signal.signal(signal.SIGINT, sigint_handler)
    # 0 forces the first loop() call to happen immediately.
    next_loop_time = 0
    while 1:
        # Exit if service process is run in separated process and pid
        # of the parent process has changed (parent process has exited
        # and init is new parent) or if stop flag is set.
        if (
            (self.separate_process and os.getppid() != self._parent_pid) or
            self._stop_event.is_set()
        ):
            break
        # Repeatedly call loop method. After first call set ready flag.
        if time.time() >= next_loop_time:
            self.loop()
            # next_loop_time is still 0 only right after the first loop()
            # call, so the ready flag is published exactly once.
            if not next_loop_time and not self.ready:
                self._lock.acquire()
                try:
                    self._ready.value = True
                finally:
                    self._lock.release()
            next_loop_time = time.time() + self.interval
        else:
            # Not due yet; poll again shortly (also keeps the parent-pid
            # check responsive).
            time.sleep(0.1)
def tornado_worker(tornado_app, sockets, parent_pid):
    """
    Worker process that serves HTTP requests for *tornado_app* on the
    given pre-bound *sockets*; stops itself if the parent process exits.
    """
    app_settings = tornado_app.settings
    setproctitle.setproctitle(
        "{:s}: worker {:s}".format(
            app_settings['context'].config.name,
            app_settings['interface'].name
        )
    )
    app_settings['context'].config.configure_logging()
    # Attach the HTTP server to the sockets handed over by the master.
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.add_sockets(sockets)

    def sigint_handler(dummy_signum, dummy_frame):
        """
        On SIGINT: stop accepting new requests, then stop the IOLoop.
        """
        server.stop()
        ioloop = tornado.ioloop.IOLoop.instance()
        ioloop.add_callback(ioloop.stop)

    signal.signal(signal.SIGINT, sigint_handler)
    # Watchdog: every 250 ms check whether the parent is gone and stop if so.
    watchdog = tornado.ioloop.PeriodicCallback(
        functools.partial(stop_child, server, parent_pid), 250)
    watchdog.start()
    tornado.ioloop.IOLoop.instance().start()
def command(self):
    """Run the master process: label it, spawn workers for each Tornado
    application, and supervise them until interrupted."""
    setproctitle.setproctitle(
        "{:s}: master process '{:s}'".format(
            self.context.config.name, " ".join(sys.argv)
        ))
    # One worker group per configured interface/application.
    for app in get_tornado_apps(self.context, debug=False):
        self.init_workers(app)
    try:
        start_workers(self.workers, max_restarts=100)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the master; exit quietly.
        pass
def set_process_name(name):
    """No-op fallback used when process-title support is unavailable."""
    return None
def setproctitle(t):
    """Fallback stub: ignore the requested title *t* and do nothing."""
    pass
def main():
    """CLI entry point: parse arguments, resolve the selected command
    (possibly via a dotted import path), retitle the process, and run it.
    With --debug, exceptions drop into a pdb post-mortem instead of
    propagating."""
    parser = setup_parser()
    argcomplete.autocomplete(parser)
    options = parser.parse_args()
    _setup_logger(options)
    # Support the deprecated -c option
    if getattr(options, 'config', None) is not None:
        options.configs.append(options.config)
    # These subcommands need cloud account/region defaults resolved first.
    if options.subparser in ('report', 'logs', 'metrics', 'run'):
        _default_region(options)
        _default_account_id(options)
    try:
        command = options.command
        if not callable(command):
            # 'pkg.module.func' string: import the module, fetch the attr.
            command = getattr(
                importlib.import_module(command.rsplit('.', 1)[0]),
                command.rsplit('.', 1)[-1])
        # Set the process name to something cleaner
        process_name = [os.path.basename(sys.argv[0])]
        process_name.extend(sys.argv[1:])
        setproctitle(' '.join(process_name))
        command(options)
    except Exception:
        # Without --debug, just propagate; with it, print and post-mortem.
        if not options.debug:
            raise
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
def run(self):
    """Runs the daemon.

    Starts the scheduler, replicator, data-munging and data-process
    stages as daemonized child processes, then waits forever, logging a
    heartbeat every minute.
    """
    self.logger = logging.getLogger(__name__)
    sys.stderr = self.log_err
    # BUG FIX: importlib.util.find_spec() returns None for a missing module
    # rather than raising ImportError, so the original availability check
    # was ineffective.  Simply attempting the import is both correct and
    # simpler; absence degrades gracefully to setproctitle=False.
    try:
        import setproctitle
        self.setproctitle = True
        setproctitle.setproctitle('mymongo')
    except ImportError:
        self.setproctitle = False
    self.logger.info("Running")
    self.queues = dict()
    self.queues['replicator_out'] = Queue()
    procs = dict()
    # Each stage runs as a daemonized child so it dies with the parent.
    for proc_name, target in (('scheduler', self.scheduler),
                              ('replicator', self.replicator),
                              ('datamunging', self.data_munging),
                              ('dataprocess', self.data_process)):
        procs[proc_name] = Process(name=proc_name, target=target)
        procs[proc_name].daemon = True
        procs[proc_name].start()
    while True:
        self.logger.info('Working...')
        time.sleep(60)
def scheduler(self):
    """Run the daemon's job scheduler (blocks in sched.start())."""
    self.write_pid(str(os.getpid()))
    if self.setproctitle:
        import setproctitle
        setproctitle.setproctitle('mymongo_scheduler')
    blocking_sched = BlockingScheduler()
    try:
        # Fire the dummy job once a minute; start() never returns.
        blocking_sched.add_job(self.dummy_sched, 'interval', minutes=1)
        blocking_sched.start()
    except Exception as exc:
        self.logger.error('Cannot start scheduler. Error: ' + str(exc))
def replicator(self):
    """Replication producer: stream MySQL changes into the queue.

    See Also:
        :meth:`.data_munging`
    """
    self.write_pid(str(os.getpid()))
    if self.setproctitle:
        import setproctitle
        setproctitle.setproctitle('mymongo_replicator')
    mongo_db = MyMongoDB(config['mongodb'])
    mysql.mysql_stream(config['mysql'], mongo_db, self.queues['replicator_out'])
def data_process(self):
    """Run the data-processing stage against MongoDB."""
    self.write_pid(str(os.getpid()))
    if self.setproctitle:
        import setproctitle
        setproctitle.setproctitle('mymongo_dataprocess')
    mongo_db = MyMongoDB(config['mongodb'])
    ProcessData(mongo_db).run()
def main():
    """Profile the linear solver for a configurable number of trials."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--nTrials', type=int, default=10)
    parsed = arg_parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof-linear')
    npr.seed(0)  # deterministic runs
    prof(parsed)
def main():
    """Profile the Gurobi solver for a configurable number of trials."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--nTrials', type=int, default=10)
    parsed = arg_parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof-gurobi')
    npr.seed(0)  # deterministic runs
    prof(parsed)
def appendproctitle(name):
    '''
    Append "name" (space-separated) to the current process title, when the
    setproctitle package is available; otherwise do nothing.
    '''
    if HAS_SETPROCTITLE:
        current = setproctitle.getproctitle()
        setproctitle.setproctitle(current + ' ' + name)
def post_worker_init(dummy_worker):
    """Gunicorn hook: mark this worker as ready via its process title."""
    new_title = "%s%s" % (settings.GUNICORN_WORKER_READY_PREFIX,
                          setproctitle.getproctitle())
    setproctitle.setproctitle(new_title)
def init(ident = None, args = None):
    """
    Initialize logging system.
    Default logging destination is stderr if "args" is not specified.
    """
    # pylint: disable=E1103
    ident = os.path.basename(sys.argv[0]) if ident is None else ident
    if args is None:
        # StreamHandler with no arguments logs to stderr.
        args = argparse.Namespace(log_level=logging.WARNING,
                                  log_handler=logging.StreamHandler)
    handler = args.log_handler()
    handler.setFormatter(Formatter(ident, handler))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(args.log_level)
    # Optionally advertise our identity in the OS process listing.
    if ident and have_setproctitle and use_setproctitle:
        title = "%s (%s)" % (ident, proctitle_extra) if proctitle_extra else ident
        setproctitle.setproctitle(title)
def init(ident = None, args = None):
    """
    Initialize logging system.
    Default logging destination is stderr if "args" is not specified.

    Args:
        ident: identity string used by the log formatter and, optionally,
            the process title; defaults to the script's basename.
        args: namespace providing ``log_level`` and ``log_handler``
            (a handler factory); defaults to WARNING on stderr.
    """
    # pylint: disable=E1103
    if ident is None:
        ident = os.path.basename(sys.argv[0])
    if args is None:
        # StreamHandler with no arguments logs to stderr.
        args = argparse.Namespace(log_level = logging.WARNING,
        log_handler = logging.StreamHandler)
    handler = args.log_handler()
    handler.setFormatter(Formatter(ident, handler))
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(args.log_level)
    # Optionally advertise our identity in the OS process listing.
    if ident and have_setproctitle and use_setproctitle:
        if proctitle_extra:
            setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
        else:
            setproctitle.setproctitle(ident)
def init(ident = None, args = None):
    """
    Initialize logging system.
    Default logging destination is stderr if "args" is not specified.

    Args:
        ident: identity string used by the log formatter and, optionally,
            the process title; defaults to the script's basename.
        args: namespace providing ``log_level`` and ``log_handler``
            (a handler factory); defaults to WARNING on stderr.
    """
    # pylint: disable=E1103
    if ident is None:
        ident = os.path.basename(sys.argv[0])
    if args is None:
        # StreamHandler with no arguments logs to stderr.
        args = argparse.Namespace(log_level = logging.WARNING,
        log_handler = logging.StreamHandler)
    handler = args.log_handler()
    handler.setFormatter(Formatter(ident, handler))
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(args.log_level)
    # Optionally advertise our identity in the OS process listing.
    if ident and have_setproctitle and use_setproctitle:
        if proctitle_extra:
            setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
        else:
            setproctitle.setproctitle(ident)
def init(ident = None, args = None):
    """
    Initialize logging system.
    Default logging destination is stderr if "args" is not specified.

    Args:
        ident: identity string used by the log formatter and, optionally,
            the process title; defaults to the script's basename.
        args: namespace providing ``log_level`` and ``log_handler``
            (a handler factory); defaults to WARNING on stderr.
    """
    # pylint: disable=E1103
    if ident is None:
        ident = os.path.basename(sys.argv[0])
    if args is None:
        # StreamHandler with no arguments logs to stderr.
        args = argparse.Namespace(log_level = logging.WARNING,
        log_handler = logging.StreamHandler)
    handler = args.log_handler()
    handler.setFormatter(Formatter(ident, handler))
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(args.log_level)
    # Optionally advertise our identity in the OS process listing.
    if ident and have_setproctitle and use_setproctitle:
        if proctitle_extra:
            setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
        else:
            setproctitle.setproctitle(ident)
def init(ident = None, args = None):
    """
    Initialize logging system.
    Default logging destination is stderr if "args" is not specified.

    Args:
        ident: identity string used by the log formatter and, optionally,
            the process title; defaults to the script's basename.
        args: namespace providing ``log_level`` and ``log_handler``
            (a handler factory); defaults to WARNING on stderr.
    """
    # pylint: disable=E1103
    if ident is None:
        ident = os.path.basename(sys.argv[0])
    if args is None:
        # StreamHandler with no arguments logs to stderr.
        args = argparse.Namespace(log_level = logging.WARNING,
        log_handler = logging.StreamHandler)
    handler = args.log_handler()
    handler.setFormatter(Formatter(ident, handler))
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(args.log_level)
    # Optionally advertise our identity in the OS process listing.
    if ident and have_setproctitle and use_setproctitle:
        if proctitle_extra:
            setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
        else:
            setproctitle.setproctitle(ident)