import atexit
import logging
import logging.handlers


# max_bytes defaults to a 10 MiB rotation cap.
def init_logger(min_level=logging.WARNING, path=None, max_bytes=10 << 20):
global _instance
logging.basicConfig(format='[%(asctime)-15s] (%(levelname)s) %(threadName)s: %(message)s')
_instance = logging.getLogger()
_instance.propagate = False
_instance.setLevel(min_level)
if path:
handler = logging.handlers.RotatingFileHandler(path, 'a', maxBytes=max_bytes)
_instance.addHandler(handler)
atexit.register(logging.shutdown)
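# A minimal usage sketch for init_logger above; the log path is illustrative,
# not from the original source. With the atexit hook registered,
# logging.shutdown() flushes and closes the rotating file handler when the
# process exits.
if __name__ == '__main__':
    init_logger(min_level=logging.INFO, path='/tmp/example.log')
    logging.getLogger().info('rotating file handler active until exit')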
def errorCallback(self, msg, fail):
if fail:
self.logger.critical(msg)
if self.verbose:
self.dumpMemory()
logging.shutdown()
exit(-1)
else:
logging.warning(msg)
# Dumps important program memory in the event of critical failure.
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def submit(self, fn, *args, **kwargs):
    # Hold the shutdown lock so a submission cannot race with executor
    # shutdown; a blocking Lock.acquire() always succeeds, so the lock is
    # taken with a context manager rather than tested for failure.
    with self._shutdown_lock:
        if self._shutdown:
            raise RuntimeError(
                'Cannot schedule new futures after shutdown')
        f = WSGIFuture(self.futures)
        w = _WorkItem(f, fn, args, kwargs)
        self._work_queue.put(w)
        self._adjust_thread_count()
        return f
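# A self-contained sketch of the same shutdown guard using only the standard
# library (class and attribute names here are illustrative, not from the
# original source):
import threading
from queue import Queue

class TinyExecutor(object):
    def __init__(self):
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._work_queue = Queue()

    def submit(self, fn, *args, **kwargs):
        # Taking the lock makes submit atomic with respect to shutdown().
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('Cannot schedule new futures after shutdown')
            self._work_queue.put((fn, args, kwargs))

    def shutdown(self):
        with self._shutdown_lock:
            self._shutdown = True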
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
            if l.is_alive():
l.join()
# Stop Monitor
self._monitor.stop()
        if self._monitor.is_alive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
raise DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
# active_threads = [t for t in self.threads if t.isAlive()]
# while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
        if t.is_alive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
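# A short, self-contained illustration of the executor behaviour logged
# above: shutdown(wait=False) returns immediately, but the interpreter still
# joins the worker threads at exit, so pending jobs run to completion.
import concurrent.futures
import time

def _executor_shutdown_demo():
    ex = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    ex.submit(time.sleep, 0.1)
    ex.shutdown(wait=False)  # returns now; the sleep still finishes at exit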
def pre_arg_parse_setup():
"""Setup logging before command line arguments are parsed.
Terminal logging is setup using
`certbot.constants.QUIET_LOGGING_LEVEL` so Certbot is as quiet as
possible. File logging is setup so that logging messages are
buffered in memory. If Certbot exits before `post_arg_parse_setup`
is called, these buffered messages are written to a temporary file.
If Certbot doesn't exit, `post_arg_parse_setup` writes the messages
to the normal log files.
This function also sets `logging.shutdown` to be called on program
exit which automatically flushes logging handlers and
`sys.excepthook` to properly log/display fatal exceptions.
"""
temp_handler = TempHandler()
temp_handler.setFormatter(logging.Formatter(FILE_FMT))
temp_handler.setLevel(logging.DEBUG)
memory_handler = MemoryHandler(temp_handler)
stream_handler = ColoredStreamHandler()
stream_handler.setFormatter(logging.Formatter(CLI_FMT))
stream_handler.setLevel(constants.QUIET_LOGGING_LEVEL)
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG) # send all records to handlers
root_logger.addHandler(memory_handler)
root_logger.addHandler(stream_handler)
# logging.shutdown will flush the memory handler because flush() and
# close() are explicitly called
util.atexit_register(logging.shutdown)
sys.excepthook = functools.partial(
pre_arg_parse_except_hook, memory_handler,
debug='--debug' in sys.argv, log_path=temp_handler.path)
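# A stdlib-only sketch of the buffering scheme described in the docstring
# above; certbot's TempHandler and ColoredStreamHandler are stood in for by
# stock handlers, and the log path is illustrative:
import atexit
import logging
import logging.handlers

def _setup_buffered_logging():
    target = logging.FileHandler('/tmp/buffered.log', delay=True)
    memory = logging.handlers.MemoryHandler(capacity=10000, target=target)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(memory)
    # logging.shutdown() flushes then closes every handler, so buffered
    # records are drained to the target file at exit.
    atexit.register(logging.shutdown)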
def flush(self, force=False): # pylint: disable=arguments-differ
"""Flush the buffer if force=True.
If force=False, this call is a noop.
:param bool force: True if the buffer should be flushed.
"""
# This method allows flush() calls in logging.shutdown to be a
# noop so we can control when this handler is flushed.
if force:
if sys.version_info < (2, 7): # pragma: no cover
logging.handlers.MemoryHandler.flush(self)
else:
super(MemoryHandler, self).flush()
def init_logger():
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger()
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
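# An alternative sketch for resetting the root logger in-process: close and
# detach each handler individually instead of calling logging.shutdown(),
# which is intended for interpreter exit and affects every logger.
import logging

def _reset_root_handlers():
    root = logging.getLogger()
    for handler in list(root.handlers):
        handler.close()
        root.removeHandler(handler)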
def __beforeShutdownCallback(self):
self.logger.debug("Running shutdown hooks")
while len(self.__shutdownHooks) > 0:
func, args, kwargs = self.__shutdownHooks.pop(0)
if hasattr(func, "im_class"):
# This is a bound method
hookName = "%s.%s.%s" % (func.im_class.__module__,
func.im_class.__name__,
func.im_func.__name__)
else:
# This is an ordinary function
hookName = "%s.%s" % (func.__module__, func.__name__)
self.logger.debug("Calling shutdown hook: %s", hookName)
func(*args, **kwargs)
def run(self):
"""Reads data from arecord and passes to processors."""
self._arecord = subprocess.Popen(self._cmd, stdout=subprocess.PIPE)
logger.info("started recording")
# Check for race-condition when __exit__ is called at the same time as
# the process is started by the background thread
if self._closed:
self._arecord.kill()
return
this_chunk = b''
while True:
input_data = self._arecord.stdout.read(self._chunk_bytes)
if not input_data:
break
this_chunk += input_data
if len(this_chunk) >= self._chunk_bytes:
self._handle_chunk(this_chunk[:self._chunk_bytes])
this_chunk = this_chunk[self._chunk_bytes:]
if not self._closed:
logger.error('Microphone recorder died unexpectedly, aborting...')
# sys.exit doesn't work from background threads, so use os._exit as
# an emergency measure.
logging.shutdown()
os._exit(1) # pylint: disable=protected-access
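# Minimal illustration of the exit path used above: sys.exit() only raises
# SystemExit in the calling thread, so a background thread that must bring
# the whole process down flushes the log handlers first and then calls
# os._exit(), which skips atexit and other cleanup.
import logging
import os

def _emergency_exit(code=1):
    logging.shutdown()  # flush/close handlers; os._exit will not do this
    os._exit(code)      # pylint: disable=protected-access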
def index_bam(self):
"""
Indexes a bam using samtools ('samtools index file.bam').
Returns
-------
bool
True if successful.
Raises
------
RuntimeError
If return code from external call is not 0.
"""
self.logger.info("Indexing bam file: {}".format(self.filepath))
idx_start = time.time()
rc = subprocess.call([self.samtools, "index", self.filepath])
if rc == 0:
self.logger.info("Indexing complete. Elapsed time: {} seconds".format(
time.time() - idx_start))
return True
else:
self.logger.error("Unable to index bamfile {}. Exiting".format(
self.filepath))
logging.shutdown()
raise RuntimeError("Unable to index bamfile. Exiting")
def get_chrom_length(self, chrom):
"""
Extract chromosome length from BAM header.
Parameters
----------
chrom : str
The name of the chromosome or scaffold.
Returns
-------
length : int
The length (integer) of the chromsome/scaffold
Raises
------
RuntimeError
If chromosome name not present in bam header
"""
    bamfile = pysam.AlignmentFile(self.filepath, "rb")
    lengths = dict(zip(bamfile.references, bamfile.lengths))
    # Close before the lookup so the handle cannot leak if chrom is missing.
    bamfile.close()
    try:
        return lengths[chrom]
    except KeyError:
        self.logger.error(
            "{} not present in bam header for {}. Exiting.".format(
                chrom, self.filepath))
        logging.shutdown()
        raise RuntimeError(
            "Chromosome name not present in bam header. Exiting")
def compress_vcf(self):
"""
Compresses vcf file using bgzip.
Returns
-------
bool
True if successful
Raises
-------
RuntimeError
If return code from external call is not 0
"""
self.logger.info("Compressing vcf file {}".format(self.filepath))
bgzip_start = time.time()
rc = subprocess.call([self.bgzip, "-f", self.filepath])
if rc == 0:
self.logger.info("Compression complete. Elapsed time: {} seconds".format(
time.time() - bgzip_start))
self.filepath = self.filepath + ".gz"
return True
else:
self.logger.error("Unable to compress vcf file: {}. Exiting.".format(
self.filepath))
logging.shutdown()
raise RuntimeError("Unable to compress vcf file. Exiting.")
def index_vcf(self):
"""
Indexes vcf file using tabix. If file does not end in .gz, will
compress with bgzip (by calling self.compress_vcf).
Note: Files MUST be compressed using bgzip.
Returns
-------
bool
True if successful.
Raises
------
RuntimeError
If return code from external call is not 0.
"""
self.logger.info("Indexing vcf file: {}".format(self.filepath))
index_start = time.time()
rc = subprocess.call([self.tabix, "-f", "-p", "vcf", self.filepath])
if rc == 0:
self.logger.info("Indexing complete. Elapsed time: {} seconds.".format(
time.time() - index_start))
return True
    else:
        self.logger.error("Unable to index vcf file: {}. Exiting.".format(
            self.filepath))
        logging.shutdown()
        raise RuntimeError("Unable to index vcf file. Exiting.")
def index_bwa(self):
"""
Index reference using bwa
Returns
-------
bool
True if successful
Raises
------
RuntimeError
If return code from external call is not 0
"""
self.logger.info("Creating bwa indices for: {}".format(
self.filepath))
bwa_idx_start = time.time()
rc = subprocess.call([self.bwa, "index", self.filepath])
if rc == 0:
self.logger.info(
"BWA indexing complete. Elapsed time: {} seconds".format(
time.time() - bwa_idx_start))
return True
else:
self.logger.error(
"Unable to create bwa indices for {}. Exiting".format(
self.filepath))
logging.shutdown()
raise RuntimeError("Unable to create bwa indicies. Exiting")
def get_chrom_length(self, chrom):
"""
Extract chromosome length from fasta.
Parameters
----------
chrom : str
The name of the chromosome or scaffold.
Returns
-------
length : int
The length (integer) of the chromsome/scaffold
Raises
------
RuntimeError
If chromosome name not present in bam header
"""
    fastafile = pysam.FastaFile(self.filepath)
    lengths = dict(zip(fastafile.references, fastafile.lengths))
    # Close before the lookup so the handle cannot leak if chrom is missing.
    fastafile.close()
    try:
        return lengths[chrom]
    except KeyError:
        self.logger.error(
            "{} not present in {}. Exiting.".format(
                chrom, self.filepath))
        logging.shutdown()
        raise RuntimeError(
            "Chromosome name not present in fasta. Exiting")