def initialize_cli(options):
from ._logger import set_log_level
debug_format_str = (
"[{record.level_name}] {record.channel} {record.func_name} "
"({record.lineno}): {record.message}")
if options.log_level == logbook.DEBUG:
info_format_str = debug_format_str
else:
info_format_str = (
"[{record.level_name}] {record.channel}: {record.message}")
logbook.StderrHandler(
level=logbook.DEBUG, format_string=debug_format_str
).push_application()
logbook.StderrHandler(
level=logbook.INFO, format_string=info_format_str
).push_application()
set_log_level(options.log_level)
spr.SubprocessRunner.is_save_history = True
if options.is_output_stacktrace:
spr.SubprocessRunner.is_output_stacktrace = (
options.is_output_stacktrace)
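A minimal wiring sketch for initialize_cli, assuming an argparse namespace that carries the log_level and is_output_stacktrace attributes used above (the option names below are illustrative, not from the original project):

import argparse

import logbook


def _parse_cli_options():
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--debug", dest="log_level", action="store_const",
        const=logbook.DEBUG, default=logbook.INFO)
    group.add_argument(
        "--quiet", dest="log_level", action="store_const",
        const=logbook.NOTSET, default=logbook.INFO)
    parser.add_argument(
        "--stacktrace", dest="is_output_stacktrace", action="store_true")
    return parser.parse_args()


# initialize_cli(_parse_cli_options())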
def run_command_helper(command, error_regexp, message, exception=None):
if logger.level != logbook.DEBUG:
spr.set_logger(is_enable=False)
proc = spr.SubprocessRunner(command)
proc.run()
if logger.level != logbook.DEBUG:
spr.set_logger(is_enable=True)
if proc.returncode == 0:
return 0
match = error_regexp.search(proc.stderr)
if match is None:
logger.error(proc.stderr)
return proc.returncode
if typepy.is_not_null_string(message):
logger.notice(message)
if exception is not None:
raise exception(command)
return proc.returncode
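An illustrative call, assuming a compiled regexp for a stderr message that should be downgraded to a notice instead of an error; the command, pattern, and message below are made up:

import re

# hypothetical: deleting something that may not exist is not a real failure
_NOT_FOUND_REGEXP = re.compile("No such file or directory")

# run_command_helper(
#     "tc qdisc del dev eth0 root", error_regexp=_NOT_FOUND_REGEXP,
#     message="no existing qdisc to delete", exception=None)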
def test_stderr(
self, capsys, command, ignore_stderr_regexp, out_regexp, expected):
import logbook
import subprocrunner
logbook.StderrHandler(
level=logbook.DEBUG).push_application()
subprocrunner.set_log_level(logbook.INFO)
runner = SubprocessRunner(
command, ignore_stderr_regexp=ignore_stderr_regexp)
runner.run()
assert is_null_string(runner.stdout.strip())
assert is_not_null_string(runner.stderr.strip())
out, err = capsys.readouterr()
print("[sys stdout]\n{}\n".format(out))
print("[sys stderr]\n{}\n".format(err))
print("[proc stdout]\n{}\n".format(runner.stdout))
print("[proc stderr]\n{}\n".format(runner.stderr))
actual = out_regexp.search(err) is not None
assert actual == expected
def __init__(self, command, ignore_stderr_regexp=None, dry_run=None):
if typepy.type.List(command).is_type():
        # join the arguments into a single command string if a list is given
command = " ".join(command)
self.__command = command
if dry_run is not None:
self.__dry_run = dry_run
else:
self.__dry_run = self.default_is_dry_run
self.__stdout = None
self.__stderr = None
self.__returncode = None
self.__ignore_stderr_regexp = ignore_stderr_regexp
self.__debug_logging_method = self.__get_logging_method(logbook.DEBUG)
self.error_log_level = self.default_error_log_level
if self.is_save_history:
if len(self.__command_history) >= self.history_size:
self.__command_history.pop(0)
self.__command_history.append(command)
def initialize_log_handler(log_level):
debug_format_str = (
"[{record.level_name}] {record.channel} {record.func_name} "
"({record.lineno}): {record.message}")
if log_level == logbook.DEBUG:
info_format_str = debug_format_str
else:
info_format_str = (
"[{record.level_name}] {record.channel}: {record.message}")
logbook.StderrHandler(
level=logbook.DEBUG, format_string=debug_format_str
).push_application()
logbook.StderrHandler(
level=logbook.INFO, format_string=info_format_str
).push_application()
def init_event(options):
    global eventThread
    if options.user:
        options.event_mode = "user:in %f:out %f" % (
            options.pro_in, options.pro_out)
        # run the event simulation thread
        eventThread = runTest.UserDefThread()
        print("****DEBUG:UserDefThread:", eventThread.stats)
    if options.crash:
        options.event_mode = "accident:in %f:out %f" % (
            options.pro_in, options.pro_out)
        eventThread = runTest.CrashThread()
        print("****DEBUG:CrashThread:", eventThread.stats)
    if eventThread:
        eventThread.setDaemon(True)
        eventThread.setProJoin(options.pro_in)
        eventThread.setProLeave(options.pro_out)
        eventThread.setInterval(options.interval)
        eventThread.setTimeLimit(options.timelimit)
        eventThread.ready()
def main():
options = parse_option()
# ptw.set_log_level(logbook.DEBUG)
write_result_matrix(options.output_dir)
write_example(options.output_dir)
return 0
def debug():
logbook.StreamHandler(sys.stdout, level=logbook.DEBUG).push_application()
def get_color(self, record):
level = record.level
if level >= ERROR:
return 'red'
if level >= NOTICE:
return 'yellow'
if level == DEBUG:
return 'darkteal'
return 'lightgray'
def __get_logbook_logging_level(level_str):
# logbook levels:
# CRITICAL = 15
# ERROR = 14
# WARNING = 13
# NOTICE = 12
# INFO = 11
# DEBUG = 10
# TRACE = 9
# NOTSET = 0
level_str = level_str.upper().strip()
if level_str == 'CRITICAL':
return logbook.CRITICAL
elif level_str == 'ERROR':
return logbook.ERROR
elif level_str == 'WARNING':
return logbook.WARNING
elif level_str == 'NOTICE':
return logbook.NOTICE
elif level_str == 'INFO':
return logbook.INFO
elif level_str == 'DEBUG':
return logbook.DEBUG
elif level_str == 'TRACE':
return logbook.TRACE
elif level_str == 'NOTSET':
return logbook.NOTSET
else:
raise ValueError("Unknown logbook log level: {}".format(level_str))
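A small usage sketch, assuming the helper is reachable as a module-level function rather than a name-mangled method:

assert __get_logbook_logging_level("debug") == logbook.DEBUG
assert __get_logbook_logging_level(" Info ") == logbook.INFO  # case/whitespace tolerant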
def _add_log_level_argument_group(self):
dest = "log_level"
group = self.parser.add_mutually_exclusive_group()
group.add_argument(
"--debug", dest=dest, action="store_const",
const=logbook.DEBUG, default=logbook.INFO,
help="for debug print.")
group.add_argument(
"--quiet", dest=dest, action="store_const",
const=logbook.NOTSET, default=logbook.INFO,
help="suppress execution log messages.")
return group
def __get_logging_method(log_level):
method_table = {
logbook.DEBUG: logger.debug,
logbook.INFO: logger.info,
logbook.WARNING: logger.warning,
logbook.ERROR: logger.error,
logbook.CRITICAL: logger.critical,
}
method = method_table.get(log_level)
if method is None:
raise ValueError("unknown log level: {}".format(log_level))
return method
def lazy_simple_capture(request, caps_directory):
"""
Does not fill the cap with packets.
"""
cap_path = os.path.join(caps_directory, 'capture_test.pcapng')
cap = pyshark.FileCapture(cap_path)
cap.log.level = logbook.DEBUG
def finalizer():
cap.close()
cap.eventloop.stop()
request.addfinalizer(finalizer)
return cap
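A hedged sketch of a test consuming the fixture; the assertion only checks what the fixture itself sets up, and the test name is illustrative:

import logbook


def test_lazy_capture_log_level(lazy_simple_capture):
    # the fixture sets the capture logger to DEBUG without loading packets
    assert lazy_simple_capture.log.level == logbook.DEBUG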
def set_debug(self):
"""
Sets the capture to debug mode.
"""
self.log.level = logbook.DEBUG
self.debug = True
def _get_logging_level(verbosity):
# noinspection PyPackageRequirements
import logbook
return {
1: logbook.CRITICAL,
2: logbook.ERROR,
3: logbook.WARNING,
4: logbook.NOTICE,
5: logbook.INFO,
6: logbook.DEBUG,
7: logbook.TRACE,
}[verbosity]
def cli(ctx, verbosity, colored):
"""
Command Line Interface. Use --help for details.
"""
if verbosity > 0:
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import logbook
# noinspection PyUnresolvedReferences,PyPackageRequirements
import logbook.more
except ImportError:
click.echo('Please install saltyrtc.server[logging] for logging support.',
err=True)
ctx.exit(code=_ErrorCode.import_error)
# Translate logging level
level = _get_logging_level(verbosity)
# Enable asyncio debug logging if verbosity is high enough
# noinspection PyUnboundLocalVariable
if level <= logbook.DEBUG:
os.environ['PYTHONASYNCIODEBUG'] = '1'
# Enable logging
util.enable_logging(level=level, redirect_loggers={
'asyncio': level,
'websockets': level,
})
# Get handler class
if colored:
handler_class = logbook.more.ColorizedStderrHandler
else:
handler_class = logbook.StderrHandler
# Set up logging handler
handler = handler_class(level=level)
handler.push_application()
ctx.obj['logging_handler'] = handler
def parse_option():
parser = argparse.ArgumentParser()
if is_use_stdin():
parser.add_argument(
"destination_or_file", nargs="+",
help="")
parser.add_argument(
"--max-workers", type=int,
help="""a number of threads for when multiple destination/file
specified. defaults to equals to two times number of cores.
""")
parser.add_argument(
"--indent", type=int, default=4,
help="""JSON output will be pretty-printed with the indent level.
(default= %(default)s)
""")
loglevel_dest = "log_level"
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--debug", dest=loglevel_dest, action="store_const",
const=logbook.DEBUG, default=logbook.INFO,
help="for debug print.")
group.add_argument(
"--quiet", dest=loglevel_dest, action="store_const",
const=logbook.NOTSET, default=logbook.INFO,
help="suppress execution log messages.")
group = parser.add_argument_group("Ping Options")
group.add_argument(
"-c", "--count", type=int,
help="""stop after sending the count.
see also ping(8) [-c count] option description.
""")
group.add_argument(
"-w", "--deadline", type=float,
help="""timeout in seconds.
see also ping(8) [-w deadline] option description.
""")
group.add_argument(
"-I", "--interface", dest="interface",
help="network interface")
return parser.parse_args()
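A hedged entry-point sketch combining parse_option with the initialize_log_handler helper shown earlier; pairing the two is an assumption, not taken from the original project:

def main():
    options = parse_option()
    initialize_log_handler(options.log_level)
    # options.count, options.deadline and options.interface map onto the
    # ping(8) -c / -w / -I flags described in the help strings above
    return 0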