def tolog(self, msg, level=None):
    try:
        level = level if level else self._level
        level = str(level).lower()
        level = self.get_map_level(level)
        if level == logging.DEBUG:
            self._logger.debug(msg)
        elif level == logging.INFO:
            self._logger.info(msg)
        elif level == logging.WARN:
            self._logger.warning(msg)
        elif level == logging.ERROR:
            self._logger.error(msg)
        elif level == logging.CRITICAL:
            self._logger.critical(msg)
    except Exception as expt:
        print(expt)
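Since get_map_level already returns a numeric logging level, the whole if/elif ladder can usually be collapsed into a single Logger.log() call. A minimal alternative sketch, assuming get_map_level maps level names to the stdlib constants as above:

def tolog(self, msg, level=None):
    # Logger.log() accepts the numeric level directly, so no per-level branch is needed.
    try:
        numeric = self.get_map_level(str(level if level else self._level).lower())
        self._logger.log(numeric, msg)
    except Exception as expt:
        print(expt)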
def parse_args(args):
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbose', help='Verbose (debug) logging',
                       action='store_const', const=logging.DEBUG,
                       dest='loglevel')
    group.add_argument('-q', '--quiet', help='Silent mode, only log warnings',
                       action='store_const', const=logging.WARN,
                       dest='loglevel')
    parser.add_argument('-s', '--skip', action='store_true',
                        dest='skip_errors',
                        help='Do not stop if one of the steps fails')
    parser.add_argument('-e', '--callback',
                        help='Callback for backup file (backup path '
                             'passed as a 1st arg)')
    parser.add_argument(
        'backup_dir', help='Destination for all created files')
    parser.set_defaults(func=do_node_backup)
    return parser.parse_args(args)
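A hedged usage sketch: do_node_backup comes from the surrounding module, and dispatching through args.func is an assumption based on the set_defaults() call above.

if __name__ == '__main__':
    import sys
    parsed = parse_args(sys.argv[1:])            # e.g. ['-v', '/var/backups/node']
    logging.basicConfig(level=parsed.loglevel or logging.INFO)
    parsed.func(parsed)                          # dispatches to do_node_backup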
def set_log(level, filename='jumpserver.log'):
    """Return a logger that writes to a log file under LOG_DIR."""
    log_file = os.path.join(LOG_DIR, filename)
    if not os.path.isfile(log_file):
        os.mknod(log_file)
        os.chmod(log_file, 0o777)
    log_level_total = {'debug': logging.DEBUG, 'info': logging.INFO,
                       'warning': logging.WARN, 'error': logging.ERROR,
                       'critical': logging.CRITICAL}
    logger_f = logging.getLogger('jumpserver')
    logger_f.setLevel(logging.DEBUG)
    fh = logging.FileHandler(log_file)
    fh.setLevel(log_level_total.get(level, logging.DEBUG))
    formatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger_f.addHandler(fh)
    return logger_f
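A minimal usage sketch, assuming LOG_DIR is defined in the surrounding settings module and is writable:

logger = set_log('warning')                      # file handler filters at WARNING
logger.warning('ssh session closed unexpectedly')    # written to jumpserver.log
logger.debug('dropped by the WARNING-level file handler')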
def single_help(bot, cmd, cmd_name) -> Embed:
    """
    Generate a help embed for a given command.
    :return: the embed object for the given command.
    """
    doc = cmd.help
    try:
        help_dict = safe_load(doc)
    except (YAMLError, AttributeError) as e:
        bot.logger.log(WARN, str(e))
        return Embed(colour=bot.colour, description=doc)
    else:
        embed = Embed(
            colour=bot.colour, description=help_dict.pop('Description')
        )
        embed.set_author(name=cmd_name, icon_url=bot.user.avatar_url)
        if cmd.aliases:
            embed.add_field(name='Aliases', value=f'`{", ".join(cmd.aliases)}`')
        for key, val in help_dict.items():
            try:
                val = val.format(prefix=bot.prefix)
            except KeyError:
                val = val.replace('{prefix}', bot.prefix)
            embed.add_field(name=key, value=val, inline=False)
        return embed
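For context, a hedged sketch of the kind of YAML-formatted command docstring this helper expects; the key names other than Description are assumptions for illustration only.

from yaml import safe_load

doc = """
Description: Check that the bot is alive.
Usage: '{prefix}ping'
"""
help_dict = safe_load(doc)                       # {'Description': ..., 'Usage': '{prefix}ping'}
print(help_dict['Usage'].format(prefix='!'))     # -> !ping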
def generate_report(args):
    verbose_to_log = {
        0: logging.CRITICAL,
        1: logging.ERROR,
        2: logging.WARN,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    logging_level = logging.DEBUG if args.verbose > 4 else verbose_to_log[args.verbose]
    log.setLevel(logging_level)
    log.debug("args: %s" % args)
    args.output_state_results = True if args.verbose > 1 else args.output_state_results
    if args.job_group_urls:
        root_url = urljoin('/'.join(args.job_group_urls.split("/")[0:3]), '/')
    else:
        root_url = urljoin(args.host, '/')
    browser = Browser(args, root_url)
    job_groups = get_job_groups(browser, root_url, args)
    assert not (args.builds and len(job_groups) > 1), "builds option and multiple job groups not supported"
    assert len(job_groups) > 0, "No job groups were found, maybe misspecified '--job-groups'?"
    return Report(browser, args, root_url, job_groups)
def deprecated(self, removal_version, msg, *args, **kwargs):
    """
    Logs deprecation message which is log level WARN if the
    ``removal_version`` is > 1 minor release away and log level ERROR
    otherwise.

    removal_version should be the version that the deprecated feature is
    expected to be removed in, so something that will not exist in
    version 1.7, but will in 1.6 would have a removal_version of 1.7.
    """
    from pip import __version__
    if should_warn(__version__, removal_version):
        self.warn(msg, *args, **kwargs)
    else:
        self.error(msg, *args, **kwargs)
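should_warn is defined elsewhere in pip's logging utilities. The sketch below only illustrates the comparison the docstring describes (warn while the removal release is more than one minor version ahead of the running version); it is not pip's actual implementation.

def should_warn(current_version, removal_version):
    # Illustrative sketch only: compare (major, minor) pairs.
    def major_minor(version):
        parts = version.split('.')
        return int(parts[0]), int(parts[1])

    current = major_minor(current_version)
    removal = major_minor(removal_version)
    # True (WARN) while removal is more than one minor release away,
    # False (ERROR) once it is the very next minor release or earlier.
    return removal > (current[0], current[1] + 1)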
def create_console_handler(verbose_level):
    clog = logging.StreamHandler()
    formatter = ColoredFormatter(
        "%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
        "%(white)s%(message)s",
        datefmt="%H:%M:%S",
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        })
    clog.setFormatter(formatter)

    if verbose_level == 0:
        clog.setLevel(logging.WARN)
    elif verbose_level == 1:
        clog.setLevel(logging.INFO)
    else:
        clog.setLevel(logging.DEBUG)
    return clog
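A minimal usage sketch, assuming ColoredFormatter is imported from the third-party colorlog package:

import logging
from colorlog import ColoredFormatter            # pip install colorlog

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)                   # the handler level does the filtering
logger.addHandler(create_console_handler(verbose_level=1))
logger.info('shown in green')
logger.debug('suppressed at verbose_level=1')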
def set_log(level, filename='spider.log'):
    """Return a logger writing under LOG_DIR plus the file descriptors
    that must stay open when the process is daemonized."""
    if not os.path.isdir(LOG_DIR):
        os.mkdir(LOG_DIR)
    log_file = os.path.join(LOG_DIR, filename)
    if not os.path.isfile(log_file):
        os.mknod(log_file)
        os.chmod(log_file, 0o777)
    log_level_total = {'debug': logging.DEBUG, 'info': logging.INFO,
                       'warning': logging.WARN, 'error': logging.ERROR,
                       'critical': logging.CRITICAL}
    logger_f = logging.getLogger('spider')
    logger_f.setLevel(logging.DEBUG)
    fh = logging.FileHandler(log_file, 'a')
    fh.setLevel(log_level_total.get(level, logging.DEBUG))
    formatter = logging.Formatter('%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s')
    fh.setFormatter(formatter)
    logger_f.addHandler(fh)
    keep_fds = [fh.stream.fileno()]
    return logger_f, keep_fds
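A hedged usage sketch: keep_fds is typically handed to a daemonizing helper so the log file descriptor survives the fork. The daemonize package and the pid file path shown here are assumptions about the caller.

from daemonize import Daemonize                  # pip install daemonize

logger, keep_fds = set_log('info')

def main():
    logger.info('spider started')

daemon = Daemonize(app='spider', pid='/tmp/spider.pid',
                   action=main, keep_fds=keep_fds)
daemon.start()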
def test_set_default_log_level(self, mock_connect):
    import logging
    from datastore import get_logger
    from logging.handlers import RotatingFileHandler
    from datastore.postgresstore import PostgresLogHandler

    self.dsb.add_file_db("config-example.json", logging.CRITICAL)
    self.dsb.add_postgres_db("", logging.WARN)
    self.dsb.set_default_log_level(logging.INFO)
    self.assertEqual(DataStore.LOG_LEVEL, logging.INFO)

    logger = get_logger()
    fdbh = None
    pdbh = None
    for handler in logger.handlers:
        if isinstance(handler, RotatingFileHandler):
            fdbh = handler
        if isinstance(handler, PostgresLogHandler):
            pdbh = handler
    self.assertEqual(fdbh.level, logging.CRITICAL)
    self.assertEqual(pdbh.level, logging.WARNING)
def emit(self, record):
    message = self.format(record)
    logger_name = record.name
    if record.name == "root":
        logger_name = LogManager.ROOT_LOGGER_NAME
    logger = LogManager.getLogger(logger_name)
    level = record.levelno
    if level == logging.DEBUG:
        logger.debug(message)
    elif level == logging.INFO:
        logger.info(message)
    elif level == logging.WARN:
        logger.warn(message)
    elif level == logging.ERROR:
        logger.error(message)
    elif level == logging.CRITICAL:
        logger.fatal(message)
    else:
        logger.fatal("unknown logger level: " + str(level))
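The if/elif ladder above can also be expressed as a table lookup. A minimal sketch, assuming the same LogManager API with debug/info/warn/error/fatal methods:

def emit(self, record):
    message = self.format(record)
    logger_name = (LogManager.ROOT_LOGGER_NAME
                   if record.name == "root" else record.name)
    logger = LogManager.getLogger(logger_name)
    dispatch = {
        logging.DEBUG: logger.debug,
        logging.INFO: logger.info,
        logging.WARN: logger.warn,
        logging.ERROR: logger.error,
        logging.CRITICAL: logger.fatal,
    }
    # Unknown levels fall back to a fatal message, mirroring the original.
    dispatch.get(record.levelno,
                 lambda m: logger.fatal("unknown logger level: "
                                        + str(record.levelno)))(message)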
def configure_logging(verbosity):
    """Reconfigure logging with the selected verbosity.

    Sets the root logger level so that oauth logging is also
    configured properly.

    Args:
        verbosity (int): The logging verbosity:
            0: WARN
            1: INFO
            >1: DEBUG
    """
    if verbosity <= 0:
        level = logging.WARN
    elif verbosity == 1:
        level = logging.INFO
    else:
        assert verbosity > 1
        level = logging.DEBUG
    logging.basicConfig(level=level)
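A hedged usage sketch, wiring the helper to a repeatable -v flag; the argument parser shown here is an assumption about the surrounding CLI.

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0)
cli_args = parser.parse_args(['-vv'])

configure_logging(cli_args.verbose)              # -vv -> DEBUG
logging.getLogger(__name__).debug('now visible')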
def DEFAULT_LOGGING_CONFIG(level=logging.WARN, format=LOG_FORMAT):
    """Return a default logging config in dict format.

    Compatible with logging.config.dictConfig(), this default sets the root
    logger to `level` with a `sys.stdout` console handler using a formatter
    initialized with `format`. A simple 'brief' formatter is defined that
    shows only the message portion of any log entries."""
    return {
        "version": 1,
        "formatters": {"generic": {"format": format},
                       "brief": {"format": "%(message)s"},
                       },
        "handlers": {"console": {"class": "logging.StreamHandler",
                                 "level": "NOTSET",
                                 "formatter": "generic",
                                 "stream": "ext://sys.stdout",
                                 },
                     },
        "root": {"level": level,
                 "handlers": ["console"],
                 },
        "loggers": {},
    }
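Applying the config is a one-liner with the stdlib. A minimal sketch, with LOG_FORMAT assumed to be an ordinary logging format string defined alongside this helper:

import logging
import logging.config

logging.config.dictConfig(DEFAULT_LOGGING_CONFIG(level=logging.INFO))
logging.getLogger(__name__).info('goes to stdout via the console handler')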
def printDebug(message, priority=logging.WARN):
    """Logs message given the priority specified

    arguments:
        message - the string message to be logged
        priority - the integer priority of the message; uses the priority levels in the logging module

    returns:
        nothing
    """
    # this function (and hence the library) originally did not use the logging module from the standard library
    global sparki_logger

    # for compatibility, we will recognize the "old" priority levels, but new code should be written
    # to conform to the priority levels in the logging module
    if priority == DEBUG_DEBUG or priority == logging.DEBUG:
        sparki_logger.debug(message)
    elif priority == DEBUG_INFO or priority == logging.INFO:
        sparki_logger.info(message)
    elif priority == DEBUG_WARN or priority == logging.WARN:
        sparki_logger.warn(message)
    elif priority == DEBUG_ERROR or priority == logging.ERROR:
        sparki_logger.error(message)
    else:
        sparki_logger.critical(message)
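A minimal usage sketch. sparki_logger and the legacy DEBUG_* constants are defined elsewhere in the library; the stand-in values below are hypothetical and exist only so the sketch is self-contained.

import logging

sparki_logger = logging.getLogger('sparki')      # normally created by the library
DEBUG_DEBUG, DEBUG_INFO, DEBUG_WARN, DEBUG_ERROR = 5, 4, 3, 2   # hypothetical legacy levels

printDebug("left wheel stalled")                 # defaults to WARN
printDebug("battery at 40%", logging.INFO)
printDebug("sensor offline", DEBUG_ERROR)        # legacy constant still accepted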
def setup_logging(args):
    """Set up the logging level and configure the basic logger."""
    if args.verbose == 1:
        level = logging.INFO
    elif args.verbose >= 2:
        level = logging.DEBUG
    else:
        level = logging.WARN
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(message)s",
        level=level,
    )
    global LOG
    LOG = logging.getLogger(__name__)