def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
# Example usages of get_lexer_by_name() collected from various Python projects.
def block_code(text, lang, inlinestyles=False, linenos=False):
    """Render a fenced code block to HTML with Pygments.

    Falls back to an escaped plain ``<pre><code>`` block when no language
    is given or highlighting fails for any reason.
    """
    if not lang:
        text = text.strip()
        return u'<pre><code>%s</code></pre>\n' % mistune.escape(text)
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
        formatter = html.HtmlFormatter(
            noclasses=inlinestyles, linenos=linenos
        )
        code = highlight(text, lexer, formatter)
        if linenos:
            return '<div class="highlight-wrapper">%s</div>\n' % code
        return code
    except Exception:
        # Narrowed from `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception is broad enough here.
        return '<pre class="%s"><code>%s</code></pre>\n' % (
            lang, mistune.escape(text)
        )
def block_code(self, code, lang):
    """Block code highlighter and formater.

    Highlights *code* (guessing the language when *lang* is empty),
    records it in ``self.info.code``, and renders it through the
    'code' Jinja2 template.  Returns UTF-8 encoded bytes.
    """
    try:
        if not lang:
            lexer = guess_lexer(code, stripall=True)
        else:
            lexer = get_lexer_by_name(lang, stripall=True)
        code = highlight(code, lexer, self.code_formatter)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; on any highlighting error fall back to escaping.
        # (Also dropped the unused `detected = True` flag.)
        code = escape(code)
        lang = None
    self.info.code.append(code)
    template = self.jinja2.get_template('code')
    rv = template.render(code=code, lang=lang, site=self.site, meta=self.meta)
    rv = rv.encode('utf-8')
    return rv
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def block_code(self, code, lang=None):
    """Rendering block level code. ``pre > code``.

    Uses Pygments when available and *lang* names a known lexer;
    otherwise emits an escaped plain block.
    """
    lexer = None
    # Trim exactly one newline from a double-newline ending.
    if code.endswith('\n\n'):
        code = code[:-1]
    if HtmlFormatter and lang:
        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except ClassNotFound:
            # Unknown language: keep the info string as part of the code.
            code = lang + '\n' + code
    if lexer is None or not HtmlFormatter:
        return '\n<pre><code>%s</code></pre>\n' % mistune.escape(code)
    return highlight(code, lexer, HtmlFormatter())
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def save(self, *args, **kwargs):
    """
    Use the `pygments` library to create a highlighted HTML
    representation of the code snippet.

    After saving, prunes the oldest snippet once more than 100 exist.
    """
    lexer = get_lexer_by_name(self.language)
    # HtmlFormatter expects 'table' (a style name) or False, not True.
    linenos = 'table' if self.linenos else False
    options = {'title': self.title} if self.title else {}
    formatter = HtmlFormatter(style=self.style, linenos=linenos,
                              full=True, **options)
    self.highlighted = highlight(self.code, lexer, formatter)
    super(Snippet, self).save(*args, **kwargs)
    # limit the number of instances retained; .count() issues a SQL
    # COUNT(*) instead of materializing every row as len() would.
    snippets = Snippet.objects.all()
    if snippets.count() > 100:
        snippets[0].delete()
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def block_code(text, lang, inlinestyles=False, linenos=False):
    """Render a fenced code block to HTML with Pygments.

    Falls back to an escaped plain ``<pre><code>`` block when no language
    is given or highlighting fails for any reason.
    """
    if not lang:
        text = text.strip()
        return u'<pre><code>%s</code></pre>\n' % mistune.escape(text)
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
        formatter = html.HtmlFormatter(
            noclasses=inlinestyles, linenos=linenos
        )
        code = highlight(text, lexer, formatter)
        if linenos:
            return '<div class="highlight">%s</div>\n' % code
        return code
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        return '<pre class="%s"><code>%s</code></pre>\n' % (
            lang, mistune.escape(text)
        )
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def block_code(self, text, lang):
    """Render a fenced code block with an upper-cased language label.

    Falls back to an escaped plain ``<pre><code>`` block when no language
    is given or highlighting fails.
    """
    linenos = inlinestyles = False
    if not lang:
        text = text.strip()
        return u'<pre><code>%s</code></pre>\n' % mistune.escape(text)
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
        formatter = HtmlFormatter(
            noclasses=inlinestyles, linenos=linenos, cssclass='codehilite'
        )
        code = highlight(text, lexer, formatter)
        if linenos:
            return '<div class="highlight-wrapper">%s</div>\n' % code
        return '<div class="doc doc-code">%s</div>%s' % (lang.upper(), code)
    except Exception:
        # Narrowed from a bare `except:`; unknown languages fall back to
        # an escaped plain block rather than crashing the render.
        return '<pre class="%s"><code>%s</code></pre>\n' % (
            lang, mistune.escape(text)
        )
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def code_block(self, node, entering):
    '''Output Pygments if required else use default html5 output'''
    if not self.use_pygments:
        super().code_block(node, entering)
        return
    self.cr()
    info_words = node.info.split() if node.info else []
    # Default to the plain-text lexer; the fence's first info word, if
    # present, selects a real one.
    lexer = TextLexer()
    if info_words and info_words[0]:
        try:
            lexer = get_lexer_by_name(info_words[0])
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer = TextLexer()
    formatter = HtmlFormatter(**self.pygments_options)
    self.lit(highlight(node.literal, lexer, formatter))
    self.cr()
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
def color_stack_trace():
    """Install a sys.excepthook that colorizes tracebacks via Pygments."""
    def excepthook(type_, value, trace):
        # Render the traceback as plain text first so there is always
        # something to print, even if colorizing fails below.
        text = ''.join(traceback.format_exception(type_, value, trace))
        try:
            from pygments import highlight
            from pygments.lexers import get_lexer_by_name
            from pygments.formatters import TerminalFormatter
            colored = highlight(text,
                                get_lexer_by_name('pytb', stripall=True),
                                TerminalFormatter())
            sys.stderr.write(colored)
        except Exception:
            # Pygments missing or highlighting failed: plain fallback.
            sys.stderr.write(text)
            sys.stderr.write('Failed to colorize the traceback.')
    sys.excepthook = excepthook
    setup_thread_excepthook()
def tokenize_file(source_file, language=None, literal_option=3):
    """Read *source_file* and tokenize its contents.

    If *language* is None it is detected from the file extension, falling
    back to content-based guessing.  Returns ('', []) when the file cannot
    be decoded.  (Removed a stray debug print of the file name to stdout.)
    """
    code = ""
    try:
        with codecs.open(source_file, "r", encoding='utf-8', errors='ignore') as f:
            code = f.read()
    except UnicodeDecodeError:
        # errors='ignore' should prevent this, but keep the guard for
        # exotic codec failures.
        return '', []
    if language is None:
        try:
            lexer = get_lexer_for_filename(source_file)
            language = languageForLexer(lexer)
        except KeyError:  # Not a valid extension
            lexer = guess_lexer(code)
            language = languageForLexer(lexer)
    else:
        lexer = get_lexer_by_name(language)
    return tokenize_code(code, lexer, language, literal_option)
def _get_lexer(self, lang):
    """Look up a Pygments lexer for *lang*; return None when nothing matches.

    'sql' maps straight to the PostgreSQL lexer.  For PL/* names such as
    'plpythonu', variants with the 'pl' prefix and/or the trailing 'u'
    stripped are tried as well.
    """
    if lang.lower() == 'sql':
        return get_lexer_by_name('postgresql', **self.options)
    # Candidate names: the language itself, then the prefix/suffix variants.
    candidates = [lang]
    if lang.startswith('pl'):
        candidates.append(lang[2:])
    if lang.endswith('u'):
        candidates.append(lang[:-1])
    if lang.startswith('pl') and lang.endswith('u'):
        candidates.append(lang[2:-1])
    for name in candidates:
        try:
            return get_lexer_by_name(name, **self.options)
        except ClassNotFound:
            continue
    # TODO: better logging of unknown languages
    return None
# Source: code_analyzer.py
# Project: tf_aws_ecs_instance_draining_on_scale_in
# Author: terraform-community-modules
# (Scraped page metadata: views 19, bookmarks 0, likes 0, comments 0.)
def __init__(self, code, language, tokennames='short'):
    """
    Set up a lexical analyzer for `code` in `language`.

    :param code: source text to analyze.
    :param language: Pygments lexer name; '' or 'text' disables analysis.
    :param tokennames: token-name style; 'none' disables analysis.
    :raises LexerError: if Pygments is unavailable or has no lexer
        for `language`.
    """
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound as err:
        # Chain the original Pygments error so tracebacks show why the
        # lexer lookup failed (was previously discarded).
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language) from err
    # Since version 1.2. (released Jan 01, 2010) Pygments has a
    # TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
    # requires same minimal version, ``self.merge(tokens)`` in __iter__ can
    # be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def block_code(self, code, lang):
    """Highlight a fenced code block, guessing the language when absent.

    The guess is heuristic: PHP open tag, Jinja/HTML markers, common
    JavaScript keywords; otherwise Python 3.
    """
    guess = 'python3'
    stripped = code.lstrip()
    if stripped.startswith('<?php'):
        guess = 'php'
    elif stripped.startswith(('<', '{%')):
        guess = 'html+jinja'
    elif stripped.startswith(('function', 'var', '$')):
        guess = 'javascript'
    try:
        lexer = get_lexer_by_name(lang or guess, stripall=True)
    except ValueError:
        # Pygments' ClassNotFound subclasses ValueError: an unknown fence
        # language now falls back to the heuristic guess instead of
        # crashing the whole render.
        lexer = get_lexer_by_name(guess, stripall=True)
    return highlight(code, lexer, HtmlFormatter())
# NOTE(review): original comment was mojibake (encoding-damaged Chinese);
# it appeared to describe Markdown code-block rendering — original text
# unrecoverable.
def syntax_hl(src, lang=None, guess_lang=False, inline=False):
    """Highlight."""
    css_class = 'inline-highlight' if inline else 'highlight'
    src = src.strip('\n')
    try:
        lexer = get_lexer_by_name(lang)
    except ValueError:
        # Unknown (or None) language: optionally guess from content,
        # otherwise fall back to the plain-text lexer.
        try:
            if guess_lang:
                lexer = guess_lexer(src)
            else:
                lexer = get_lexer_by_name('text')
        except ValueError:
            lexer = get_lexer_by_name('text')
    if inline:
        formatter = SublimeInlineHtmlFormatter(
            cssclass=css_class,
            classprefix=css_class + ' '
        )
    else:
        formatter = SublimeBlockFormatter(cssclass=css_class)
    return highlight(src, lexer, formatter)
def blockcode(self, text, lang):
    """Render a fenced code block, highlighting it when *lang* is known.

    Falls back to an escaped plain ``<pre><code>`` block when no language
    is given or Pygments has no lexer for it.
    """
    if not lang:
        return '\n<pre><code>{}</code></pre>\n'.format(
            houdini.escape_html(text.strip()))
    try:
        lexer = get_lexer_by_name(lang)
    except ValueError:
        # Pygments' ClassNotFound subclasses ValueError; an unknown fence
        # language should not crash the whole render.
        return '\n<pre><code>{}</code></pre>\n'.format(
            houdini.escape_html(text.strip()))
    formatter = HtmlFormatter()
    return highlight(text, lexer, formatter)