def handle_signature(self, sig, signode):
    print("looking for: " + sig)
    cache = _APP_CACHES.get(self.env.app, {})
    key = CursorKind.FUNCTION_DECL, (sig, )
    if key in cache:
        print("KEY FOunD!")
        node, comment, start, end, _ = cache[key]
        result_type = node.type.get_result()
        signode += addnodes.desc_type(result_type.spelling, result_type.spelling + ' ')
        signode += addnodes.desc_name(node.spelling, node.spelling)
        paramlist = addnodes.desc_parameterlist()
        for argument in node.get_arguments():
            parameter = addnodes.desc_parameter()
            parameter += addnodes.desc_type(argument.type.spelling, argument.type.spelling + ' ')
            parameter += nodes.Text(argument.spelling, argument.spelling)
            paramlist += parameter
        signode += paramlist
        self.content = ViewList()
        comment = ''.join(comment)
        for lineno, line in enumerate(comment.splitlines(), start[0]):
            self.content.append(line, '<unknown>', lineno)
    return sig
Example source code using the Python class nodes.Text()
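
All of the snippets below create or inspect docutils Text leaf nodes. As a baseline, here is a minimal, self-contained sketch of that core API (standard docutils only, nothing project-specific):

from docutils import nodes

# Text is the leaf node type for plain character data.
para = nodes.paragraph()
para += nodes.Text("Hello, ")
para += nodes.emphasis("", "", nodes.Text("world"))

print(para.astext())    # -> "Hello, world"
print(para.pformat())   # pseudo-XML dump of the subtree
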
def doctree_resolved(app, doctree, docname):
    secnums = app.builder.env.toc_secnumbers
    for node in doctree.traverse(nodes.reference):
        if 'refdocname' in node:
            refdocname = node['refdocname']
            if refdocname in secnums:
                secnum = secnums[refdocname]
                toclist = app.builder.env.tocs[refdocname]
                for child in node.traverse():
                    if isinstance(child, nodes.Text):
                        text = child.astext()
                        anchorname = None
                        for refnode in toclist.traverse(nodes.reference):
                            if refnode.astext() == text:
                                anchorname = refnode['anchorname']
                        if anchorname is None:
                            continue
                        linktext = '.'.join(map(str, secnum[anchorname]))
                        child.parent.replace(child, nodes.Text(linktext))
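
doctree_resolved() above is an event handler; a minimal sketch of wiring it into a Sphinx extension's setup() (the returned metadata dict is optional):

def setup(app):
    # Run the handler after each doctree has been resolved, so that
    # toc_secnumbers and the reference nodes are already populated.
    app.connect('doctree-resolved', doctree_resolved)
    return {'version': '0.1', 'parallel_read_safe': True}
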
def expand_show_urls(self):
    show_urls = self.document.settings.env.config.latex_show_urls
    if show_urls is False or show_urls == 'no':
        return
    for node in self.document.traverse(nodes.reference):
        uri = node.get('refuri', '')
        if uri.startswith(URI_SCHEMES):
            if uri.startswith('mailto:'):
                uri = uri[7:]
            if node.astext() != uri:
                index = node.parent.index(node)
                if show_urls == 'footnote':
                    if list(traverse_parent(node, nodes.topic)):
                        # should not expand references in topics
                        pass
                    else:
                        footnote_nodes = self.create_footnote(uri)
                        for i, fn in enumerate(footnote_nodes):
                            node.parent.insert(index + i + 1, fn)
                        self.expanded = True
                else:  # all other true values (b/w compat)
                    textnode = nodes.Text(" (%s)" % uri)
                    node.parent.insert(index + 1, textnode)
def maybe_resolve_abbreviations(app, env, node, contnode):
    domainname = node.get('refdomain')
    target = node['reftarget']
    type = node['reftype']
    if target.startswith('@'):
        newtarget, src_dict = resolve(target, type)
        node['reftarget'] = newtarget
        # shorten text if '~' is not enabled.
        if len(contnode) and isinstance(contnode[0], nodes.Text):
            contnode[0] = modify_textnode(target, newtarget, node,
                                          src_dict, type)
        if domainname:
            try:
                domain = env.domains[node.get('refdomain')]
            except KeyError:
                raise NoUri
            return domain.resolve_xref(env, node['refdoc'], app.builder,
                                       type, newtarget,
                                       node, contnode)
def depart_citation_reference(self, node):
    if self._use_latex_citations:
        followup_citation = False
        # check for a following citation separated by a space or newline
        next_siblings = node.traverse(descend=False, siblings=True,
                                      include_self=False)
        if len(next_siblings) > 1:
            next = next_siblings[0]
            if (isinstance(next, nodes.Text) and
                    next.astext() in (' ', '\n')):
                if next_siblings[1].__class__ == node.__class__:
                    followup_citation = True
        if followup_citation:
            self.out.append(',')
        else:
            self.out.append('}')
            self.inside_citation_reference_label = False
    else:
        self.out.append(']}')
def run(self):
    if not isinstance(self.state, states.SubstitutionDef):
        raise self.error(
            'Invalid context: the "%s" directive can only be used within '
            'a substitution definition.' % self.name)
    substitution_definition = self.state_machine.node
    if 'trim' in self.options:
        substitution_definition.attributes['ltrim'] = 1
        substitution_definition.attributes['rtrim'] = 1
    if 'ltrim' in self.options:
        substitution_definition.attributes['ltrim'] = 1
    if 'rtrim' in self.options:
        substitution_definition.attributes['rtrim'] = 1
    codes = self.comment_pattern.split(self.arguments[0])[0].split()
    element = nodes.Element()
    for code in codes:
        try:
            decoded = directives.unicode_code(code)
        except ValueError as error:
            raise self.error(u'Invalid character code: %s\n%s'
                             % (code, ErrorString(error)))
        element += nodes.Text(decoded)
    return element.children
def implicit_inline(self, text, lineno):
    """
    Check each of the patterns in `self.implicit_dispatch` for a match,
    and dispatch to the stored method for the pattern. Recursively check
    the text before and after the match. Return a list of `nodes.Text`
    and inline element nodes.
    """
    if not text:
        return []
    for pattern, method in self.implicit_dispatch:
        match = pattern.search(text)
        if match:
            try:
                # Must recurse on strings before *and* after the match;
                # there may be multiple patterns.
                return (self.implicit_inline(text[:match.start()], lineno)
                        + method(match, lineno) +
                        self.implicit_inline(text[match.end():], lineno))
            except MarkupMismatch:
                pass
    return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
def term(self, lines, lineno):
    """Return a definition_list's term and optional classifiers."""
    assert len(lines) == 1
    text_nodes, messages = self.inline_text(lines[0], lineno)
    term_node = nodes.term()
    (term_node.source,
     term_node.line) = self.state_machine.get_source_and_line(lineno)
    term_node.rawsource = unescape(lines[0])
    node_list = [term_node]
    for i in range(len(text_nodes)):
        node = text_nodes[i]
        if isinstance(node, nodes.Text):
            parts = self.classifier_delimiter.split(node.rawsource)
            if len(parts) == 1:
                node_list[-1] += node
            else:
                node_list[-1] += nodes.Text(parts[0].rstrip())
                for part in parts[1:]:
                    classifier_node = nodes.classifier('', part)
                    node_list.append(classifier_node)
        else:
            node_list[-1] += node
    return node_list, messages
def _strong_node(self, refuri, text_nodes):
    cp = docutils_nodes.inline(classes=['link-container'])
    n1 = docutils_nodes.strong()
    n1.extend(text_nodes)
    cp.append(n1)
    paramlink = docutils_nodes.reference(
        '', '',
        docutils_nodes.Text(u"¶", u"¶"),
        refid="",
        # paramlink is our own CSS class, headerlink
        # is theirs. Trying to get everything we can for existing
        # symbols...
        classes=['paramlink', 'headerlink']
    )
    cp.append(paramlink)
    return cp
Source file: pyspecific.py (project: integration-prototype, author: SKA-ScienceDataProcessor)
def run(self):
    pnode = nodes.compound(classes=['impl-detail'])
    content = self.content
    add_text = nodes.strong('CPython implementation detail:',
                            'CPython implementation detail:')
    if self.arguments:
        n, m = self.state.inline_text(self.arguments[0], self.lineno)
        pnode.append(nodes.paragraph('', '', *(n + m)))
    self.state.nested_parse(content, self.content_offset, pnode)
    if pnode.children and isinstance(pnode[0], nodes.paragraph):
        pnode[0].insert(0, add_text)
        pnode[0].insert(1, nodes.Text(' '))
    else:
        pnode.insert(0, nodes.paragraph('', '', add_text))
    return [pnode]
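
This run() builds the "CPython implementation detail" admonition; a rough, hypothetical sketch of the directive class and registration it would sit in (class and directive names here are illustrative, not taken from the project):

from docutils.parsers.rst import Directive

class ImplementationDetail(Directive):  # hypothetical class name
    has_content = True
    optional_arguments = 1
    final_argument_whitespace = True
    # the run() shown above goes here

def setup(app):
    app.add_directive('impl-detail', ImplementationDetail)  # assumed directive name
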
# Support for documenting decorators
def format_parser_error(name, error, filename, state, lineno, do_unicode_warning):
    warning = '%s: Unable to parse xml file "%s". ' % (name, filename)
    explanation = 'Reported error: %s. ' % error
    unicode_explanation_text = ""
    unicode_explanation = []
    if do_unicode_warning:
        unicode_explanation_text = textwrap.dedent("""
            Parsing errors are often due to unicode errors associated with the encoding of the original
            source files. Doxygen propagates invalid characters from the input source files to the
            output xml.""").strip().replace("\n", " ")
        unicode_explanation = [nodes.paragraph("", "", nodes.Text(unicode_explanation_text))]
    return [nodes.warning("",
                          nodes.paragraph("", "", nodes.Text(warning)),
                          nodes.paragraph("", "", nodes.Text(explanation)),
                          *unicode_explanation),
            state.document.reporter.warning(warning + explanation + unicode_explanation_text,
                                            line=lineno)]
def run(self):
    if not isinstance(self.state, states.SubstitutionDef):
        raise self.error(
            'Invalid context: the "%s" directive can only be used within '
            'a substitution definition.' % self.name)
    format_str = '\n'.join(self.content) or '%Y-%m-%d'
    if sys.version_info < (3, 0):
        try:
            format_str = format_str.encode(locale_encoding or 'utf-8')
        except UnicodeEncodeError:
            raise self.warning(u'Cannot encode date format string '
                               u'with locale encoding "%s".' % locale_encoding)
    text = time.strftime(format_str)
    if sys.version_info < (3, 0):
        # `text` is a byte string that may contain non-ASCII characters:
        try:
            text = text.decode(locale_encoding or 'utf-8')
        except UnicodeDecodeError:
            text = text.decode(locale_encoding or 'utf-8', 'replace')
            raise self.warning(u'Error decoding "%s"'
                               u'with locale encoding "%s".' % (text, locale_encoding))
    return [nodes.Text(text)]
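
This run() implements a date-style substitution directive; a small sketch of exercising it through the standard docutils API:

from docutils.core import publish_string

source = """\
.. |date| date:: %Y-%m-%d

Generated on |date|.
"""
# The substitution is expanded to a nodes.Text holding the formatted date.
print(publish_string(source, writer_name='pseudoxml').decode('utf-8'))
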
def run(self):
    env = self.state.document.settings.env
    if not env.config.show_authors:
        return []
    para = nodes.paragraph(translatable=False)
    emph = nodes.emphasis()
    para += emph
    if self.name == 'sectionauthor':
        text = _('Section author: ')
    elif self.name == 'moduleauthor':
        text = _('Module author: ')
    elif self.name == 'codeauthor':
        text = _('Code author: ')
    else:
        text = _('Author: ')
    emph += nodes.Text(text, text)
    inodes, messages = self.state.inline_text(self.arguments[0],
                                              self.lineno)
    emph.extend(inodes)
    return [para] + messages
def _resolve_citation(self, builder, fromdocname, node, contnode):
    docname, labelid = self.citations.get(node['reftarget'], ('', ''))
    if docname:
        try:
            newnode = make_refnode(builder, fromdocname,
                                   docname, labelid, contnode)
            return newnode
        except NoUri:
            # remove the ids we added in the CitationReferences
            # transform since they can't be transferred to
            # the contnode (if it's a Text node)
            if not isinstance(contnode, nodes.Element):
                del node['ids'][:]
            raise
    elif 'ids' in node:
        # remove the ids attribute annotated at
        # transforms.CitationReference.apply.
        del node['ids'][:]
def apply(self):
    # move doctest blocks out of blockquotes
    for node in self.document.traverse(nodes.block_quote):
        if all(isinstance(child, nodes.doctest_block) for child
               in node.children):
            node.replace_self(node.children)
    # combine successive doctest blocks
    # for node in self.document.traverse(nodes.doctest_block):
    #     if node not in node.parent.children:
    #         continue
    #     parindex = node.parent.index(node)
    #     while len(node.parent) > parindex+1 and \
    #             isinstance(node.parent[parindex+1], nodes.doctest_block):
    #         node[0] = nodes.Text(node[0] + '\n\n' +
    #                              node.parent[parindex+1][0])
    #         del node.parent[parindex+1]
def describe_signature(self, signode, mode, env, prefix, parentScope):
    _verify_description_mode(mode)
    if mode == 'markType':
        targetText = prefix + text_type(self)
        pnode = addnodes.pending_xref(
            '', refdomain='cpp', reftype='type',
            reftarget=targetText, modname=None, classname=None)
        pnode['cpp:parent'] = [parentScope]
        pnode += nodes.Text(text_type(self.identifier))
        signode += pnode
    elif mode == 'lastIsName':
        name = text_type(self.identifier)
        signode += addnodes.desc_name(name, name)
    else:
        raise Exception('Unknown description mode: %s' % mode)
    if self.templateArgs:
        signode += nodes.Text('<')
        first = True
        for a in self.templateArgs:
            if not first:
                signode += nodes.Text(', ')
            first = False
            a.describe_signature(signode, 'markType', env,
                                 parentScope=parentScope)
        signode += nodes.Text('>')
def describe_signature(self, modifiers):
    def _add(modifiers, text):
        if len(modifiers) > 0:
            modifiers.append(nodes.Text(' '))
        modifiers.append(addnodes.desc_annotation(text, text))
    if self.storage:
        _add(modifiers, self.storage)
    if self.inline:
        _add(modifiers, 'inline')
    if self.virtual:
        _add(modifiers, 'virtual')
    if self.explicit:
        _add(modifiers, 'explicit')
    if self.constexpr:
        _add(modifiers, 'constexpr')
    if self.volatile:
        _add(modifiers, 'volatile')
    if self.const:
        _add(modifiers, 'const')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    text = utils.unescape(text)
    if typ == 'menuselection':
        text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
    spans = _amp_re.split(text)
    node = nodes.inline(rawtext=rawtext)
    for i, span in enumerate(spans):
        span = span.replace('&&', '&')
        if i == 0:
            if len(span) > 0:
                textnode = nodes.Text(span)
                node += textnode
            continue
        accel_node = nodes.inline()
        letter_node = nodes.Text(span[0])
        accel_node += letter_node
        accel_node['classes'].append('accelerator')
        node += accel_node
        textnode = nodes.Text(span[1:])
        node += textnode
    node['classes'].append(typ)
    return [node], []
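
menusel_role() is a classic Sphinx role function; a minimal sketch of registering it and the markup it handles (_amp_re is assumed to be defined alongside it, as in the snippet's module):

def setup(app):
    app.add_role('menuselection', menusel_role)
    app.add_role('guilabel', menusel_role)

# In reST:  :menuselection:`File --> Open`  renders the arrow as a
# triangular bullet, and ``&O`` marks "O" as the accelerator key.
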
def autosummary_table_visit_html(self, node):
    """Make the first column of the table non-breaking."""
    try:
        tbody = node[0][0][-1]
        for row in tbody:
            col1_entry = row[0]
            par = col1_entry[0]
            for j, subnode in enumerate(list(par)):
                if isinstance(subnode, nodes.Text):
                    new_text = text_type(subnode.astext())
                    new_text = new_text.replace(u" ", u"\u00a0")
                    par[j] = nodes.Text(new_text)
    except IndexError:
        pass
# -- autodoc integration -------------------------------------------------------
def expand_show_urls(self):
    show_urls = self.document.settings.env.config.latex_show_urls
    if show_urls is False or show_urls == 'no':
        return
    for node in self.document.traverse(nodes.reference):
        uri = node.get('refuri', '')
        if uri.startswith(URI_SCHEMES):
            if uri.startswith('mailto:'):
                uri = uri[7:]
            if node.astext() != uri:
                index = node.parent.index(node)
                if show_urls == 'footnote':
                    footnote_nodes = self.create_footnote(uri)
                    for i, fn in enumerate(footnote_nodes):
                        node.parent.insert(index + i + 1, fn)
                    self.expanded = True
                else:  # all other true values (b/w compat)
                    textnode = nodes.Text(" (%s)" % uri)
                    node.parent.insert(index + 1, textnode)
def dispatch_visit(self, node):
    nodetype = type(node)
    if issubclass(nodetype, comment):
        raise SkipNode
    if issubclass(nodetype, raw):
        # Some people might put content in raw HTML that should be searched,
        # so we just amateurishly strip HTML tags and index the remaining
        # content
        nodetext = re.sub(r'(?is)<style.*?</style>', '', node.astext())
        nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
        nodetext = re.sub(r'<[^<]+?>', '', nodetext)
        self.found_words.extend(self.lang.split(nodetext))
        raise SkipNode
    if issubclass(nodetype, Text):
        self.found_words.extend(self.lang.split(node.astext()))
    elif issubclass(nodetype, title):
        self.found_title_words.extend(self.lang.split(node.astext()))
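
dispatch_visit() above is the hook docutils calls when a document is walked; a minimal, self-contained sketch of the same pattern (hypothetical TextCollector class, standard docutils API):

from docutils.core import publish_doctree
from docutils import nodes

class TextCollector(nodes.SparseNodeVisitor):
    """Collect the raw text of every Text node, mirroring dispatch_visit above."""
    def __init__(self, document):
        nodes.SparseNodeVisitor.__init__(self, document)
        self.words = []

    def visit_Text(self, node):
        self.words.extend(node.astext().split())

doctree = publish_doctree("Some *text* to index.")
visitor = TextCollector(doctree)
doctree.walk(visitor)   # walk() drives dispatch_visit()/visit_* for every node
print(visitor.words)
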