def decorator(self, nodelist):
    # '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
    assert len(nodelist) in (3, 5, 6)
    assert nodelist[0][0] == token.AT
    assert nodelist[-1][0] == token.NEWLINE
    assert nodelist[1][0] == symbol.dotted_name
    funcname = self.decorator_name(nodelist[1][1:])
    if len(nodelist) > 3:
        assert nodelist[2][0] == token.LPAR
        expr = self.com_call_function(funcname, nodelist[3])
    else:
        expr = funcname
    return expr
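This transformer method (from the Python 2 compiler package) walks parser CST tuples rather than a raw token stream, but the grammar shape its assertions encode is easy to see with tokenize. A minimal Python 3 sketch, purely for illustration:

import io
import tokenize

# A decorator line tokenizes as AT, a dotted name, an optional call, NEWLINE.
for tok in tokenize.generate_tokens(io.StringIO("@foo.bar(1, 2)\n").readline):
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))
# AT '@', NAME 'foo', DOT '.', NAME 'bar', LPAR '(', NUMBER '1', COMMA ',',
# NUMBER '2', RPAR ')', NEWLINE '\n', ENDMARKER ''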
def __openseen(self, ttype, tstring, lineno):
    if ttype == tokenize.OP and tstring == ')':
        # We've seen the last of the translatable strings. Record the
        # line number of the first line of the strings and update the list
        # of messages seen. Reset state for the next batch. If there
        # were no strings inside _(), then just ignore this entry.
        if self.__data:
            self.__addentry(EMPTYSTRING.join(self.__data))
        self.__state = self.__waiting
    elif ttype == tokenize.STRING:
        self.__data.append(safe_eval(tstring))
    elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                       token.NEWLINE, tokenize.NL]:
        # warn if we see anything other than STRING or whitespace
        print >> sys.stderr, _(
            '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
        ) % {
            'token': tstring,
            'file': self.__curfile,
            'lineno': self.__lineno,
        }
        self.__state = self.__waiting
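__openseen (from pygettext, hence the Python 2 print >> syntax) collects STRING tokens inside an _() call and deliberately skips both token.NEWLINE and tokenize.NL. The distinction matters: NEWLINE terminates a logical line, while NL marks a line break that does not (inside brackets, or on blank and comment lines). A small Python 3 sketch of that difference:

import io
import tokenize

source = "x = _(\n    'hello'\n)\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# The line breaks inside the parentheses come out as NL; only the break
# after the closing ')' is a NEWLINE ending the logical line.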
def _create_astmissing_lines(self) -> List[Optional[Token]]:
    """
    Return a copy of line_tokens containing lines ignored by the AST
    (comments and blank-only lines)
    """
    lines: List[Optional[Token]] = []
    nl_token = Token(token_module.NEWLINE, '\n', (0, 0), (0, 0), '\n')
    for i, linetokens in enumerate(self._all_lines):
        if len(linetokens) == 1 and linetokens[0].name == 'NL':
            lines.append(nl_token)
        else:
            for token in linetokens:
                if token.name == 'COMMENT' and \
                        token.rawvalue.lstrip().startswith('#'):
                    lines.append(token)
                    break
            else:
                lines.append(None)
    assert len(lines) == len(self._all_lines)
    for i, linetokens2 in enumerate(lines):
        if linetokens2:
            self._current_line = i
            break
    else:
        self._current_line = len(lines)
    return lines
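The Token type used here is not defined in the snippet; judging from the five-argument constructor and the .name/.rawvalue attributes, it wraps a tokenize-style token. A hypothetical reconstruction, with every name below inferred from the call sites rather than taken from the original project:

import token as token_module
from typing import NamedTuple, Tuple

class Token(NamedTuple):  # hypothetical reconstruction, not the real class
    type: int             # e.g. token_module.NEWLINE
    string: str
    start: Tuple[int, int]
    end: Tuple[int, int]
    rawvalue: str         # raw source text of the token

    @property
    def name(self) -> str:
        # Symbolic token name, e.g. 'NL', 'COMMENT', 'NEWLINE'
        return token_module.tok_name[self.type]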
def __init__(self):
    self._dispatch = {}
    for value, name in symbol.sym_name.items():
        if hasattr(self, name):
            self._dispatch[value] = getattr(self, name)
    self._dispatch[token.NEWLINE] = self.com_NEWLINE
    self._atom_dispatch = {token.LPAR: self.atom_lpar,
                           token.LSQB: self.atom_lsqb,
                           token.LBRACE: self.atom_lbrace,
                           token.BACKQUOTE: self.atom_backquote,
                           token.NUMBER: self.atom_number,
                           token.STRING: self.atom_string,
                           token.NAME: self.atom_name,
                           }
    self.encoding = None
def single_input(self, node):
    ### do we want to do anything about being "interactive"?
    # NEWLINE | simple_stmt | compound_stmt NEWLINE
    n = node[0][0]
    if n != token.NEWLINE:
        return self.com_stmt(node[0])
    return Pass()
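single_input maps a bare NEWLINE to Pass(), mirroring how an empty line at the interactive prompt is a no-op. Modern CPython shows the same behaviour through the 'single' compile mode:

import dis

# An empty interactive line compiles to code that does nothing.
code = compile("\n", "<interactive>", "single")
dis.dis(code)  # effectively just returns None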
def file_input(self, nodelist):
    doc = self.get_docstring(nodelist, symbol.file_input)
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
def decorators(self, nodelist):
    # decorators: decorator ([NEWLINE] decorator)* NEWLINE
    items = []
    for dec_nodelist in nodelist:
        assert dec_nodelist[0] == symbol.decorator
        items.append(self.decorator(dec_nodelist[1:]))
    return Decorators(items)
def suite(self, nodelist):
    # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
    if len(nodelist) == 1:
        return self.com_stmt(nodelist[0])
    stmts = []
    for node in nodelist:
        if node[0] == symbol.stmt:
            self.com_append_stmt(stmts, node)
    return Stmt(stmts)
# --------------------------------------------------------------
#
# EXPRESSION NODES (invoked by com_node())
#

def com_NEWLINE(self, *args):
    # A ';' at the end of a line can make a NEWLINE token appear
    # here; render it harmless. (genc discards ('discard',
    # ('const', xxxx)) nodes.)
    return Discard(Const(None))
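The situation com_NEWLINE guards against, a trailing ';' leaving a stray NEWLINE where an expression node is expected, is easy to reproduce at the token level:

import io
import tokenize

# A trailing ';' is its own SEMI token just before the NEWLINE.
for tok in tokenize.generate_tokens(io.StringIO("x = 1;\n").readline):
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))
# NAME 'x', EQUAL '=', NUMBER '1', SEMI ';', NEWLINE '\n', ENDMARKER ''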
def _parse_parameters(self):
    """ Parses parameter list """
    # Check and skip over '('
    t = self._next_token()
    if t.type != TokenType.OP or t.value != '(':
        raise ParseError("Expected '(' of parameter list, got '%s'" % (t.value,))
    parameters = []
    while self._tokens_left():
        # Check for ')' that would end parameter list
        t = self._peek_token()
        if t.type == TokenType.OP and t.value == ')':
            self._next_token()
            return parameters
        # Parse one parameter
        parameters.append(self._parse_parameter())
        # Check if next token is either ')' or ','
        t = self._peek_token()
        while t.type == TokenType.NEWLINE or t.value == "\n":
            self._next_token()
            if not self._tokens_left():
                raise ParseError("Expected ',' or end of parameter list after parameter '%s'" % (parameters[-1],))
            t = self._peek_token()
        if t.type == TokenType.OP and t.value == ')':
            pass
        elif t.type == TokenType.OP and t.value == ',':
            self._next_token()
        else:
            raise ParseError("Expected ',' or end of parameter list after parameter '%s'" % (parameters[-1],))
    # Code shouldn't reach here unless there is no closing ')' in the parameter list
    raise ParseError("Unmatched parenthesis")
def get_text_range(self, node):
    """
    After mark_tokens() has been called, returns the (startpos, endpos) positions in source text
    corresponding to the given node. Returns (0, 0) for nodes (like `Load`) that don't correspond
    to any particular text.
    """
    if not hasattr(node, 'first_token'):
        return (0, 0)
    start = node.first_token.startpos
    if any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
        # Multi-line nodes would be invalid unless we keep the indentation of the first node.
        start = self._text.rfind('\n', 0, start) + 1
    return (start, node.last_token.endpos)
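This method (and the two that follow) match the asttokens library; assuming that origin, a short usage sketch:

import asttokens

source = "x = (1 +\n     2)\n"
atok = asttokens.ASTTokens(source, parse=True)
assign = atok.tree.body[0]      # the Assign statement node
start, end = atok.get_text_range(assign)
print(repr(source[start:end]))  # the full two-line assignment text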
def _visit_after_children(self, node, parent_token, token):
    # This processes the node generically first, after all children have been processed.

    # Get the first and last tokens that belong to children. Note how this doesn't assume that we
    # iterate through children in an order that corresponds to occurrence in source code. This
    # assumption can fail (e.g. with return annotations).
    first = token
    last = None
    for child in self._iter_children(node):
        if not first or child.first_token.index < first.index:
            first = child.first_token
        if not last or child.last_token.index > last.index:
            last = child.last_token

    # If we don't have a first token from _visit_before_children, and there were no children, then
    # use the parent's token as the first token.
    first = first or parent_token

    # If no children, set last token to the first one.
    last = last or first

    # Statements continue to before NEWLINE. This helps cover a few different cases at once.
    if util.is_stmt(node):
        last = self._find_last_in_line(last)

    # Capture any unmatched brackets.
    first, last = self._expand_to_matching_pairs(first, last, node)

    # Give a chance to node-specific methods to adjust.
    nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
    if (nfirst, nlast) != (first, last):
        # If anything changed, expand again to capture any unmatched brackets.
        nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
    node.first_token = nfirst
    node.last_token = nlast
def _find_last_in_line(self, start_token):
    try:
        newline = self._code.find_token(start_token, token.NEWLINE)
    except IndexError:
        newline = self._code.find_token(start_token, token.ENDMARKER)
    return self._code.prev_token(newline)
def __suitedocstring(self, ttype, tstring, lineno):
    # ignore any intervening noise
    if ttype == tokenize.STRING:
        self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
        self.__state = self.__waiting
    elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                       tokenize.COMMENT):
        # there was no class docstring
        self.__state = self.__waiting
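__suitedocstring (again from pygettext) tolerates only NEWLINE, INDENT, and COMMENT tokens between a class or def header and its docstring. The token stream it scans looks like this:

import io
import tokenize

source = 'class C:\n    """doc"""\n    pass\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# ... OP ':', NEWLINE, INDENT, then the STRING docstring.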