def currentQuotedString(self):
    # Handle quoted strings - pity shlex doesn't handle it.
    assert self.token.startswith('"'), self.token
    bits = [self.token]
    while 1:
        tok = self.getToken()
        if not tok.startswith('"'):
            self.ungetToken()
            break
        bits.append(tok)
    sval = "".join(bits)[1:-1]  # Remove end quotes.
    # Fixup quotes in the body, and all (some?) quoted characters back
    # to their raw value.
    for i, o in ('""', '"'), ("\\r", "\r"), ("\\n", "\n"), ("\\t", "\t"):
        sval = sval.replace(i, o)
    return sval
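
A minimal sketch of the fixup step above, run on an illustrative already-joined value (the sample string is not from the original parser):

sval = '"say ""hi""\\nplease"'[1:-1]  # strip the outer quotes
for i, o in ('""', '"'), ("\\r", "\r"), ("\\n", "\n"), ("\\t", "\t"):
    sval = sval.replace(i, o)
# sval == 'say "hi"\nplease' (doubled quotes and escaped characters are undone)
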
def parseH(self, file):
    lex = shlex.shlex(file)
    lex.commenters = "//"
    token = " "
    while token is not None:
        token = lex.get_token()
        if token == "" or token is None:
            token = None
        else:
            if token == 'define':
                n = lex.get_token()
                i = int(lex.get_token())
                self.ids[n] = i
                if i in self.names:
                    # Dupe ID really isn't a problem - most consumers
                    # want to go from name->id, and this is OK.
                    # It means you can't go from id->name though.
                    pass
                    # ignore AppStudio special ones
                    #if not n.startswith("_APS_"):
                    #    print "Duplicate id",i,"for",n,"is", self.names[i]
                else:
                    self.names[i] = n
                if self.next_id <= i:
                    self.next_id = i + 1
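
A standalone sketch of the same approach on a toy header string (the identifiers are illustrative):

import shlex

header = '// resource ids\n#define IDC_BUTTON1 1001\n#define IDC_EDIT1 1002\n'
lex = shlex.shlex(header)
lex.commenters = "//"   # make '/' the comment character instead of '#'
ids = {}
tok = lex.get_token()
while tok:
    if tok == 'define':  # '#' and 'define' arrive as separate tokens
        name = lex.get_token()
        ids[name] = int(lex.get_token())
    tok = lex.get_token()
# ids == {'IDC_BUTTON1': 1001, 'IDC_EDIT1': 1002}
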
def __init__(self, synfiledata):
    # init
    self.instructions = []
    self.dnscache = {}
    lexer = list(shlex.shlex(synfiledata))
    itr_ctr = 0
    while len(lexer) > 0:
        token = lexer[0]
        # should be the start of a new line
        if token.lower() == 'flow':
            (flowdecl, lexer) = self.lex_flow(lexer[1:])
            self.instructions.append(flowdecl)
        else:
            # treat as an event
            (eventdecl, lexer) = self.lex_event(lexer)
            self.instructions.append(eventdecl)
        itr_ctr = itr_ctr + 1
def __init__(self, data):
    self.shape = {}
    self.unit = {}
    self.fields = []
    for line in data:
        line = line.replace('\n', '')
        s = shlex.shlex(line)
        s.whitespace_split = True
        s.commenters = ''
        s.quotes = '"'
        line = list(s)
        # select the keys list and default values array
        if line[0] in self._KEYS:
            key_list = self._KEYS[line[0]]
            values = line[1:] + ['' for n in range(len(key_list) - len(line[1:]))]
            if line[0] == 'S':
                self.shape = dict(zip(key_list, values))
            elif line[0] == 'U':
                self.unit = dict(zip(key_list, values))
        elif line[0][0] == 'F':
            key_list = self._F_KEYS
            values = line + ['' for n in range(len(key_list) - len(line))]
            self.fields.append(dict(zip(key_list, values)))
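
For reference, the non-posix lexer configured above keeps quoted fields together as single tokens, quotes included (the sample line is illustrative):

import shlex

s = shlex.shlex('S "Main Board" 10 20')
s.whitespace_split = True
s.commenters = ''
s.quotes = '"'
# list(s) -> ['S', '"Main Board"', '10', '20']
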
def parse_for_var(self, line):
    lexer = shlex.shlex(line)
    lexer.wordchars = self.wordchars
    varname = lexer.get_token()
    is_env_var = lexer.get_token() == '='
    value = ''.join(lexer)
    if is_env_var:
        return (varname, value)
    raise CronVarError("Not a variable.")
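
A minimal standalone sketch of the same technique (the function name is illustrative, not part of the library's API): split a "NAME = value" line with shlex and treat the rest of the token stream as the value.

import shlex

def parse_var_line(line):
    lexer = shlex.shlex(line)
    name = lexer.get_token()
    if lexer.get_token() != '=':
        raise ValueError("Not a variable assignment: %r" % line)
    return name, ''.join(lexer)

# parse_var_line('MAILTO = root@localhost') -> ('MAILTO', 'root@localhost')
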
def safe_split_line(inputline):
    parser = shlex.shlex(inputline, posix=True)
    parser.whitespace_split = True
    res = []
    # track whether or not we're looking at quoted text -- it should suppress a
    # lot of types of completion if we see an open quote without a close quote
    quoted = False
    try:
        for val in parser:
            res.append(val)
    # No closing quotation
    except ValueError:
        quoted = True
    # grab the last token from the shlexer
    if parser.token:
        res.append(parser.token)
    return res, quoted
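
A usage sketch: an unterminated quote returns the partial token plus quoted=True instead of raising.

# safe_split_line('git commit -m "work in prog')
#   -> (['git', 'commit', '-m', 'work in prog'], True)
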
def rename_window(self, new_name):
    """Return :class:`Window` object ``$ tmux rename-window <new_name>``.

    :param new_name: name of the window
    :type new_name: str
    """
    import shlex

    lex = shlex.shlex(new_name)
    lex.escape = ' '
    lex.whitespace_split = False

    try:
        self.cmd(
            'rename-window',
            new_name
        )
        self['window_name'] = new_name
    except Exception as e:
        logger.error(e)

    self.server._update_windows()
    return self
def testQuote(self):
    safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
    unicode_sample = '\xe9\xe0\xdf'  # e + acute accent, a + grave, sharp s
    unsafe = '"`$\\!' + unicode_sample

    self.assertEqual(shlex.quote(''), "''")
    self.assertEqual(shlex.quote(safeunquoted), safeunquoted)
    self.assertEqual(shlex.quote('test file name'), "'test file name'")
    for u in unsafe:
        self.assertEqual(shlex.quote('test%sname' % u),
                         "'test%sname'" % u)
    for u in unsafe:
        self.assertEqual(shlex.quote("test%s'name'" % u),
                         "'test%s'\"'\"'name'\"'\"''" % u)

# Allow this test to be used with old shlex.py
def split_args(line):
    """Version of shlex.split that silently accepts incomplete strings."""
    lex = shlex.shlex(line, posix=True)
    lex.whitespace_split = True
    lex.commenters = ''
    res = []
    try:
        while True:
            res.append(next(lex))
    except ValueError:  # No closing quotation
        pass
    except StopIteration:  # End of loop
        pass
    if lex.token:
        res.append(lex.token)
    return res
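
Quick comparison with shlex.split (illustrative input):

# split_args('pip install "my pkg')   -> ['pip', 'install', 'my pkg']
# shlex.split('pip install "my pkg')  -> raises ValueError: No closing quotation
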
def do_bash_complete(cli, prog_name):
    comp_words = os.environ['COMP_WORDS']
    try:
        cwords = shlex.split(comp_words)
        quoted = False
    except ValueError:  # No closing quotation
        cwords = split_args(comp_words)
        quoted = True
    cword = int(os.environ['COMP_CWORD'])
    args = cwords[1:cword]
    try:
        incomplete = cwords[cword]
    except IndexError:
        incomplete = ''
    choices = get_choices(cli, prog_name, args, incomplete)
    if quoted:
        echo('\t'.join(opt for opt, _ in choices), nl=False)
    else:
        echo('\t'.join(re.sub(r"""([\s\\"'])""", r'\\\1', opt)
                       for opt, _ in choices), nl=False)
    return True
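
Roughly how the shell drives this during completion (values are illustrative; the real wiring is done by the generated completion script):

# COMP_WORDS='mycli sub --opt'  COMP_CWORD=2
#   -> cwords == ['mycli', 'sub', '--opt'], args == ['sub'], incomplete == '--opt'
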
def _expand_args(command):
    """Parses command strings and returns a Popen-ready list."""
    # Prepare arguments.
    if isinstance(command, STR_TYPES):
        splitter = shlex.shlex(command.encode('utf-8'))
        splitter.whitespace = '|'
        splitter.whitespace_split = True
        command = []

        while True:
            token = splitter.get_token()
            if token:
                command.append(token)
            else:
                break

        command = list(map(shlex.split, command))
    return command
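
Assuming the Python 2-era str handling implied by the .encode() call, splitting on '|' yields one shell-split argv per pipeline stage:

# _expand_args('cat access.log | grep 404')
#   -> [['cat', 'access.log'], ['grep', '404']]
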
def parse_config_string(config_string, issue_warnings=True):
    """
    Parses a config string (comma-separated key=value components) into a dict.
    """
    config_dict = {}
    my_splitter = shlex.shlex(config_string, posix=True)
    my_splitter.whitespace = ','
    my_splitter.whitespace_split = True
    for kv_pair in my_splitter:
        kv_pair = kv_pair.strip()
        if not kv_pair:
            continue
        kv_tuple = kv_pair.split('=', 1)
        if len(kv_tuple) == 1:
            if issue_warnings:
                TheanoConfigWarning.warn(
                    ("Config key '%s' has no value, ignoring it"
                     % kv_tuple[0]),
                    stacklevel=1)
        else:
            k, v = kv_tuple
            # subsequent values for k will override earlier ones
            config_dict[k] = v
    return config_dict
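
Usage sketch (posix mode strips the quotes around values):

# parse_config_string('device=cuda0,floatX=float32,mode="FAST_RUN"')
#   -> {'device': 'cuda0', 'floatX': 'float32', 'mode': 'FAST_RUN'}
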
def _run_hook(self, hook_name: str):
    key = f"exec_{hook_name}"
    value = self.config[key]

    if value == "/usr/bin/true":
        return

    self.logger.verbose(
        f"Running {hook_name} hook for {self.humanreadable_name}"
    )

    lex = shlex.shlex(value)  # noqa: T484
    lex.whitespace_split = True
    command = list(lex)

    return iocage.lib.helpers.exec(
        command,
        logger=self.logger,
        env=self.env
    )
def smartsplit(string, sep):
    """Split while allowing escaping.

    So far, this seems to do what I expect - split at the separator,
    allow escaping via a backslash, and allow the backslash itself to be
    escaped. One problem is that it can raise a ValueError when given a
    backslash without a character to escape. I'd really like a smart
    splitter without manually scanning the string. But maybe that is
    exactly what should be done.
    """
    assert string is not None  # or shlex will read from stdin
    if not six.PY3:
        # On 2.6, shlex fails miserably with unicode input
        is_unicode = isinstance(string, unicode)
        if is_unicode:
            string = string.encode('utf8')
    l = shlex.shlex(string, posix=True)
    l.whitespace += ','
    l.whitespace_split = True
    l.quotes = ''
    if not six.PY3 and is_unicode:
        return map(lambda s: s.decode('utf8'), list(l))
    else:
        return list(l)
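
Usage sketch: both commas and whitespace split, and a backslash escapes the separator (the sep argument is accepted but unused here):

# smartsplit('a,b\\ c,d', ',')  ->  ['a', 'b c', 'd']
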
def parse_binary(cls, string):
    r"""
    Parse a string for a binary (executable). Allow multiple arguments
    to indicate the binary (as parsed by shlex).

    Return a list of arguments suitable for passing to subprocess
    functions.

    >>> ExternalTool.parse_binary('/usr/bin/lessc')
    ['/usr/bin/lessc']
    >>> ExternalTool.parse_binary('node node_modules/bin/lessc')
    ['node', 'node_modules/bin/lessc']
    >>> ExternalTool.parse_binary('"binary with spaces"')
    ['binary with spaces']
    >>> ExternalTool.parse_binary(r'binary\ with\ spaces')
    ['binary with spaces']
    >>> ExternalTool.parse_binary('')
    []
    """
    return shlex.split(string)
def read_file(file_path):
    f = open(file_path)
    # remove comments and empty lines from the file content to avoid
    # false positives
    file = shlex.shlex(f, posix=False)
    #file = filter(None, re.sub("#.*[^\n]", "", f.read()).splitlines())
    return file
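
Iterating the returned lexer yields tokens with '#'-comments and blank lines skipped (illustrative file contents):

# a file containing:  user root   # admin account
#                     port 22
# iterates as: ['user', 'root', 'port', '22']
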
def _parse_flink_line(line, final_flags):
    """private"""
    lexer = shlex.shlex(line, posix=True)
    lexer.whitespace_split = True

    t = lexer.get_token()
    tmp_flags = []
    while t:
        t = _parse_flink_token(lexer, t, tmp_flags)

    final_flags.extend(tmp_flags)
    return final_flags
def shsplit(s):
    """ Returns the original list from what shjoin returned """
    lex = shlex.shlex(s, posix=True)
    lex.escapedquotes = b'"\''
    lex.whitespace_split = True
    return [x.decode('utf-8') for x in list(lex)]
def parameter_string_to_list(par):
    import shlex
    parameter_string = shlex.shlex(par)
    parameter_string.quotes = '"'
    parameter_string.whitespace_split = True
    parameter_string.commenters = ''
    parameters = list(parameter_string)
    return parameters
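
Usage sketch: the non-posix lexer keeps the double quotes on quoted arguments:

# parameter_string_to_list('--name "My Device" --port 8080')
#   -> ['--name', '"My Device"', '--port', '8080']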