def parser(self):
join_type = (pp.Literal("LEFT") | pp.Literal("RIGHT") | pp.Literal("INNER") | pp.Literal("OUTER"))
node_name = pp.Word(pp.alphas, pp.alphanums + "_$")
col_name = pp.Word(pp.alphas, pp.alphanums + "_$")
col_name_list = pp.Group(pp.delimitedList(col_name, delim=","))
l_brac = pp.Suppress("[")
r_brac = pp.Suppress("]")
single_join = (join_type + pp.Suppress("(") + node_name + l_brac +
col_name_list + r_brac + pp.Suppress("==>") + node_name +
l_brac + col_name_list + r_brac + pp.Suppress(")"))
single_join.addParseAction(lambda x: self._add_join(join_type=x[0],
child_node_name=x[1],
child_cols=x[2],
parent_node_name=x[3],
parent_cols=x[4]))
join_block = pp.OneOrMore(single_join)
return join_block
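# A quick standalone sketch of the join grammar above: the real method wires each
# match into self._add_join, so this sketch collects tuples instead, and the
# input string is purely illustrative.
import pyparsing as pp

join_type = pp.Literal("LEFT") | pp.Literal("RIGHT") | pp.Literal("INNER") | pp.Literal("OUTER")
node_name = pp.Word(pp.alphas, pp.alphanums + "_$")
col_name_list = pp.Group(pp.delimitedList(pp.Word(pp.alphas, pp.alphanums + "_$"), delim=","))
l_brac, r_brac = pp.Suppress("["), pp.Suppress("]")
single_join = (join_type + pp.Suppress("(") + node_name + l_brac +
               col_name_list + r_brac + pp.Suppress("==>") + node_name +
               l_brac + col_name_list + r_brac + pp.Suppress(")"))

joins = []
single_join.addParseAction(lambda t: joins.append(
    (t[0], t[1], list(t[2]), t[3], list(t[4]))))
pp.OneOrMore(single_join).parseString("LEFT (orders[cust_id] ==> customers[id])")
print(joins)  # [('LEFT', 'orders', ['cust_id'], 'customers', ['id'])]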
def _build_parser():
date_literal = pp.Regex(r'(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})') \
.setParseAction(lambda s,l,t: schema.Date(t.year, t.month, t.day))
dollars_literal = pp.Regex(r'\$\d+(\.\d{2})') \
.setParseAction(lambda s,l,t: schema.Dollars(t[0]))
string_literal = (pp.QuotedString('"', escChar='\\') | pp.QuotedString("'", escChar='\\')) \
.setParseAction(lambda s,l,t: schema.String(t[0]))
literal = date_literal | dollars_literal | string_literal
ident = pp.Word(pp.alphas)
match_op = pp.oneOf(operator_map.keys())
match = ident + match_op + literal
assign_op = pp.Literal('=')
assign = ident + assign_op + literal
part = (match | assign).setParseAction(lambda s,l,t: [t])
rule = pp.delimitedList(part) + pp.LineEnd()
return rule
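# Minimal sketch of the rule grammar above. "schema" and "operator_map" belong to
# the surrounding module, so the schema conversions are skipped here and a small
# operator set is assumed (hypothetical); the input line is illustrative.
import pyparsing as pp

operator_map = {'<': None, '>': None, '~': None}  # stand-in for the real map

date_literal = pp.Regex(r'\d{4}/\d{2}/\d{2}')
dollars_literal = pp.Regex(r'\$\d+(\.\d{2})')
string_literal = pp.QuotedString('"', escChar='\\') | pp.QuotedString("'", escChar='\\')
literal = date_literal | dollars_literal | string_literal

ident = pp.Word(pp.alphas)
match = ident + pp.oneOf(list(operator_map.keys())) + literal
assign = ident + pp.Literal('=') + literal
rule = pp.delimitedList(pp.Group(match | assign)) + pp.LineEnd()

print(rule.parseString('payee ~ "grocery", amount < $50.00, date = 2021/03/05'))
# [['payee', '~', 'grocery'], ['amount', '<', '$50.00'], ['date', '=', '2021/03/05']]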
def __init__(self, calc = SimpleCalculator()):
self.exprStack = []
def pushStack(s, l, t):
self.exprStack.append(t[0])
integer = Word(nums).addParseAction(pushStack)
addop = Literal('+') | Literal('-')
mulop = Literal('*') | Literal('/')
lpar = Literal('(')
rpar = Literal(')')
expr = Forward()
atom = integer | lpar + expr + rpar
term = atom + ZeroOrMore((mulop + atom).addParseAction(pushStack))
expr << term + ZeroOrMore((addop + term).addParseAction(pushStack))
self.expr = expr + StringEnd()
self.opfun = {
'+' : (lambda a, b: calc.add(a,b)),
'-' : (lambda a, b: calc.sub(a,b)),
'*' : (lambda a, b: calc.mul(a,b)),
'/' : (lambda a, b: calc.div(a,b)) }
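# Sketch of how the postfix stack built by the parse actions above can be
# evaluated. SimpleCalculator is defined elsewhere in this project, so plain
# arithmetic lambdas stand in for it here; the input expression is illustrative.
from pyparsing import Word, Literal, Forward, ZeroOrMore, StringEnd, nums

expr_stack = []
def push(s, l, t):
    expr_stack.append(t[0])

integer = Word(nums).addParseAction(push)
addop = Literal('+') | Literal('-')
mulop = Literal('*') | Literal('/')
lpar, rpar = Literal('('), Literal(')')
expr = Forward()
atom = integer | lpar + expr + rpar
term = atom + ZeroOrMore((mulop + atom).addParseAction(push))
expr << term + ZeroOrMore((addop + term).addParseAction(push))
parser = expr + StringEnd()

opfun = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
         '*': lambda a, b: a * b, '/': lambda a, b: a / b}

def evaluate(stack):
    # Operators pop their two operands; numbers terminate the recursion.
    token = stack.pop()
    if token in opfun:
        b = evaluate(stack)
        a = evaluate(stack)
        return opfun[token](a, b)
    return int(token)

parser.parseString("2 + 3 * (4 - 1)")
print(evaluate(expr_stack))  # 11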
def anything_beetween(opener_and_closer):
"""Builds a (pyparsing) parser for the content inside delimiters.
Args:
opener_and_closer: a string containing two elements: opener and closer
Returns:
A (pyparsing) parser for the content inside delimiters.
"""
opener = pyparsing.Literal(opener_and_closer[0])
closer = pyparsing.Literal(opener_and_closer[1])
char_removal_mapping = dict.fromkeys(map(ord, opener_and_closer))
other_chars = unicode(string.printable).translate(char_removal_mapping)
word_without_delimiters = pyparsing.Word(other_chars).setName(
"other_chars")
anything = pyparsing.Forward()
delimited_block = opener + anything + closer
# pylint: disable=expression-not-assigned
anything << pyparsing.ZeroOrMore(
word_without_delimiters.setName("word_without_delimiters")
| delimited_block.setName("delimited_block")
)
# Combine all the parts into a single string.
return pyparsing.Combine(anything)
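# A Python 3 sketch of the same idea (the original targets Python 2, where
# unicode() exists); it matches balanced, possibly nested, delimited content.
# The function name and sample input here are illustrative.
import string
import pyparsing

def anything_between(opener_and_closer):
    opener = pyparsing.Literal(opener_and_closer[0])
    closer = pyparsing.Literal(opener_and_closer[1])
    other_chars = "".join(c for c in string.printable
                          if c not in opener_and_closer)
    word_without_delimiters = pyparsing.Word(other_chars)
    anything = pyparsing.Forward()
    delimited_block = opener + anything + closer
    anything <<= pyparsing.ZeroOrMore(word_without_delimiters | delimited_block)
    return pyparsing.Combine(anything)

print(anything_between("()").parseString("f(g(x),y)"))  # ['f(g(x),y)']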
def parse_format(format):
definition = []
# define pattern grammar
variable_ptn = pp.QuotedString("{", endQuoteChar="}")("variable")
escape_open_ptn = pp.Literal("{{")("escape_open")
escape_close_ptn = pp.Literal("}}")("escape_close")
escape_ptn = escape_open_ptn | escape_close_ptn
literal_ptn = pp.CharsNotIn("{}")("literal")
element_ptn = escape_ptn | variable_ptn | literal_ptn
for toks, start, end in element_ptn.leaveWhitespace().scanString(format):
try:
definition.append({
"literal": lambda: Literal(toks[0]),
"variable": lambda: Variable.create(toks[0]),
"escape_open": lambda: OpenBrace(),
"escape_close": lambda: CloseBrace(),
}[toks.items()[0][0]]())
except KeyError:
raise FormatStringError
return definition
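# Sketch of how the element grammar above tokenizes a format string. The
# Literal/Variable/OpenBrace/CloseBrace node classes belong to the surrounding
# module, so this only prints the raw scanString matches; the format string is
# illustrative.
import pyparsing as pp

variable_ptn = pp.QuotedString("{", endQuoteChar="}")("variable")
escape_ptn = pp.Literal("{{")("escape_open") | pp.Literal("}}")("escape_close")
literal_ptn = pp.CharsNotIn("{}")("literal")
element_ptn = escape_ptn | variable_ptn | literal_ptn

for toks, start, end in element_ptn.leaveWhitespace().scanString("{{id}} {name}"):
    print(toks.getName(), repr(toks[0]))
# escape_open '{{'
# literal 'id'
# escape_close '}}'
# literal ' '
# variable 'name'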
def FromString(cls, desc):
"""Parse this stop condition from a string representation.
The string needs to match:
run_time number [seconds|minutes|hours|days|months|years]
Args:
desc (str): The description
Returns:
TimeBasedStopCondition
"""
parse_exp = Literal(u'run_time').suppress() + time_interval(u'interval')
try:
data = parse_exp.parseString(desc)
return TimeBasedStopCondition(data[u'interval'][0])
except ParseException:
raise ArgumentError(u"Could not parse time based stop condition")
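# Sketch of the "run_time <number> <unit>" shape the docstring describes. The
# real time_interval expression and TimeBasedStopCondition live elsewhere in
# this project, so a minimal stand-in grammar is assumed here.
import pyparsing as pp

time_unit = pp.oneOf("seconds minutes hours days months years")
time_interval = pp.Group(pp.Word(pp.nums) + time_unit)
parse_exp = pp.Literal("run_time").suppress() + time_interval("interval")

data = parse_exp.parseString("run_time 10 minutes")
print(list(data["interval"]))  # ['10', 'minutes']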
def _create_simple_statements():
global binary, ident, rvalue, simple_statement, semi, comp, number, slot_id, callrpc_stmt, generic_statement, streamer_stmt, stream, selector
if simple_statement is not None:
return
meta_stmt = Group(Literal('meta').suppress() + ident + Literal('=').suppress() + rvalue + semi).setResultsName('meta_statement')
require_stmt = Group(Literal('require').suppress() + ident + comp + rvalue + semi).setResultsName('require_statement')
set_stmt = Group(Literal('set').suppress() - (ident | number) - Literal("to").suppress() - (rvalue | binary) - Optional(Literal('as').suppress() + config_type) + semi).setResultsName('set_statement')
callrpc_stmt = Group(Literal("call").suppress() + (ident | number) + Literal("on").suppress() + slot_id + Optional(Literal("=>").suppress() + stream('explicit_stream')) + semi).setResultsName('call_statement')
streamer_stmt = Group(Optional(Literal("manual")('manual')) + Optional(oneOf(u'encrypted signed')('security')) + Optional(Literal(u'realtime')('realtime')) + Literal('streamer').suppress() -
Literal('on').suppress() - selector('selector') - Optional(Literal('to').suppress() - slot_id('explicit_tile')) - Optional(Literal('with').suppress() - Literal('streamer').suppress() - number('with_other')) - semi).setResultsName('streamer_statement')
copy_stmt = Group(Literal("copy").suppress() - Optional(oneOf("all count average")('modifier')) - Optional(stream('explicit_input') | number('constant_input')) - Literal("=>") - stream("output") - semi).setResultsName('copy_statement')
trigger_stmt = Group(Literal("trigger") - Literal("streamer") - number('index') - semi).setResultsName('trigger_statement')
simple_statement = meta_stmt | require_stmt | set_stmt | callrpc_stmt | streamer_stmt | trigger_stmt | copy_stmt
# In generic statements, keep track of the location where the match started for error handling
locator = Empty().setParseAction(lambda s, l, t: l)('location')
generic_statement = Group(locator + Group(ZeroOrMore(Regex(u"[^{};]+")) + Literal(u';'))('match')).setResultsName('unparsed_statement')
def _create_block_bnf():
global block_bnf, time_interval, slot_id, statement, block_id, ident, stream
if block_bnf is not None:
return
trigger_clause = Group(stream_trigger | Group(stream).setResultsName('stream_always') | Group(ident).setResultsName('identifier'))
every_block_id = Group(Literal(u'every').suppress() - time_interval).setResultsName('every_block')
when_block_id = Group(Literal(u'when').suppress() + Literal("connected").suppress() - Literal("to").suppress() - slot_id).setResultsName('when_block')
latch_block_id = Group(Literal(u'when').suppress() - stream_trigger).setResultsName('latch_block')
config_block_id = Group(Literal(u'config').suppress() - slot_id).setResultsName('config_block')
on_block_id = Group(Literal(u'on').suppress() - trigger_clause.setResultsName('triggerA') - Optional((Literal("and") | Literal("or")) - trigger_clause.setResultsName('triggerB'))).setResultsName('on_block')
block_id = every_block_id | when_block_id | latch_block_id | config_block_id | on_block_id
block_bnf = Forward()
statement = generic_statement | block_bnf
block_bnf << Group(block_id + Group(Literal(u'{').suppress() + ZeroOrMore(statement) + Literal(u'}').suppress())).setResultsName('block')
def make_grammar():
"""Creates the grammar to be used by a spec matcher."""
# This is apparently how pyparsing is recommended to be used,
# as http://pyparsing.wikispaces.com/share/view/644825 states that
# it is not thread-safe to use a parser across threads.
unary_ops = (
# Order matters here (so that '=' doesn't match before '==')
Literal("==") | Literal("=") |
Literal("!=") | Literal("<in>") |
Literal(">=") | Literal("<=") |
Literal(">") | Literal("<") |
Literal("s==") | Literal("s!=") |
# Order matters here (so that '<' doesn't match before '<=')
Literal("s<=") | Literal("s<") |
# Order matters here (so that '>' doesn't match before '>=')
Literal("s>=") | Literal("s>"))
or_ = Literal("<or>")
# An atom is anything that is not a keyword, followed by anything but whitespace
atom = ~(unary_ops | or_) + Regex(r"\S+")
unary = unary_ops + atom
disjunction = OneOrMore(or_ + atom)
# Even-numbered tokens will be '<or>', so we drop them
disjunction.setParseAction(lambda _s, _l, t: ["<or>"] + t[1::2])
expr = disjunction | unary | atom
return expr
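# Example inputs for the spec-matcher grammar above (assumes the snippet's
# pyparsing imports are in scope; the spec strings are illustrative).
grammar = make_grammar()
print(grammar.parseString(">= 60"))                # ['>=', '60']
print(grammar.parseString("<or> spam <or> eggs"))  # ['<or>', 'spam', 'eggs']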
def parse_filter_str(self, filter_str):
"""
Parse a filter string.
"""
prop = pp.WordStart(pp.alphas) + pp.Word(pp.alphanums +
"_").setResultsName("prop")
value = (pp.QuotedString("'") | pp.QuotedString('"') | pp.Word(
pp.printables, excludeChars=",")).setResultsName("value")
types_ = pp.oneOf("re eq ne gt ge lt le").setResultsName("types")
flags = pp.oneOf("C I").setResultsName("flags")
comma = pp.Literal(',')
quote = (pp.Literal("'") | pp.Literal('"')).setResultsName("quote")
type_exp = pp.Group(pp.Literal("type") + pp.Literal(
"=") + quote + types_ + quote).setResultsName("type_exp")
flag_exp = pp.Group(pp.Literal("flag") + pp.Literal(
"=") + quote + flags + quote).setResultsName("flag_exp")
semi_expression = pp.Forward()
semi_expression << pp.Group(pp.Literal("(") +
prop + comma + value +
pp.Optional(comma + type_exp) +
pp.Optional(comma + flag_exp) +
pp.Literal(")")
).setParseAction(
self.parse_filter_obj).setResultsName("semi_expression")
expr = pp.Forward()
expr << pp.operatorPrecedence(semi_expression, [
("not", 1, pp.opAssoc.RIGHT, self.not_operator),
("and", 2, pp.opAssoc.LEFT, self.and_operator),
("or", 2, pp.opAssoc.LEFT, self.or_operator)
])
result = expr.parseString(filter_str)
return result
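# Sketch of the filter-expression shape the method above accepts. The not/and/or
# callbacks and parse_filter_obj are methods of the class, so this stripped-down
# grammar has no parse actions; the filter string is illustrative.
import pyparsing as pp

prop = pp.WordStart(pp.alphas) + pp.Word(pp.alphanums + "_")("prop")
value = (pp.QuotedString("'") | pp.QuotedString('"') |
         pp.Word(pp.printables, excludeChars=","))("value")
quote = pp.Literal("'") | pp.Literal('"')
type_exp = pp.Group(pp.Literal("type") + pp.Literal("=") + quote +
                    pp.oneOf("re eq ne gt ge lt le") + quote)
semi_expression = pp.Group(pp.Literal("(") + prop + "," + value +
                           pp.Optional("," + type_exp) + pp.Literal(")"))
expr = pp.infixNotation(semi_expression, [
    ("not", 1, pp.opAssoc.RIGHT),
    ("and", 2, pp.opAssoc.LEFT),
    ("or", 2, pp.opAssoc.LEFT),
])
print(expr.parseString("(name, 'web.*', type='re') and not (state, 'stopped')"))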
def parseformat(classname=None, formatstring=None):
attribmarker = (p.Literal('@')|p.Literal('!')).suppress()
cellseparator = '||'
concatmarker = p.Optional(p.Literal('+'))
attribgroup = attribmarker + concatmarker + p.Word(p.alphanums)
cells = []
_splitstring = [cell.strip() for cell in formatstring.split(cellseparator)]
for cell in _splitstring:
_scan = attribgroup.scanString(cell)
_templist = []
prestart = 0
end = 0
for match in _scan:
start = match[1]
end = match[2]
_start = cell[prestart:start]
if len(_start) > 0:
# conditional logic avoids empty leading output cells
_templist.append(om.Filler(_start))
_templist.append(om.AttributeMatch(cell[start + 1:end])) #, classname=classname))
prestart = end
# print('templist:', _templist)
_end = cell[end:]
if len(_end) > 0:
# conditional logic avoids empty trailing output cells
_templist.append(om.Filler(cell[end:]))
cells.append(_templist)
return cells
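# Sketch of what the attribute-marker grammar above picks out of a single cell;
# om.Filler and om.AttributeMatch belong to the surrounding module, so only the
# raw scanString matches are shown here (sample cell text is illustrative).
import pyparsing as p

attribmarker = (p.Literal('@') | p.Literal('!')).suppress()
attribgroup = attribmarker + p.Optional(p.Literal('+')) + p.Word(p.alphanums)

for tokens, start, end in attribgroup.scanString("Name: @first @+last"):
    print(list(tokens))
# ['first']
# ['+', 'last']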
# --- static ---
def getnewnick(sentence):
"""Parse detail to get new nick."""
nick = pp.Regex(r'[a-zA-Z0-9\-_|^]+').setResultsName('nick')
parser = pp.Literal('is now known as') + nick + pp.restOfLine
try:
result = parser.parseString(sentence)
return result.nick
except pp.ParseException:
pass
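# Usage sketch for the parser above (assumes the function and its pyparsing
# import are in scope; the message text is illustrative).
print(getnewnick("is now known as neo :changed nick"))  # neo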
def _build_input_source_parser(legalChars, commentInProgress):
"""Builds a PyParsing parser for alternate user input sources (from file, pipe, etc.)"""
input_mark = pyparsing.Literal('<')
input_mark.setParseAction(lambda x: '')
file_name = pyparsing.Word(legalChars + '/\\')
input_from = file_name('inputFrom')
input_from.setParseAction(replace_with_file_contents)
# a not-entirely-satisfactory way of distinguishing < as in "import from" from <
# as in "lesser than"
inputParser = input_mark + pyparsing.Optional(input_from) + pyparsing.Optional('>') + \
pyparsing.Optional(file_name) + (pyparsing.stringEnd | '|')
inputParser.ignore(commentInProgress)
return inputParser
def __init__(self, ffilter, queue_out):
FuzzQueue.__init__(self, queue_out)
Thread.__init__(self)
self.setName('filter_thread')
self.queue_out = queue_out
if PYPARSING:
element = oneOf("c l w h")
digits = "XB0123456789"
integer = Word( digits )#.setParseAction( self.__convertIntegers )
elementRef = Group(element + oneOf("= != < > >= <=") + integer)
operator = oneOf("and or")
definition = elementRef + ZeroOrMore( operator + elementRef)
nestedformula = Group(Suppress(Optional(Literal("("))) + definition + Suppress(Optional(Literal(")"))))
self.finalformula = nestedformula + ZeroOrMore( operator + nestedformula)
elementRef.setParseAction(self.__compute_element)
nestedformula.setParseAction(self.__compute_formula)
self.finalformula.setParseAction(self.__myreduce)
self.res = None
self.hideparams = ffilter
if "XXX" in self.hideparams['codes']:
self.hideparams['codes'].append("0")
self.baseline = None
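# Sketch of the filter expressions this grammar accepts. The parse actions that
# evaluate an expression against a fuzz result are methods of the class and are
# omitted here; the filter string is illustrative.
from pyparsing import Word, Group, Literal, Suppress, Optional, ZeroOrMore, oneOf

element = oneOf("c l w h")
integer = Word("XB0123456789")
elementRef = Group(element + oneOf("= != < > >= <=") + integer)
operator = oneOf("and or")
definition = elementRef + ZeroOrMore(operator + elementRef)
nestedformula = Group(Suppress(Optional(Literal("("))) + definition +
                      Suppress(Optional(Literal(")"))))
finalformula = nestedformula + ZeroOrMore(operator + nestedformula)

print(finalformula.parseString("c=200 and l>10"))
# [[['c', '=', '200'], 'and', ['l', '>', '10']]]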
def __init__(self):
if PYPARSING:
category = Word( alphas + "_-*", alphanums + "_-*" )
operator = oneOf("and or ,")
neg_operator = "not"
elementRef = category
definition = elementRef + ZeroOrMore( operator + elementRef)
nestedformula = Group(Suppress(Optional(Literal("("))) + definition + Suppress(Optional(Literal(")"))))
neg_nestedformula = Optional(neg_operator) + nestedformula
self.finalformula = neg_nestedformula + ZeroOrMore( operator + neg_nestedformula)
elementRef.setParseAction(self.__compute_element)
neg_nestedformula.setParseAction(self.__compute_neg_formula)
nestedformula.setParseAction(self.__compute_formula)
self.finalformula.setParseAction(self.__myreduce)
def getrule():
"""
Using pyparsing, get a rule out of a string.
"""
arrow = pp.Literal("==>")
buff = pp.Word(pp.alphas, "".join([pp.alphanums, "_"]))
special_valueLHS = pp.oneOf([x for x in _LHSCONVENTIONS.keys()])
end_buffer = pp.Literal(">")
special_valueRHS = pp.oneOf([x for x in _RHSCONVENTIONS.keys()])
chunk = getchunk()
rule_reader = pp.Group(pp.OneOrMore(pp.Group(special_valueLHS + buff + end_buffer + pp.Group(pp.Optional(chunk))))) + arrow + pp.Group(pp.OneOrMore(pp.Group(special_valueRHS + buff + end_buffer + pp.Group(pp.Optional(chunk)))))
return rule_reader
def __update_grammar(self, grammar):
registers = list(self.registers)
grammar.kRegisterNames << pp.Or(map(pp.Literal, list(self.registers)))
return grammar
def parse_search_query(query):
unicode_printables = u''.join(unichr(c) for c in xrange(65536) if not unichr(c).isspace())
word = TextNode.group(Word(unicode_printables))
exact = ExactNode.group(QuotedString('"', unquoteResults=True, escChar='\\'))
term = exact | word
comparison_name = Word(unicode_printables, excludeChars=':')
comparison = ComparisonNode.group(comparison_name + Literal(':') + term)
content = OneOrMore(comparison | term)
return content.parseString(query)
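# A Python 3 sketch of the same search grammar (the original is Python 2:
# unichr/xrange). The TextNode/ExactNode/ComparisonNode classes belong to the
# surrounding module, so plain Groups stand in; the query string is illustrative.
from pyparsing import Word, QuotedString, Literal, Group, OneOrMore

unicode_printables = ''.join(chr(c) for c in range(65536) if not chr(c).isspace())
word = Group(Word(unicode_printables))
exact = Group(QuotedString('"', unquoteResults=True, escChar='\\'))
term = exact | word
comparison_name = Word(unicode_printables, excludeChars=':')
comparison = Group(comparison_name + Literal(':') + term)
content = OneOrMore(comparison | term)

print(content.parseString('author:"Jane Doe" budget report'))
# [['author', ':', ['Jane Doe']], ['budget'], ['report']]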
def is_ok(self):
# pyparsing 2.0.0 bug, but it may be patched in distributions
try:
import pyparsing
f = pyparsing.Forward()
f <<= pyparsing.Literal('a')
return f is not None
except (ImportError, TypeError):
return False
def parse(self, ping_message):
headline, packet_info_line, body_line_list = self._preprocess_parse(
line_list=ping_message)
packet_pattern = (
pp.Word(pp.nums) +
pp.Literal("packets transmitted,") +
pp.Word(pp.nums) +
pp.Literal("received,")
)
self._destination = self._parse_destination(headline)
parse_list = packet_pattern.parseString(_to_unicode(packet_info_line))
self._packet_transmit = int(parse_list[0])
self._packet_receive = int(parse_list[2])
self._duplicates = self.__parse_duplicate(packet_info_line)
try:
rtt_line = body_line_list[1]
except IndexError:
return
if typepy.is_null_string(rtt_line):
return
rtt_pattern = (
pp.Literal("rtt min/avg/max/mdev =") +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") +
pp.Word(pp.nums + "ms")
)
parse_list = rtt_pattern.parseString(_to_unicode(rtt_line))
self._rtt_min = float(parse_list[1])
self._rtt_avg = float(parse_list[3])
self._rtt_max = float(parse_list[5])
self._rtt_mdev = float(parse_list[7])
def __parse_duplicate(line):
packet_pattern = (
pp.SkipTo(pp.Word("+" + pp.nums) + pp.Literal("duplicates,")) +
pp.Word("+" + pp.nums) +
pp.Literal("duplicates,")
)
try:
duplicate_parse_list = packet_pattern.parseString(
_to_unicode(line))
except pp.ParseException:
return 0
return int(duplicate_parse_list[-2].strip("+"))
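# Sketch of the two patterns above applied to typical Linux ping summary lines
# (the sample strings are illustrative, not captured output).
import pyparsing as pp

packet_info_line = "10 packets transmitted, 10 received, 0% packet loss, time 9012ms"
rtt_line = "rtt min/avg/max/mdev = 0.409/0.542/0.617/0.058 ms"

packet_pattern = (pp.Word(pp.nums) + pp.Literal("packets transmitted,") +
                  pp.Word(pp.nums) + pp.Literal("received,"))
print(packet_pattern.parseString(packet_info_line))
# ['10', 'packets transmitted,', '10', 'received,']

rtt_pattern = (pp.Literal("rtt min/avg/max/mdev =") +
               pp.Word(pp.nums + ".") + "/" +
               pp.Word(pp.nums + ".") + "/" +
               pp.Word(pp.nums + ".") + "/" +
               pp.Word(pp.nums + ".") +
               pp.Word(pp.nums + "ms"))
parse_list = rtt_pattern.parseString(rtt_line)
print(parse_list[1], parse_list[3], parse_list[5], parse_list[7])
# 0.409 0.542 0.617 0.058  (min, avg, max, mdev)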
def parse(self, ping_message):
headline, packet_info_line, body_line_list = self._preprocess_parse(
line_list=ping_message)
packet_pattern = (
pp.Literal("Packets: Sent = ") +
pp.Word(pp.nums) +
pp.Literal(", Received = ") +
pp.Word(pp.nums)
)
self._destination = self._parse_destination(headline)
parse_list = packet_pattern.parseString(_to_unicode(packet_info_line))
self._packet_transmit = int(parse_list[1])
self._packet_receive = int(parse_list[3])
try:
rtt_line = body_line_list[2].strip()
except IndexError:
return
if typepy.is_null_string(rtt_line):
return
rtt_pattern = (
pp.Literal("Minimum = ") +
pp.Word(pp.nums) +
pp.Literal("ms, Maximum = ") +
pp.Word(pp.nums) +
pp.Literal("ms, Average = ") +
pp.Word(pp.nums)
)
parse_list = rtt_pattern.parseString(_to_unicode(rtt_line))
self._rtt_min = float(parse_list[1])
self._rtt_avg = float(parse_list[5])
self._rtt_max = float(parse_list[3])
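# Sketch of the Windows ping summary lines these patterns expect (sample strings
# are illustrative, not captured output).
import pyparsing as pp

packet_info_line = "Packets: Sent = 4, Received = 4, Lost = 0 (0% loss),"
rtt_line = "Minimum = 1ms, Maximum = 2ms, Average = 1ms"

packet_pattern = (pp.Literal("Packets: Sent = ") + pp.Word(pp.nums) +
                  pp.Literal(", Received = ") + pp.Word(pp.nums))
parse_list = packet_pattern.parseString(packet_info_line)
print(parse_list[1], parse_list[3])  # 4 4  (transmit, receive)

rtt_pattern = (pp.Literal("Minimum = ") + pp.Word(pp.nums) +
               pp.Literal("ms, Maximum = ") + pp.Word(pp.nums) +
               pp.Literal("ms, Average = ") + pp.Word(pp.nums))
parse_list = rtt_pattern.parseString(rtt_line)
print(parse_list[1], parse_list[3], parse_list[5])  # 1 2 1  (min, max, avg)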
def parse(self, ping_message):
headline, packet_info_line, body_line_list = self._preprocess_parse(
line_list=ping_message)
packet_pattern = (
pp.Word(pp.nums) +
pp.Literal("packets transmitted,") +
pp.Word(pp.nums) +
pp.Literal("packets received,")
)
self._destination = self._parse_destination(headline)
parse_list = packet_pattern.parseString(_to_unicode(packet_info_line))
self._packet_transmit = int(parse_list[0])
self._packet_receive = int(parse_list[2])
try:
rtt_line = body_line_list[1]
except IndexError:
return
if typepy.is_null_string(rtt_line):
return
rtt_pattern = (
pp.Literal("round-trip min/avg/max/stddev =") +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") +
pp.Word(pp.nums + "ms")
)
parse_list = rtt_pattern.parseString(_to_unicode(rtt_line))
self._rtt_min = float(parse_list[1])
self._rtt_avg = float(parse_list[3])
self._rtt_max = float(parse_list[5])
self._rtt_mdev = float(parse_list[7])
def parse(self, ping_message):
headline, packet_info_line, body_line_list = self._preprocess_parse(
line_list=ping_message)
packet_pattern = (
pp.Word(pp.nums) +
pp.Literal("packets transmitted,") +
pp.Word(pp.nums) +
pp.Literal("packets received,")
)
self._destination = self._parse_destination(headline)
parse_list = packet_pattern.parseString(_to_unicode(packet_info_line))
self._packet_transmit = int(parse_list[0])
self._packet_receive = int(parse_list[2])
self._duplicates = self.__parse_duplicate(packet_info_line)
try:
rtt_line = body_line_list[1]
except IndexError:
return
if typepy.is_null_string(rtt_line):
return
rtt_pattern = (
pp.Literal("round-trip min/avg/max =") +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") + "/" +
pp.Word(pp.nums + ".") +
pp.Word(pp.nums + "ms")
)
parse_list = rtt_pattern.parseString(_to_unicode(rtt_line))
self._rtt_min = float(parse_list[1])
self._rtt_avg = float(parse_list[3])
self._rtt_max = float(parse_list[5])
def wrapped_elem(wrapper, elem):
wrap = pp.Literal(wrapper).suppress()
return wrap + elem + wrap
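# Usage sketch for the helper above (assumes pyparsing is imported as pp, as in
# the surrounding module; the input string is illustrative).
print(wrapped_elem("'", pp.Word(pp.alphas)).parseString("'hello'"))  # ['hello']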