Python lex() usage examples (source snippets from open-source projects)
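
The snippets below are collected from open-source projects that call PLY's lex.lex() (module ply.lex). As a baseline for reading them, here is a minimal self-contained lexer; the token names and input text are illustrative only and not taken from any project on this page:

import ply.lex as lex

# Illustrative token set -- hypothetical, not from any project below.
tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'            # characters to skip between tokens

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)  # function rules may transform the value
    return t

def t_error(t):
    print("Illegal character %r" % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()           # build a lexer from this module's rules
lexer.input("1 + 23")
for tok in iter(lexer.token, None):   # token() returns None at end of input
    print(tok.type, tok.value)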

cpp.py: Preprocessor.__init__ from PLY's bundled cpp.py, vendored identically by Frida (Alejandro-Valdes), quickstart-git2s3 (aws-quickstart), Docker-XX-Net (kuanghy), PyQYT (collinsctk), Algebra-Computacional-UCM (hhassan1), SublimeKSP (nojanath), and ZCC (hlFu)
def __init__(self,lexer=None):
        if lexer is None:
            lexer = lex.lexer
        self.lexer = lexer
        self.macros = { }
        self.path = []
        self.temp_path = []

        # Probe the lexer for selected tokens
        self.lexprobe()

        tm = time.localtime()
        self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
        self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
        self.parser = None

    # -----------------------------------------------------------------------------
    # tokenize()
    #
    # Utility function. Given a string of text, tokenize into a list of tokens
    # -----------------------------------------------------------------------------
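
The header comment above introduces Preprocessor.tokenize(); in PLY's distributed cpp.py the method body is only a few lines. It is reproduced here for reference and may differ slightly between PLY versions:

def tokenize(self,text):
        tokens = []
        self.lexer.input(text)
        while True:
            tok = self.lexer.token()
            if not tok: break
            tokens.append(tok)
        return tokens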
common.py (project: dice-notation-python, author: Bernardo-MG)
def __init__(self, **kw):
        super(PlyParser, self).__init__()
        self.debug = kw.get('debug', 0)
        self.names = {}
        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[
                          1] + "_" + self.__class__.__name__
        except Exception:  # __file__ may be undefined (e.g. frozen/interactive)
            modname = "parser" + "_" + self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"
        # print self.debugfile, self.tabmodule

        # Builds the lexer and parser
        lex.lex(module=self, debug=self.debug)
        yacc.yacc(module=self,
                  debug=self.debug,
                  debugfile=self.debugfile,
                  tabmodule=self.tabmodule)
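
lex.lex(module=self) and yacc.yacc(module=self) scan the given object's attributes for token definitions and p_ grammar rules, so a PlyParser subclass is expected to carry them. A minimal sketch of the lexing half of that contract (class and token names hypothetical):

import ply.lex as lex

class MiniLexer(object):
    tokens = ('NUMBER',)         # lex.lex() looks this attribute up

    t_ignore = ' \t'
    t_NUMBER = r'\d+'

    def t_error(self, t):        # bound methods are accepted as rules
        t.lexer.skip(1)

    def build(self):
        self.lexer = lex.lex(module=self)

m = MiniLexer()
m.build()
m.lexer.input("42")
print(m.lexer.token())           # LexToken(NUMBER,'42',1,0)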
smi.py (project: PyQYT, author: collinsctk)
def reset(self):
        if debug.logger & debug.flagLexer:
            logger = debug.logger.getCurrentLogger()
        else:
            logger = lex.NullLogger()

        if debug.logger & debug.flagGrammar:
            debuglogger = debug.logger.getCurrentLogger()
        else:
            debuglogger = None

        self.lexer = lex.lex(module=self,
                             reflags=re.DOTALL,
                             outputdir=self._tempdir,
                             debuglog=debuglogger,
                             errorlog=logger)
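
lex.NullLogger is PLY's do-nothing logger: passed as errorlog, as reset() does when lexer debugging is disabled, it suppresses the warnings PLY would otherwise print while building the lexer. A standalone illustration with hypothetical token rules:

import ply.lex as lex

tokens = ('WORD',)
t_WORD = r'\w+'
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

# NullLogger swallows build-time warnings and errors.
quiet_lexer = lex.lex(errorlog=lex.NullLogger())
quiet_lexer.input("hello world")
print([tok.value for tok in iter(quiet_lexer.token, None)])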
integer.py (project: Algebra-Computacional-UCM, author: hhassan1)
def __init__(self):
        super(GenericIntegerLexer, self).__init__()
        self.tokens = (
            'NUMBER',
            'PLUS',
            'MINUS',
            'TIMES',
            'DIVIDE',
            'POWER',
            'LPAREN',
            'RPAREN',
            'SCALAR',
            )
        self.t_NUMBER = r'\d+'
        self.t_PLUS   = r'\+'
        self.t_MINUS  = r'-'
        self.t_TIMES  = r'\*'
        self.t_DIVIDE  = r'/'
        self.t_POWER  = r'\^'
        self.t_LPAREN = r'\('
        self.t_RPAREN = r'\)'
        self.t_ignore = " \t"
        self.lexer = lex.lex(object=self)
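
Because every rule here is a plain instance attribute, lex.lex(object=self) can collect them, and the lexer is usable as soon as __init__ returns. Assuming the superclass supplies the required t_error handler, usage would look like:

calc = GenericIntegerLexer()
calc.lexer.input("3 + 4*2")
for tok in iter(calc.lexer.token, None):
    print(tok.type, tok.value)
# NUMBER 3, PLUS +, NUMBER 4, TIMES *, NUMBER 2
# (values stay strings: t_NUMBER is a string rule with no conversion)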
ksp_parser.py (project: SublimeKSP, author: nojanath)
def init(outputdir=None):
    outputdir = outputdir or os.path.dirname(__file__)  # os.getcwd()
    current_module = sys.modules[__name__]
    #print (outputdir, current_module)
    debug = 0
    optimize = 0
    lexer = lex.lex(optimize=0, debug=debug)

    # lexer.input('on init\n   declare shared parameter cutoff')
    # while True:
    #     tok = lexer.token()
    #     if tok is None:
    #         break
    #     print (tok)

    return yacc.yacc(method="LALR", optimize=optimize, debug=debug,
                     write_tables=0, module=current_module, start='script',
                     outputdir=outputdir, tabmodule='ksp_parser_tab')
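
init() returns the yacc parser; the lexer it builds becomes PLY's most recently created one, which parser.parse() picks up by default. A caller would therefore presumably do something like this (the KSP source text is hypothetical):

parser = init()
source = "on init\n    declare x\nend on"   # hypothetical KSP script
tree = parser.parse(source)                 # parses from the 'script' start symbol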
assembler.py (project: sixteen, author: jbchouinard)
def main(fp_in, fp_out):
    lex.lex()
    lex.input(fp_in.read())
    tokens = iter(lex.token, None)
    instructions = list(assemble(parse(tokens)))
    allocate_names()
    inst_stream = emit_inst_bytes(substitute_names(instructions))
    data_stream = emit_data_bytes()
    byte_stream = itertools.chain(inst_stream, data_stream)
    write(byte_stream, fp_out)
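
Note the two-argument form of iter(): iter(lex.token, None) calls lex.token() repeatedly and stops when it returns the sentinel None, which is how PLY signals end of input. The bare lex.lex(), lex.input() and lex.token() calls are module-level conveniences that all operate on the most recently built lexer.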
tdx_lex.py (project: tdx_formula, author: woodylee1974)
def build(self, **kwargs):
        self.lexer = lex.lex(object=self, debug=False)
function_to_lexed.py (project: FPTuner, author: soarlab)
def lex_function(text):
  return _function_lexer.lex(text)
franca_lexer.py (project: franca-tools, author: ingmarlehmann)
def build(self, **kwargs):
            """ Builds the lexer from the specification. Must be
                called after the lexer object is created.
                This method exists separately, because the PLY
                manual warns against calling lex.lex inside
                __init__
            """
            self.lexer = lex.lex(object=self, **kwargs)
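
Following that advice, construction and building are two separate steps. Hypothetical usage, assuming the enclosing class is the module's Lexer:

franca_lexer = Lexer()     # name assumed; construct first...
franca_lexer.build()       # ...then build, per the docstring
franca_lexer.lexer.input("interface Foo { }")   # hypothetical Franca IDL fragment
tok = franca_lexer.lexer.token()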
cpp.py: Preprocessor.group_lines() from PLY's bundled cpp.py, vendored identically by noc-orchestrator (DirceuSilvaLabs) and SwiftKitten (johncsnyder)
def group_lines(self,input):
        lex = self.lexer.clone()
        lines = [x.rstrip() for x in input.splitlines()]
        for i in range(len(lines)):
            j = i+1
            while lines[i].endswith('\\') and (j < len(lines)):
                lines[i] = lines[i][:-1]+lines[j]
                lines[j] = ""
                j += 1

        input = "\n".join(lines)
        lex.input(input)
        lex.lineno = 1

        current_line = []
        while True:
            tok = lex.token()
            if not tok:
                break
            current_line.append(tok)
            if tok.type in self.t_WS and '\n' in tok.value:
                yield current_line
                current_line = []

        if current_line:
            yield current_line

    # ----------------------------------------------------------------------
    # tokenstrip()
    # 
    # Remove leading/trailing whitespace tokens from a token list
    # ----------------------------------------------------------------------
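
The comment block above introduces tokenstrip(); in PLY's distributed cpp.py its body is roughly the following, reproduced for reference (it may differ between PLY versions):

def tokenstrip(self,tokens):
        i = 0
        while i < len(tokens) and tokens[i].type in self.t_WS:
            i += 1
        del tokens[:i]
        i = len(tokens)-1
        while i >= 0 and tokens[i].type in self.t_WS:
            i -= 1
        del tokens[i+1:]
        return tokens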
simplequery.py (project: PySQLKits, author: healerkx)
def __compile(self, code):
        lex.lex()
        parser = yacc.yacc(start='statements')

        statements = parser.parse(code)
        #self.__dump(statements)
        return statements
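
The start='statements' keyword overrides the grammar's default start symbol (otherwise taken from the first p_ rule defined in the module), so one rule set can expose several entry points.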
lexer.py (project: pypuppetdbquery, author: bootc)
def __init__(self, **kwargs):
        super(Lexer, self).__init__()
        self.lexer = lex.lex(object=self, **kwargs)

