Example source code for instances of the Java class org.antlr.v4.runtime.TokenSource
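All of the snippets on this page either implement TokenSource directly or hand one to a CommonTokenStream. For quick reference, the ANTLR 4 TokenSource contract looks roughly like this (an abridged sketch; see the runtime Javadoc for the authoritative definition):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.TokenFactory;

public interface TokenSource {
    Token nextToken();                  // produce the next token; must eventually return an EOF token
    int getLine();                      // 1-based line of the upcoming token
    int getCharPositionInLine();        // 0-based column of the upcoming token
    CharStream getInputStream();        // underlying character stream (may be null)
    String getSourceName();             // e.g. a file name, or IntStream.UNKNOWN_SOURCE_NAME
    void setTokenFactory(TokenFactory<?> factory);
    TokenFactory<? extends Token> getTokenFactory();
}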

EnhancedPainlessLexer.java (project: elasticsearch_my)
@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
                Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
StatementSplitter.java (project: rainbow)
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
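A minimal usage sketch for the constructor above. The accessors getCompleteStatements() and getPartialStatement(), and the Statement accessors, are assumed from the rest of the class and are not shown in this snippet:

// Hypothetical caller; accessor names are assumptions, not part of the snippet above.
StatementSplitter splitter = new StatementSplitter("SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
for (StatementSplitter.Statement statement : splitter.getCompleteStatements()) {
    System.out.println(statement.statement() + "   [terminated by: " + statement.terminator() + "]");
}
System.out.println("partial tail: " + splitter.getPartialStatement());   // "SELECT"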
StatementSplitter.java (project: rainbow)
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
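For example, calling the method above on SQL that spans several lines should collapse each run of whitespace (one SqlBaseLexer.WS token) into a single space; a sketch of the expected behaviour, not project test code:

String squeezed = StatementSplitter.squeezeStatement("SELECT *\n    FROM users\n    WHERE id = 1");
// squeezed == "SELECT * FROM users WHERE id = 1"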
StatementSplitter.java (project: presto)
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
StatementSplitter.java (project: presto)
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.<String>of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
FetchCompilerError.java (project: xtext-ide)
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    System.err.println(errorLine.replaceAll("\t", "    "));

    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");

    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");
    System.err.println();
}
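To make the caret underlining concrete, here is a small standalone sketch of the same logic (tab handling omitted) with a made-up error position:

public class UnderlineDemo {
    public static void main(String[] args) {
        String errorLine = "x := foo(bar)";
        int charPositionInLine = 5;   // 0-based column of the offending token ("foo")
        int length = 3;               // length of the offending token
        System.err.println(errorLine);
        for (int i = 0; i < charPositionInLine; i++) {
            System.err.print(" ");
        }
        for (int i = 0; i < length; i++) {
            System.err.print("^");
        }
        System.err.println();
        // Prints:
        // x := foo(bar)
        //      ^^^
    }
}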
PSITokenSource.java (project: jetbrains)
/** Create an ANTLR Token from the current token type of the builder
 *  then advance the builder to the next token (which ultimately calls an
 *  ANTLR lexer).  The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType} and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel.
 *  So, whitespace and comments (typically hidden channel) will look like
 *  real tokens. Jetbrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType)builder.getTokenType();
    int type = ideaTType!=null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
BoaErrorListener.java (project: compiler)
public void error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    hasError = true;

    final String filename = tokens.getSourceName();

    System.err.print(filename.substring(filename.lastIndexOf(File.separator) + 1) + ": compilation failed: ");
    System.err.print("Encountered " + kind + " error ");
    if (offendingSymbol != null)
        System.err.print("\"" + offendingSymbol + "\" ");
    System.err.println("at line " + line + ", column " + charPositionInLine + ". " + msg);

    underlineError(tokens, (Token)offendingSymbol, line, charPositionInLine, length);

    if (e != null)
        for (final StackTraceElement st : e.getStackTrace())
            System.err.println("\tat " + st);
    else
        System.err.println("\tat unknown stack");
}
BoaErrorListener.java (project: compiler)
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    System.err.println(errorLine.replaceAll("\t", "    "));

    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");

    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");

    System.err.println();
}
CharsAsTokens.java (project: mini-markdown)
public Token nextToken() {
    Token t = null;
    consumeUnknown();
    int c = input.LA(1);
    int i = input.index();
    if ( c == CharStream.EOF ) {
        t = getTokenFactory().create(Token.EOF, "<EOF>");
    }
    else {
        Integer ttypeI = charToTokenType.get(c);
        t = getTokenFactory().create(
                new Pair<TokenSource,CharStream>(this,input),
                ttypeI, String.valueOf((char)c), Token.DEFAULT_CHANNEL, i, i,
                line, charPosInLine);
    }
    // System.out.println(t.getText());
    consume();
    return t;
}
TaggerTokenSource.java (project: goworks)
@Override
public Token nextToken() {
    if (previousTag != null && previousTag.getToken().getType() == Token.EOF) {
        return previousTag.getToken();
    }

    if (tagIterator.hasNext()) {
        previousTag = tagIterator.next().getTag();
    } else {
        TokenSource source = this;
        String text = null;
        int channel = Token.DEFAULT_CHANNEL;
        int start = snapshot.length();
        int stop = start - 1;
        int lineCount = snapshot.getLineCount();
        int lineLength = snapshot.findLineFromLineNumber(lineCount - 1).getLength();
        previousTag = new TokenTag<>(tokenFactory.create(getTokenFactorySourcePair(), Token.EOF, text, channel, start, stop, lineCount, lineLength));
    }

    line = -1;
    charPositionInLine = -1;
    return previousTag.getToken();
}
ParseTrees.java (project: goworks)
public static Interval getSourceInterval(@NonNull ParserRuleContext context) {
    Parameters.notNull("context", context);
    int startIndex = context.start.getStartIndex();
    Token stopSymbol = getStopSymbol(context);
    if (stopSymbol == null) {
        return new Interval(startIndex, startIndex - 1);
    }

    int stopIndex;
    if (stopSymbol.getType() != Token.EOF) {
        stopIndex = stopSymbol.getStopIndex();
    } else {
        TokenSource tokenSource = context.getStart().getTokenSource();
        CharStream inputStream = tokenSource != null ? tokenSource.getInputStream() : null;
        if (inputStream != null) {
            stopIndex = inputStream.size() - 1;
        } else {
            stopIndex = context.start.getStartIndex() - 1;
        }
    }

    stopIndex = Math.max(stopIndex, startIndex - 1);
    return new Interval(startIndex, stopIndex);
}
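One hypothetical use of the returned interval is slicing the matched text back out of the character stream; ctx and charStream below are placeholders for a ParserRuleContext and the CharStream the lexer read from:

Interval span = ParseTrees.getSourceInterval(ctx);
String matchedText = charStream.getText(span);   // CharStream.getText(Interval) is part of the ANTLR 4 runtime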
ParsingUtils.java (project: intellij-plugin-v4)
public static Token nextRealToken(CommonTokenStream tokens, int i) {
    int n = tokens.size();
    i++; // search after current i token
    if ( i>=n || i<0 ) return null;
    Token t = tokens.get(i);
    while ( t.getChannel()==Token.HIDDEN_CHANNEL ) {
        if ( t.getType()==Token.EOF ) {
            TokenSource tokenSource = tokens.getTokenSource();
            if ( tokenSource==null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            TokenFactory<?> tokenFactory = tokenSource.getTokenFactory();
            if ( tokenFactory==null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            return tokenFactory.create(Token.EOF, "EOF");
        }
        i++;
        if ( i>=n ) return null; // just in case no EOF
        t = tokens.get(i);
    }
    return t;
}
PsiTokenSource.java (project: intellij-plugin-v4)
@Override
public Token nextToken() {
    TokenIElementType ideaTType = (TokenIElementType)builder.getTokenType();
    int type;
    if ( ideaTType==null ) {
        type = Token.EOF;
    }
    else {
        type = ideaTType.getType();
    }

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = factory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
TaggerTokenSource.java (project: antlrworks2)
@Override
public Token nextToken() {
    if (previousTag != null && previousTag.getToken().getType() == Token.EOF) {
        return previousTag.getToken();
    }

    if (tagIterator.hasNext()) {
        previousTag = tagIterator.next().getTag();
    } else {
        TokenSource source = this;
        String text = null;
        int channel = Token.DEFAULT_CHANNEL;
        int start = snapshot.length();
        int stop = start - 1;
        int lineCount = snapshot.getLineCount();
        int lineLength = snapshot.findLineFromLineNumber(lineCount - 1).getLength();
        previousTag = new TokenTag<>(tokenFactory.create(getTokenFactorySourcePair(), Token.EOF, text, channel, start, stop, lineCount, lineLength));
    }

    line = -1;
    charPositionInLine = -1;
    return previousTag.getToken();
}
ParseTrees.java (project: antlrworks2)
public static Interval getSourceInterval(@NonNull ParserRuleContext context) {
    Parameters.notNull("context", context);
    int startIndex = context.start.getStartIndex();
    Token stopSymbol = getStopSymbol(context);
    if (stopSymbol == null) {
        return new Interval(startIndex, startIndex - 1);
    }

    int stopIndex;
    if (stopSymbol.getType() != Token.EOF) {
        stopIndex = stopSymbol.getStopIndex();
    } else {
        TokenSource tokenSource = context.getStart().getTokenSource();
        CharStream inputStream = tokenSource != null ? tokenSource.getInputStream() : null;
        if (inputStream != null) {
            stopIndex = inputStream.size() - 1;
        } else {
            stopIndex = context.start.getStartIndex() - 1;
        }
    }

    stopIndex = Math.max(stopIndex, startIndex - 1);
    return new Interval(startIndex, stopIndex);
}
StatementSplitter.java (project: rainbow)
public static boolean isEmptyStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
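A sketch of what this means in practice, assuming the generated SqlBase lexer puts whitespace and comments on the hidden channel (which the channel check above relies on):

// Illustrative only; exact channel assignments depend on the SqlBase grammar.
boolean blank = StatementSplitter.isEmptyStatement("   \n");              // true: only hidden-channel tokens before EOF
boolean commentOnly = StatementSplitter.isEmptyStatement("-- nothing");   // true, if comments are hidden
boolean real = StatementSplitter.isEmptyStatement("SELECT 1");            // false: SELECT is on the default channel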
AntlrDocument.java (project: protobuf-netbeans-plugin)
/**
 * @requires text != null && tokenSource != null
 * @effects Makes this be a new Document d with d.text = text and d.tokens
 *          set to the tokens produced by tokenSource
 */
protected AntlrDocument(String text, TokenSource tokenSource) {
    Assert.notNull(text);
    Assert.notNull(tokenSource);

    this.text = text;
    this.tokens = new LinkedList<>();

    initTokens(tokenSource);
}
AntlrDocument.java (project: protobuf-netbeans-plugin)
/**
 * @requires the tokens of this are not initialized yet && source != null
 * @modifies this
 * @effects Initializes the tokens of this with the given token source.
 */
private void initTokens(TokenSource source) {
    Assert.isTrue(tokens.isEmpty());

    Token token;

    do {
        token = source.nextToken();
        tokens.add(token);
    } while (token.getType() != Token.EOF);
}
AbstractGeneratorTest.java (project: org.ops4j.ramler)
private TypescriptParser buildParser(File source) throws IOException {
    CharStream inputCharStream = CharStreams.fromPath(source.toPath());
    TokenSource tokenSource = new TypescriptLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    TypescriptParser parser = new TypescriptParser(inputTokenStream);

    // make parser throw exception on first error
    parser.setErrorHandler(new BailErrorStrategy());

    // print detailed error messages to System.err
    parser.addErrorListener(new ConsoleErrorListener());

    return parser;
}
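Because the parser is configured with BailErrorStrategy, the first syntax error surfaces as a ParseCancellationException instead of being recovered from. A hypothetical test might use it like this (the file name and the start rule name program are placeholders):

TypescriptParser parser = buildParser(new File("example.ts"));
try {
    ParseTree tree = parser.program();   // start rule name assumed
    // ... assert on the tree
} catch (ParseCancellationException e) {
    throw new AssertionError("example.ts does not parse", e);
}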
TestStringRecognition.java (project: learnantlr)
private ProgramContext parseProgram(String program, TestErrorListener errorListener) throws IOException
{
    CharStream inputCharStream = new ANTLRInputStream(new StringReader(program));
    TokenSource tokenSource = new ShapePlacerLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    ShapePlacerParser parser = new ShapePlacerParser(inputTokenStream);
    parser.addErrorListener(errorListener);

    ProgramContext context = parser.program();
    return context;
}
TestArithmeticParser.java (project: arithmetic)
public static ProgramContext parseProgram(String program, ANTLRErrorListener errorListener) throws IOException
{
    CharStream inputCharStream = new ANTLRInputStream(new StringReader(program));
    TokenSource tokenSource = new ArithmeticLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    ArithmeticParser parser = new ArithmeticParser(inputTokenStream);
    parser.addErrorListener(errorListener);
    ProgramContext context = parser.program();
    return context;
}
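A hypothetical caller; the input expression and the inline error listener below are placeholders rather than code from the project's tests:

ProgramContext context = TestArithmeticParser.parseProgram("1 + 2 * 3", new BaseErrorListener() {
    @Override
    public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                            int line, int charPositionInLine, String msg, RecognitionException e) {
        throw new IllegalStateException("syntax error at " + line + ":" + charPositionInLine + ": " + msg);
    }
});
System.out.println(context.toStringTree());   // prints the parse tree with rule indexes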
StatementSplitter.java (project: presto)
public static boolean isEmptyStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.<String>of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
FetchCompilerError.java (project: xtext-ide)
public String[] error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    try {
        //underlineError(tokens, (Token)offendingSymbol, line, charPositionInLine, length);
        error[0] = Integer.toString(line); 
        error[1] = Integer.toString(charPositionInLine);
        error[2] = msg;
    } catch(Exception exception) {
    }

    return error;       
}
TSParser.java (project: tosca)
/** 
 * Initialize token source 
 * @throws IOException 
 */
protected TokenSource newTokenSource(Reader reader, int line, int column) throws IOException
{
    Lexer lexer = newLexer(new ANTLRInputStream(reader));
    lexer.setLine(line);
    lexer.setCharPositionInLine(column);
    return lexer;
}
TSParser.java (project: tosca)
/**
 * Setup input
 * @throws IOException 
 */
protected void setupInput(Reader reader, int line, int column) throws IOException
{
    TokenSource source = newTokenSource(reader, line, column);
    TokenStream input = new CommonTokenStream(source);

    initATN();
    setInputStream(input);

    setBuildParseTree(false);
}
ParserDebuggerTokensTaskTaggerSnapshot.java (project: goworks)
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
DocumentSnapshotToken.java (project: goworks)
public DocumentSnapshotToken(Tuple2<? extends TokenSource, CharStream> source, int type, int channel, int start, int stop) {
    super(source, type, channel, start, stop);
    CharStream inputStream = source.getItem2();
    if (!(inputStream instanceof DocumentSnapshotCharStream)) {
        throw new IllegalArgumentException(String.format("Expected a %s backed by a %s.", TokenSource.class.getSimpleName(), DocumentSnapshotCharStream.class.getSimpleName()));
    }

    DocumentSnapshotCharStream charStream = (DocumentSnapshotCharStream)inputStream;
    snapshot = charStream.getSnapshot();
}
DocumentSnapshotTokenFactory.java (project: goworks)
@Override
public DocumentSnapshotToken create(Tuple2<? extends TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line, int charPositionInLine) {
    if (effectiveSource != null) {
        source = effectiveSource;
    }

    DocumentSnapshotToken t = new DocumentSnapshotToken(source, type, channel, start, stop);
    t.setLine(line);
    t.setCharPositionInLine(charPositionInLine);
    if ( text!=null ) {
        t.setText(text);
    }
    return t;
}
TaggerTokenSource.java (project: goworks)
@NonNull
protected Tuple2<? extends TokenSource, CharStream> getTokenFactorySourcePair() {
    if (tokenFactorySourcePair == null) {
        tokenFactorySourcePair = Tuple.create(this, getInputStream());
    }

    return tokenFactorySourcePair;
}

