Example usages (source snippets) of the Java class org.antlr.v4.runtime.WritableToken

Source file: BaseTest.java — project: ANTLR-Swift-Target (views: 22, bookmarks: 0, likes: 0, comments: 0)
/**
 * Syncs token {@code i} through the superclass, then forces any token whose
 * type is listed in {@code hide} onto the hidden channel.
 *
 * @param i index of the token to sync
 * @return {@code false} if the superclass failed to sync to {@code i},
 *         {@code true} otherwise
 */
@Override
protected boolean sync(int i) {
    boolean synced = super.sync(i);
    if (synced) {
        Token token = get(i);
        if (hide.contains(token.getType())) {
            // Token was materialized by this stream, so the cast is safe here.
            ((WritableToken) token).setChannel(Token.HIDDEN_CHANNEL);
        }
    }
    return synced;
}
Source file: BaseTest.java — project: ANTLR-Swift-Target (views: 27, bookmarks: 0, likes: 0, comments: 0)
/**
 * Delegates the sync to the superclass and, on success, re-channels every
 * token whose type appears in the {@code hide} set to the hidden channel.
 *
 * @param i index of the token to sync
 * @return whether the superclass managed to sync to index {@code i}
 */
@Override
protected boolean sync(int i) {
    if (!super.sync(i)) {
        return false;
    }
    Token current = get(i);
    boolean suppress = hide.contains(current.getType());
    if (suppress) {
        // This stream produces WritableToken instances, so the cast holds.
        WritableToken writable = (WritableToken) current;
        writable.setChannel(Token.HIDDEN_CHANNEL);
    }
    return true;
}
Source file: Formatter.java — project: codebuff (views: 28, bookmarks: 0, likes: 0, comments: 0)
/**
 * Format the document and return the formatted text. Does not affect/alter
 * {@code doc}; all mutation happens on a duplicated copy ({@code testDoc}).
 * <p>
 * Single-use: a second call on the same Formatter instance throws.
 *
 * @param doc             the parsed input document to format
 * @param collectAnalysis if true, record per-token analysis (for debugging)
 * @return the formatted document text
 * @throws IllegalArgumentException if {@code format} was already called once
 * @throws Exception                propagated from document duplication/walking
 */
public String format(InputDocument doc, boolean collectAnalysis) throws Exception {
    // Guard: testDoc is assigned below on first call, making this one-shot.
    if ( testDoc!=null ) throw new IllegalArgumentException("can't call format > once");
    // for debugging we need a map from original token with actual line:col to tree node. used by token analysis
    originalDoc = doc;
    originalTokenToNodeMap = indexTree(doc.tree);
    originalTokens = doc.tokens;

    this.testDoc = InputDocument.dup(doc); // make copy of doc, getting new tokens, tree
    output = new StringBuilder();
    this.realTokens = getRealTokens(testDoc.tokens);
    // squeeze out ws and kill any line/col info so we can't use ground truth by mistake
    wipeCharPositionInfoAndWhitespaceTokens(testDoc.tokens); // all except for first token
    // Classifiers that predict whitespace injection and horizontal position
    // from the training corpus.
    wsClassifier = new kNNClassifier(corpus, wsFeatures, corpus.injectWhitespace);
    hposClassifier = new kNNClassifier(corpus, hposFeatures, corpus.hpos);

    // Pre-size the analysis vector so entries can be set by token index later.
    analysis = new Vector<>(testDoc.tokens.size());
    analysis.setSize(testDoc.tokens.size());

    // make an index on the duplicated doc tree with tokens missing line:col info
    if ( tokenToNodeMap == null ) {
        tokenToNodeMap = indexTree(testDoc.tree);
    }

    // First real (non-hidden) token still carries its original position info
    // (wipe above skipped it), which seeds line/column tracking below.
    WritableToken firstToken = (WritableToken)testDoc.tokens.getNextRealToken(-1);

    String prefix = originalTokens.getText(Interval.of(0, firstToken.getTokenIndex())); // gets any comments in front + first real token
    charPosInLine = firstToken.getCharPositionInLine()+firstToken.getText().length()+1; // start where first token left off
    line = Tool.count(prefix, '\n') + 1;
    output.append(prefix);

    // first identify oversize lists with separators
    IdentifyOversizeLists splitter = new IdentifyOversizeLists(corpus, testDoc.tokens, tokenToNodeMap);
    ParseTreeWalker.DEFAULT.walk(splitter, testDoc.tree);
    tokenToListInfo = splitter.tokenToListInfo;

    // NOTE(review): realTokens is recomputed here after the splitter walk —
    // presumably that walk can alter the token stream; confirm before removing.
    realTokens = getRealTokens(testDoc.tokens);
    for (int i = Trainer.ANALYSIS_START_TOKEN_INDEX; i<realTokens.size(); i++) { // can't process first token
        int tokenIndexInStream = realTokens.get(i).getTokenIndex();
        processToken(i, tokenIndexInStream, collectAnalysis);
    }

    releaseMemory();

    return output.toString();
}
Source file: PreprocessTokenSource.java — project: rpgleparser (views: 19, bookmarks: 0, likes: 0, comments: 0)
/**
 * Moves {@code t} onto the RPG lexer's hidden channel, provided the token
 * is mutable; non-writable tokens are left untouched.
 *
 * @param t token to suppress from the default channel
 */
private void hide(final Token t){
    if (t instanceof WritableToken) {
        WritableToken writable = (WritableToken) t;
        writable.setChannel(RpgLexer.HIDDEN);
    }
}
Source file: PreprocessTokenSource.java — project: rpgleparser (views: 28, bookmarks: 0, likes: 0, comments: 0)
/**
 * Suppresses {@code t} (re-channels it to hidden when writable) and then
 * appends it to the output token buffer regardless.
 *
 * @param t token to hide and enqueue
 */
private void hideAndAdd(final Token t){
    final boolean mutable = t instanceof WritableToken;
    if (mutable) {
        ((WritableToken) t).setChannel(RpgLexer.HIDDEN);
    }
    addToken(t);
}


Questions


Interview experiences


Articles

WeChat
Official account

Scan the QR code to follow the official account