Java example source code for class org.antlr.v4.runtime.Lexer

AntlrLexerState.java source (project: protobuf-netbeans-plugin)

/**
 * @requires lexer != null
 * @modifies lexer
 * @effects Restores this saved state onto lexer (mode and mode stack)
 */
public void apply(Lexer lexer) {
    Assert.notNull(lexer);
    lexer._mode = mode;
    lexer._modeStack.clear();
    if (modeStack != null) {
        lexer._modeStack.addAll(modeStack);
    }
}
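For context, the capture-side counterpart of apply() might look like the sketch below. The mode/modeStack fields and the IntegerList copy are assumptions inferred from how apply() uses them, not the plugin's actual code.

import org.antlr.v4.runtime.misc.IntegerList;

// Hypothetical snapshot constructor: record the lexer's current mode state
// so apply() can restore it later. Assumes fields `int mode` and
// `IntegerList modeStack` on AntlrLexerState.
public AntlrLexerState(Lexer lexer) {
    this.mode = lexer._mode;
    if (lexer._modeStack.isEmpty()) {
        this.modeStack = null;
    } else {
        this.modeStack = new IntegerList();
        this.modeStack.addAll(lexer._modeStack); // defensive copy
    }
}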
Dbg.java source (project: codebuff)
/** Compute a document difference metric in [0, 1.0] between two documents
 *  that are identical except (most likely) for whitespace and comments.
 *
 *  1.0 means the documents are maximally different; 0 means they are identical.
 *
 *  The Levenshtein distance between the documents counts only
 *  whitespace diffs, since the non-whitespace content is identical.
 *  Levenshtein distance is bounded by 0..max(len(doc1), len(doc2)), so
 *  we normalize the distance by dividing by the max whitespace count.
 *
 *  TODO: can we simplify this to a simple walk with two
 *  cursors through the original vs. formatted text, counting
 *  mismatched whitespace? The real tokens act like anchors.
 */
public static double docDiff(String original,
                             String formatted,
                             Class<? extends Lexer> lexerClass)
    throws Exception
{
    // Grammar must strip all but real tokens and whitespace (and put that on hidden channel)
    CodeBuffTokenStream original_tokens = Tool.tokenize(original, lexerClass);
    CodeBuffTokenStream formatted_tokens = Tool.tokenize(formatted, lexerClass);
    // Walk both token streams and examine the whitespace between tokens.
    // Nothing is ever consumed, so LT(i) yields the i-th on-channel token
    // of each stream, starting from 1.
    int i = 1;
    int ws_distance = 0;
    int original_ws = 0;
    int formatted_ws = 0;
    while ( true ) {
        Token ot = original_tokens.LT(i);
        if ( ot==null || ot.getType()==Token.EOF ) break;
        List<Token> ows = original_tokens.getHiddenTokensToLeft(ot.getTokenIndex());
        original_ws += tokenText(ows).length();
        Token ft = formatted_tokens.LT(i);
        if ( ft==null || ft.getType()==Token.EOF ) break;
        List<Token> fws = formatted_tokens.getHiddenTokensToLeft(ft.getTokenIndex());
        formatted_ws += tokenText(fws).length();
        ws_distance += whitespaceEditDistance(tokenText(ows), tokenText(fws));
        i++;
    }
    // It's probably ok to ignore whitespace diffs after the last real token.
    int max_ws = Math.max(original_ws, formatted_ws);
    if ( max_ws==0 ) return 0.0; // identical whitespace; avoid 0/0
    return ((double) ws_distance)/max_ws;
}
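whitespaceEditDistance() is not shown on this page; a plain Levenshtein distance over the two whitespace strings, as in the hypothetical reconstruction below, matches how docDiff uses it.

// Hypothetical stand-in for whitespaceEditDistance(): classic two-row
// Levenshtein distance between corresponding whitespace runs.
public static int whitespaceEditDistance(String s, String t) {
    int[] prev = new int[t.length() + 1];
    int[] curr = new int[t.length() + 1];
    for (int j = 0; j <= t.length(); j++) prev[j] = j;    // distance from empty s
    for (int i = 1; i <= s.length(); i++) {
        curr[0] = i;                                      // distance to empty t
        for (int j = 1; j <= t.length(); j++) {
            int cost = s.charAt(i - 1) == t.charAt(j - 1) ? 0 : 1;
            curr[j] = Math.min(Math.min(curr[j - 1] + 1,  // insertion
                                        prev[j] + 1),     // deletion
                               prev[j - 1] + cost);       // substitution
        }
        int[] tmp = prev; prev = curr; curr = tmp;
    }
    return prev[t.length()];
}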
Tool.java source (project: codebuff)
public static CodeBuffTokenStream tokenize(String doc, Class<? extends Lexer> lexerClass)
    throws Exception
{
    ANTLRInputStream input = new ANTLRInputStream(doc);
    Lexer lexer = getLexer(lexerClass, input);
    CodeBuffTokenStream tokens = new CodeBuffTokenStream(lexer);
    tokens.fill();
    return tokens;
}
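getLexer() is referenced but not listed here; it very likely instantiates the lexer class reflectively through the generated (CharStream) constructor, along the lines of this sketch:

import java.lang.reflect.Constructor;
import org.antlr.v4.runtime.CharStream;

// Probable shape of the getLexer() helper: every ANTLR-generated lexer
// exposes a single-argument CharStream constructor.
public static Lexer getLexer(Class<? extends Lexer> lexerClass, ANTLRInputStream input)
    throws Exception
{
    Constructor<? extends Lexer> ctor = lexerClass.getConstructor(CharStream.class);
    return ctor.newInstance(input);
}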
Trainer.java source (project: codebuff)
public static List<Token> getRealTokens(CommonTokenStream tokens) {
    List<Token> real = new ArrayList<>();
    for (int i = 0; i < tokens.size(); i++) {
        Token t = tokens.get(i);
        if ( t.getType()!=Token.EOF &&
             t.getChannel()==Lexer.DEFAULT_TOKEN_CHANNEL )
        {
            real.add(t);
        }
    }
    return real;
}
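A typical call site, assuming a generated lexer named SomeLexer (hypothetical): fill() must run first so that tokens.size() covers the whole input.

CommonTokenStream tokens =
    new CommonTokenStream(new SomeLexer(new ANTLRInputStream(doc)));
tokens.fill();                              // buffer every token, including EOF
List<Token> real = getRealTokens(tokens);   // on-channel, non-EOF tokens only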
CodeBuffTokenStream.java source (project: codebuff)
public List<Token> getRealTokens(int from, int to) {
    List<Token> real = new ArrayList<Token>();
    for (int i = from; i <= to; i++) {
        Token t = tokens.get(i);
        if ( t.getChannel()==Lexer.DEFAULT_TOKEN_CHANNEL ) real.add(t);
    }
    if ( real.size()==0 ) return null; // note: callers get null, not an empty list
    return real;
}
GUIController.java source (project: codebuff)
public GUIController(List<TokenPositionAnalysis> analysisPerToken,
                     InputDocument testDoc,
                     String formattedText,
                     Class<? extends Lexer> lexerClass)
{
    this.analysisPerToken = analysisPerToken;
    this.formattedText = formattedText;
    this.lexerClass = lexerClass;
    this.testDoc = testDoc;
    this.scope = new BuffScope();
}
Grammar.java source (project: codebuff)
/** Given a token type, get a meaningful name for it, such as the ID
 *  or string literal. If this is a lexer grammar and the ttype is in the
 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
 */
public String getTokenDisplayName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() &&
         ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE )
    {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if ( ttype==Token.EOF ) {
        return "EOF";
    }
    if ( ttype==Token.INVALID_TYPE ) {
        return INVALID_TOKEN_NAME;
    }
    if ( ttype >= 0 && ttype < typeToStringLiteralList.size() && typeToStringLiteralList.get(ttype) != null ) {
        return typeToStringLiteralList.get(ttype);
    }
    if ( ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null ) {
        return typeToTokenList.get(ttype);
    }
    return String.valueOf(ttype);
}
Grammar.java source (project: codebuff)
/** What is the max char value possible for this grammar's target?
 *  Use the Unicode max if no target is defined.
 */
public int getMaxCharValue() {
    return org.antlr.v4.runtime.Lexer.MAX_CHAR_VALUE;
    // if ( generator!=null ) {
    //     return generator.target.getMaxCharValue(generator);
    // }
    // else {
    //     return Label.MAX_CHAR_VALUE;
    // }
}
MainTest.java source (project: gramtest)
/**
 * Test with the arithmetic expressions grammar.
 * @throws java.io.IOException
 */
@Test
public void testArithExpGram() throws IOException {
    Lexer lexer = new bnfLexer(new ANTLRInputStream(getClass().getResourceAsStream("/arithexp.bnf")));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    bnfParser grammarparser = new bnfParser(tokens);
    ParserRuleContext tree = grammarparser.rulelist();
    GeneratorVisitor extractor = new GeneratorVisitor();
    extractor.visit(tree);
    List<String> generatedTests = extractor.getTests();
    Assert.assertEquals(100, generatedTests.size());
}
MainTest.java source (project: gramtest)
/**
 * Test with the course codes grammar.
 * @throws java.io.IOException
 */
@Test
public void testCourseCodeGram() throws IOException {
    Lexer lexer = new bnfLexer(new ANTLRInputStream(getClass().getResourceAsStream("/coursecodes.bnf")));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    bnfParser grammarparser = new bnfParser(tokens);
    ParserRuleContext tree = grammarparser.rulelist();
    GeneratorVisitor extractor = new GeneratorVisitor();
    extractor.visit(tree);
    List<String> generatedTests = extractor.getTests();
    Assert.assertEquals(10, generatedTests.size());
}
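The two tests differ only in the .bnf resource and the expected count; a shared helper such as this hypothetical one would remove the duplication:

// Hypothetical helper consolidating the shared lexer/parser/visitor pipeline.
private List<String> generateTests(String resource) throws IOException {
    Lexer lexer = new bnfLexer(new ANTLRInputStream(getClass().getResourceAsStream(resource)));
    bnfParser grammarparser = new bnfParser(new CommonTokenStream(lexer));
    GeneratorVisitor extractor = new GeneratorVisitor();
    extractor.visit(grammarparser.rulelist());
    return extractor.getTests();
}

// e.g. Assert.assertEquals(100, generateTests("/arithexp.bnf").size());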