/**
 * Attaches maximum-verbosity debugging aids to the given parser: a diagnostic
 * and a verbose error listener plus exact ambiguity detection. Any error
 * listeners registered beforehand are discarded.
 */
public static void addDebugListeners(FusionTablesSqlParser parser) {
    // Start from a clean slate so only the debug listeners fire.
    parser.removeErrorListeners();
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.addErrorListener(new VerboseErrorListener());
    // LL_EXACT_AMBIG_DETECTION reports every ambiguity exactly (slowest mode).
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
}
Java example source for class org.antlr.v4.runtime.atn.PredictionMode
Source file: Util.java
Project: ftc
Views: 21
Favorites: 0
Likes: 0
Comments: 0
Source file: TestRig.java
Project: codebuff
Views: 19
Favorites: 0
Likes: 0
Comments: 0
/**
 * Lexes (and, unless only lexing was requested, parses) the input read from
 * {@code r}, honoring the rig's option flags: {@code showTokens},
 * {@code diagnostics}, {@code SLL}, {@code printTree}, {@code gui},
 * {@code psFile} and {@code trace}. Both {@code r} and {@code is} are closed
 * before returning.
 */
protected void process(Lexer lexer, Class<? extends Parser> parserClass, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, InvocationTargetException, PrintException {
    try {
        ANTLRInputStream charStream = new ANTLRInputStream(r);
        lexer.setInputStream(charStream);
        CommonTokenStream tokenStream = new CommonTokenStream(lexer);
        tokenStream.fill();

        if (showTokens) {
            for (Object token : tokenStream.getTokens()) {
                System.out.println(token);
            }
        }

        // Lexer-only run requested: nothing further to do.
        if (startRuleName.equals(LEXER_START_RULE_NAME)) {
            return;
        }

        if (diagnostics) {
            parser.addErrorListener(new DiagnosticErrorListener());
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
        }
        // A parse tree is needed only when it will be shown or saved.
        if (printTree || gui || psFile != null) {
            parser.setBuildParseTree(true);
        }
        // SLL takes precedence over the diagnostics prediction mode.
        if (SLL) {
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        }
        parser.setTokenStream(tokenStream);
        parser.setTrace(trace);

        try {
            // The start rule is located by name via reflection on the parser class.
            Method startRuleMethod = parserClass.getMethod(startRuleName);
            ParserRuleContext parseTree = (ParserRuleContext) startRuleMethod.invoke(parser, (Object[]) null);
            if (printTree) {
                System.out.println(parseTree.toStringTree(parser));
            }
            if (gui) {
                Trees.inspect(parseTree, parser);
            }
            if (psFile != null) {
                Trees.save(parseTree, parser, psFile); // Generate postscript
            }
        }
        catch (NoSuchMethodException nsme) {
            System.err.println("No method for rule "+startRuleName+" or it has arguments");
        }
    }
    finally {
        // Close both handles we were handed, reader first.
        if (r != null) r.close();
        if (is != null) is.close();
    }
}
Source file: ProgramParser.java
Project: Alpha
Views: 20
Favorites: 0
Likes: 0
Comments: 0
/**
 * Parses an ASP-Core-2 program from the given character stream into the
 * internal {@code Program} representation.
 * <p>
 * Uses ANTLR's two-stage parsing strategy: a first pass in the fast SLL
 * prediction mode with a bail-out error strategy; if that pass fails with a
 * recognition error, the input and parser are rewound/reset and the program is
 * re-parsed in full LL mode with the default (recovering) error strategy so
 * that all syntax errors are reported.
 *
 * @param stream the source to parse
 * @return the translated internal program representation
 * @throws IOException propagated from the underlying stream
 */
public Program parse(CharStream stream) throws IOException {
    /*
    // In order to require less memory: use unbuffered streams and avoid constructing a full parse tree.
    ASPCore2Lexer lexer = new ASPCore2Lexer(new UnbufferedCharStream(is));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    final ASPCore2Parser parser = new ASPCore2Parser(new UnbufferedTokenStream<>(lexer));
    parser.setBuildParseTree(false);
    */
    CommonTokenStream tokens = new CommonTokenStream(
        new ASPCore2Lexer(stream)
    );
    final ASPCore2Parser parser = new ASPCore2Parser(tokens);
    // Try SLL parsing mode (faster but may terminate incorrectly).
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    parser.removeErrorListeners();
    parser.setErrorHandler(new BailErrorStrategy());
    final CustomErrorListener errorListener = new CustomErrorListener(stream.getSourceName());
    ASPCore2Parser.ProgramContext programContext;
    try {
        // Parse program
        programContext = parser.program();
    } catch (ParseCancellationException e) {
        // Recognition exception may be caused simply by SLL parsing failing,
        // retry with LL parser and DefaultErrorStrategy printing errors to console.
        if (e.getCause() instanceof RecognitionException) {
            tokens.seek(0);
            // BUGFIX: the parser itself must also be reset, otherwise state left
            // over from the failed SLL pass (most notably the syntax-error
            // counter) leaks into the second pass and can make a successful LL
            // parse be rejected by the getNumberOfSyntaxErrors() check below.
            parser.reset();
            parser.addErrorListener(errorListener);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            // Re-run parse.
            programContext = parser.program();
        } else {
            throw e;
        }
    }
    // If our error listener recorded a recognition exception during parsing,
    // just re-throw that exception.
    // At this time, error messages will already have been printed to standard
    // error, because ANTLR by default adds an
    // org.antlr.v4.runtime.ConsoleErrorListener to every parser. That
    // ConsoleErrorListener prints useful messages, but does not report back to
    // our code.
    // org.antlr.v4.runtime.BailErrorStrategy cannot be used here, because it
    // would abruptly stop parsing as soon as the first error is reached (i.e.
    // no recovery is attempted) and the user would only see the first error
    // encountered.
    if (errorListener.getRecognitionException() != null) {
        throw errorListener.getRecognitionException();
    }
    // Abort parsing if there were some (recoverable) syntax errors.
    if (parser.getNumberOfSyntaxErrors() != 0) {
        throw new ParseCancellationException();
    }
    // Construct internal program representation.
    ParseTreeVisitor visitor = new ParseTreeVisitor(externals);
    return visitor.translate(programContext);
}
Source file: TestFiles.java
Project: rpgleparser
Views: 27
Favorites: 0
Likes: 0
Comments: 0
/**
 * Round-trips one {@code sourceFile} through the RPG lexer/parser and compares
 * the resulting token list and parse tree against the contents of the matching
 * ".expected.txt" file. If no expectation exists yet — or {@code autoReplaceFailed}
 * is set and the comparison fails — the expectation file is (re)written instead
 * of failing the test.
 *
 * @throws IOException if the source or expectation file cannot be read or written
 * @throws URISyntaxException propagated from test-utility file resolution
 */
@Test
public void test() throws IOException, URISyntaxException{
    final String inputString = TestUtils.loadFile(sourceFile);
    // Expectation file lives next to the source: foo.rpgle -> foo.expected.txt
    final File expectedFile = new File(sourceFile.getPath().replaceAll("\\.rpgle", ".expected.txt"));
    final String expectedFileText = expectedFile.exists()?TestUtils.loadFile(expectedFile):null;
    final String expectedTokens = getTokens(expectedFileText);
    String expectedTree = getTree(expectedFileText);
    final List<String> errors = new ArrayList<String>();
    // NOTE(review): FixedWidthBufferedReader presumably normalizes RPG
    // fixed-format records before lexing — confirm against its implementation.
    final ANTLRInputStream input = new ANTLRInputStream(new FixedWidthBufferedReader(inputString));
    final RpgLexer rpglexer = new RpgLexer(input);
    final TokenSource lexer = new PreprocessTokenSource(rpglexer);
    final CommonTokenStream tokens = new CommonTokenStream(lexer);
    final RpgParser parser = new RpgParser(tokens);
    // One listener records errors from both the lexer and the parser.
    final ErrorListener errorListener = new ErrorListener(errors, rpglexer, parser);
    rpglexer.addErrorListener(errorListener);
    parser.addErrorListener(errorListener);
    // Printing the tokens consumes the token source, hence the reset() below.
    final String actualTokens = TestUtils.printTokens(lexer,rpglexer.getVocabulary());
    boolean rewriteExpectFile=false;
    if(expectedTokens != null && expectedTokens.trim().length()>0 ){
        if(autoReplaceFailed && !expectedTokens.equals(actualTokens)){
            // Token mismatch in auto-replace mode: regenerate the expectation file.
            rewriteExpectFile=true;
        }else{
            assertEquals("Token lists do not match",expectedTokens,actualTokens);
        }
    }
    // Rewind lexer and parser state for the actual parse run.
    rpglexer.reset();
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    parser.reset();
    final ParseTree parseTree = parser.r();
    final String actualTree = TreeUtils.printTree(parseTree, parser);
    // On syntax errors, dump tokens and tree to aid debugging before failing.
    if(!errors.isEmpty()){
        System.out.println("/*===TOKENS===*/\r\n" + actualTokens + "\r\n");
        System.out.println("/*===TREE===*/\r\n" + actualTree + "\r\n/*======*/");
    }
    assertThat(errors, is(empty()));
    if(expectedTree==null || expectedTree.trim().length() == 0||rewriteExpectFile){
        // No expectation yet (or a rewrite was requested): record current output.
        writeExpectFile(expectedFile,actualTokens,actualTree);
        System.out.println("Tree written to " + expectedFile);
    }else{
        if(autoReplaceFailed && !actualTree.equals(expectedTree)){
            // Auto-replace mode: overwrite the expectation so the assert below passes.
            System.out.println("Replaced content of " + expectedFile);
            expectedTree = actualTree;
            writeExpectFile(expectedFile,actualTokens,actualTree);
        }
        assertEquals("Parse trees do not match",expectedTree,actualTree);
    }
}
Source file: WebkitGenerator.java
Project: jamweaver
Views: 18
Favorites: 0
Likes: 0
Comments: 0
/**
 * Runs the lexer/parser pipeline over the input read from {@code r} and
 * returns the parsed WebKit IDL specification tree, honoring the configured
 * options (showTokens, diagnostics, sll, trace, printTree, gui, psFile).
 */
protected SpecificationContext parse(Lexer lexer, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, PrintException {
    ANTLRInputStream charStream = new ANTLRInputStream(r);
    lexer.setInputStream(charStream);
    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    tokenStream.fill();

    if (options.showTokens) {
        for (Object token : tokenStream.getTokens()) {
            System.out.println(token);
        }
    }

    if (options.diagnostics) {
        parser.addErrorListener(new DiagnosticErrorListener());
        parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    }
    parser.setBuildParseTree(true);
    // SLL overrides the diagnostics prediction mode when both are set.
    if (options.sll) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    }
    parser.setTokenStream(tokenStream);
    parser.setTrace(options.trace);

    SpecificationContext tree = ((WebkitIDLParser) parser).specification();
    if (options.printTree) {
        System.out.println(tree.toStringTree(parser));
    }
    if (options.gui) {
        tree.inspect(parser);
    }
    if (options.psFile != null) {
        tree.save(parser, options.psFile); // Generate postscript
    }
    return tree;
}
Source file: GeckoGenerator.java
Project: jamweaver
Views: 17
Favorites: 0
Likes: 0
Comments: 0
/**
 * Runs the lexer/parser pipeline over the input read from {@code r} and
 * returns the parsed Gecko IDL specification tree, honoring the configured
 * options (showTokens, diagnostics, sll, trace, printTree, gui, psFile).
 */
protected SpecificationContext parse(Lexer lexer, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, PrintException {
    ANTLRInputStream charStream = new ANTLRInputStream(r);
    lexer.setInputStream(charStream);
    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    tokenStream.fill();

    if (options.showTokens) {
        for (Object token : tokenStream.getTokens()) {
            System.out.println(token);
        }
    }

    if (options.diagnostics) {
        parser.addErrorListener(new DiagnosticErrorListener());
        parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    }
    parser.setBuildParseTree(true);
    // SLL overrides the diagnostics prediction mode when both are set.
    if (options.sll) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    }
    parser.setTokenStream(tokenStream);
    parser.setTrace(options.trace);

    SpecificationContext tree = ((GeckoIDLParser) parser).specification();
    if (options.printTree) {
        System.out.println(tree.toStringTree(parser));
    }
    if (options.gui) {
        tree.inspect(parser);
    }
    if (options.psFile != null) {
        tree.save(parser, options.psFile); // Generate postscript
    }
    return tree;
}
Source file: RegressionTestRig.java
Project: antlr4-regressionTestRig
Views: 16
Favorites: 0
Likes: 0
Comments: 0
/**
 * Load the parser as requested by the command line arguments, and then set up
 * the parser for the 'diagnostics', 'printTree' or 'SLL' options.
 * <p>
 * The {@link #parser} and {@link #treePrinter} variables may be null if no
 * parser was requested (that is, if the start rule name is the
 * LEXER_START_RULE_NAME), or if the requested parser could not be loaded or
 * instantiated.
 * <p>
 * The {@link #treePrinter} variable may also be null if the printTree option
 * has not been requested.
 * <p>
 * @throws ClassNotFoundException if the generated "&lt;grammarName&gt;Parser" class
 *         cannot be loaded from the context class loader
 * @throws NoSuchMethodException if the parser class has no (TokenStream) constructor
 * @throws InstantiationException if the parser cannot be instantiated
 * @throws IllegalAccessException if the parser constructor is not accessible
 * @throws InvocationTargetException if the parser constructor itself throws
 */
protected void loadParser() throws ClassNotFoundException, NoSuchMethodException,
    InstantiationException, IllegalAccessException, InvocationTargetException {
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    // Nothing to do for a lexer-only run.
    if ( !startRuleName.equals(LEXER_START_RULE_NAME) ) {
        // ANTLR's code generator names the parser "<grammarName>Parser".
        String parserName = grammarName+"Parser";
        parserClass = null;
        try {
            parserClass = cl.loadClass(parserName).asSubclass(Parser.class);
        } catch (ClassNotFoundException cnfe) {
            System.err.println("ERROR: Can't load "+parserName+" as a parser");
            throw cnfe;
        }
        try {
            // Generated parsers always expose a single (TokenStream) constructor;
            // the token stream is supplied later, so construct with null for now.
            Constructor<? extends Parser> parserCtor = parserClass.getConstructor(TokenStream.class);
            parser = parserCtor.newInstance((TokenStream)null);
        } catch (Exception anException) {
            System.err.println("ERROR: Could not create a parser for "+parserName);
            throw anException;
        }
        // Exact ambiguity reporting for the 'diagnostics' option.
        if ( diagnostics ) {
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
        }
        if ( printTree ) {
            parser.setBuildParseTree(true);
            treePrinter = new TreePrinter(primaryIndentStr,
                                          secondaryIndentStr,
                                          indentCyclePeriod,
                                          parser);
        }
        if ( trace ) {
            traceListener = new PrintStreamTraceListener(parser);
            parser.addParseListener(traceListener);
        }
        if ( SLL ) { // overrides diagnostics
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        }
    }
}
Source file: ParserDebuggerReferenceAnchorsParserTask.java
Project: goworks
Views: 37
Favorites: 0
Likes: 0
Comments: 0
/**
 * Parses the snapshot with a tracing parser interpreter (full LL exact-ambiguity
 * prediction plus statistics collection) and publishes both the file parse
 * result and the reference parse tree, unless cached results already exist.
 */
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        // Check for already-computed results first (NO_UPDATE: do not trigger a parse).
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            // Build the token stream from the lexer tagger attached to this snapshot.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;
            // The grammar to interpret is stored as a document property by the editor kit.
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
            long startTime = System.nanoTime();
            // Collect prediction statistics and report every ambiguity exactly.
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);
            // NOTE(review): 'document' is a field of the enclosing class (not visible
            // here); presumably it refers to the same document as 'snapshot' — confirm.
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }
        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
Source file: CurrentRuleContextParserTask.java
Project: goworks
Views: 17
Favorites: 0
Likes: 0
Comments: 0
/**
 * Locates the grammar rule enclosing the caret and re-parses just that rule,
 * publishing the resulting rule context (plus grammar type and file model) as
 * CURRENT_RULE_CONTEXT data.
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_ruleSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    if (requestedData.contains(GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT)) {
        CurrentRuleContextData data = null;
        if (context.getPosition() != null) {
            int caretOffset = context.getPosition().getOffset();
            // Anchors mark the offsets of top-level constructs in the document.
            Future<ParserData<List<Anchor>>> result =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
            ParserData<List<Anchor>> anchorsData = result.get();
            List<Anchor> anchors = anchorsData.getData();
            GrammarParser.RuleSpecContext ruleContext = null;
            int grammarType = -1;
            // A stale file model is acceptable here (ALLOW_STALE).
            Future<ParserData<FileModel>> fileModelResult =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.ALLOW_STALE, ParserDataOptions.SYNCHRONOUS));
            ParserData<FileModel> fileModelData = fileModelResult.get();
            FileModel fileModel = fileModelData.getData();
            if (anchors != null) {
                Anchor enclosing = null;
                /*
                 * parse the current rule
                 */
                for (Anchor anchor : anchors) {
                    if (anchor instanceof GrammarParserAnchorListener.GrammarTypeAnchor) {
                        grammarType = ((GrammarParserAnchorListener.GrammarTypeAnchor)anchor).getGrammarType();
                        continue;
                    }
                    // The last anchor whose span contains the caret wins; anchors appear
                    // in start-offset order, so we can stop once past the caret.
                    if (anchor.getSpan().getStartPosition(snapshot).getOffset() <= caretOffset && anchor.getSpan().getEndPosition(snapshot).getOffset() > caretOffset) {
                        enclosing = anchor;
                    } else if (anchor.getSpan().getStartPosition(snapshot).getOffset() > caretOffset) {
                        break;
                    }
                }
                if (enclosing != null) {
                    // Parse only the enclosing rule, starting at its span's offset.
                    CharStream input = new DocumentSnapshotCharStream(snapshot);
                    input.seek(enclosing.getSpan().getStartPosition(snapshot).getOffset());
                    GrammarLexer lexer = new GrammarLexer(input);
                    CommonTokenStream tokens = new TaskTokenStream(lexer);
                    GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokens);
                    try {
                        // First attempt: fast SLL prediction, bail out on the first error.
                        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                        parser.removeErrorListeners();
                        parser.setBuildParseTree(true);
                        parser.setErrorHandler(new BailErrorStrategy());
                        ruleContext = parser.ruleSpec();
                    } catch (ParseCancellationException ex) {
                        if (ex.getCause() instanceof RecognitionException) {
                            // retry with default error handler
                            tokens.reset();
                            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                            parser.setInputStream(tokens);
                            parser.setErrorHandler(new DefaultErrorStrategy());
                            ruleContext = parser.ruleSpec();
                        } else {
                            throw ex;
                        }
                    }
                }
            }
            data = new CurrentRuleContextData(snapshot, grammarType, fileModel, ruleContext);
        }
        results.addResult(new BaseParserData<>(context, GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT, snapshot, data));
    }
}
Source file: ReferenceAnchorsParserTask.java
Project: goworks
Views: 18
Favorites: 0
Likes: 0
Comments: 0
/**
 * Produces the reference parse tree, anchor points, and file model for a
 * grammar snapshot, using two-stage (SLL-then-LL) parsing. In legacy mode no
 * parsing happens and a null anchor result is published instead.
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_grammarSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    boolean legacyMode = GrammarEditorKit.isLegacyMode(snapshot);
    if (legacyMode) {
        // Legacy-mode documents are not parsed; publish an explicit empty result.
        ParserData<List<Anchor>> emptyResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, null);
        results.addResult(emptyResult);
        return;
    }
    synchronized (lock) {
        // NO_UPDATE: fetch cached results only; never trigger a new parse here.
        ParserData<GrammarSpecContext> parseTreeResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // Build the token stream from the lexer tagger attached to this snapshot.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, GrammarParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            // DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
            // input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
            // GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            GrammarSpecContext parseResult;
            GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // First attempt: fast SLL prediction, bail out on the first error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.grammarSpec();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.grammarSpec();
                } else {
                    throw ex;
                }
            }
            parseTreeResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
            // Anchor points are derived by walking the fresh parse tree.
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                GrammarParserAnchorListener listener = new GrammarParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }
            // The file model is built (and cached) only for file-backed documents.
            if (fileModelResult == null) {
                FileModelImpl fileModel = null;
                if (snapshot.getVersionedDocument().getFileObject() != null) {
                    CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                    ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                    fileModel = codeModelBuilderListener.getFileModel();
                    if (fileModel != null) {
                        updateCodeModelCache(fileModel);
                    }
                }
                fileModelResult = new BaseParserData<>(context, GrammarParserDataDefinitions.FILE_MODEL, snapshot, fileModel);
            }
        }
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        // Anchor points may legitimately remain null (e.g. document detached).
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}