/**
 * Parses the grammar document snapshot and publishes up to three pieces of
 * parser data: the reference parse tree, the reference anchor points, and the
 * file (code) model. Cached results are reused when available; only missing
 * data is recomputed.
 *
 * @param taskManager   used to look up cached data and the lexer token tagger
 * @param context       the parse context passed through to each result
 * @param snapshot      the immutable document snapshot to parse
 * @param requestedData the data definitions the caller asked for (not consulted
 *                      directly here; this task always produces its fixed set)
 * @param results       sink that receives the produced {@code ParserData} items
 * @throws InterruptedException if interrupted while waiting on a data future
 * @throws ExecutionException   if a data future completed exceptionally
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_grammarSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    // Legacy-mode documents are not analyzed: publish an empty anchor result so
    // consumers waiting on REFERENCE_ANCHOR_POINTS are still unblocked, then bail.
    boolean legacyMode = GrammarEditorKit.isLegacyMode(snapshot);
    if (legacyMode) {
        ParserData<List<Anchor>> emptyResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, null);
        results.addResult(emptyResult);
        return;
    }

    // NOTE(review): presumably serializes parsing for this task instance so the
    // cache probes and the reparse below are atomic — confirm the lock's scope.
    synchronized (lock) {
        // Probe for already-computed data. NO_UPDATE asks the manager for the
        // cached value only, without scheduling a new computation; a null
        // ParserData therefore means "not cached for this snapshot".
        ParserData<GrammarSpecContext> parseTreeResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();

        // Reparse only if any of the three results is missing.
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // Build a token stream over the tagger produced by the lexer task,
            // rather than re-lexing the raw characters.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, GrammarParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            // DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
            // input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
            // GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            GrammarSpecContext parseResult;
            GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // Two-stage parsing: first attempt the fast SLL prediction mode
                // with a bail-out error strategy and no listeners. Most inputs
                // parse successfully on this pass.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.grammarSpec();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    // Second stage: rewind the tokens and reparse with full LL
                    // prediction, error recovery, and a reporting listener so
                    // genuine syntax errors are diagnosed instead of bailing.
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.grammarSpec();
                } else {
                    // Cancellation for any other reason (e.g. interruption) is
                    // propagated to the caller untouched.
                    throw ex;
                }
            }

            parseTreeResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);

            // Anchor points are only computed when the snapshot is backed by an
            // open document (getDocument() != null) — NOTE(review): presumably
            // anchors are only meaningful for an editor view; confirm.
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                GrammarParserAnchorListener listener = new GrammarParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }

            if (fileModelResult == null) {
                // The code model is only built for snapshots backed by a real
                // file object; otherwise a null FileModel is published so the
                // FILE_MODEL slot is still filled for this snapshot.
                FileModelImpl fileModel = null;
                if (snapshot.getVersionedDocument().getFileObject() != null) {
                    CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                    ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                    fileModel = codeModelBuilderListener.getFileModel();
                    if (fileModel != null) {
                        updateCodeModelCache(fileModel);
                    }
                }
                fileModelResult = new BaseParserData<>(context, GrammarParserDataDefinitions.FILE_MODEL, snapshot, fileModel);
            }
        }

        // Publish results. The parse tree and file model are always available at
        // this point; anchor points may be absent (no open document), in which
        // case nothing is reported for that definition.
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}
ReferenceAnchorsParserTask.java 文件源码
java
阅读 17
收藏 0
点赞 0
评论 0
项目:goworks
作者:
评论列表
文章目录