/**
 * Parses pipeline declarations from the given source text.
 *
 * @param pipelines pipeline declaration source code
 * @return the parsed pipelines
 * @throws ParseException if any lexer or parser errors were collected
 */
public List<Pipeline> parsePipelines(String pipelines) throws ParseException {
    final ParseContext context = new ParseContext(false);
    // Route both lexer and parser syntax errors into the shared parse context.
    final SyntaxErrorListener syntaxErrors = new SyntaxErrorListener(context);

    final RuleLangLexer lexer = new RuleLangLexer(new ANTLRInputStream(pipelines));
    lexer.removeErrorListeners();
    lexer.addErrorListener(syntaxErrors);

    final RuleLangParser parser = new RuleLangParser(new CommonTokenStream(lexer));
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(syntaxErrors);

    // Build the AST from the pipeline declarations; errors accumulate in the context.
    WALKER.walk(new PipelineAstBuilder(context), parser.pipelineDecls());

    if (!context.getErrors().isEmpty()) {
        throw new ParseException(context.getErrors());
    }
    return context.pipelines;
}
java类org.antlr.v4.runtime.DefaultErrorStrategy的实例源码
PipelineRuleParser.java 文件源码
项目:graylog-plugin-pipeline-processor
阅读 26
收藏 0
点赞 0
评论 0
AstBuilderFactory.java 文件源码
项目:kalang
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Creates an {@link AstBuilder} for the given compilation unit, wiring the parser's
 * error strategy so that recognition errors are reported through the builder's
 * diagnosis reporter instead of ANTLR's default recovery output.
 */
public static AstBuilder createAstBuilder(CompilationUnit source, TokenStream tokens) {
    final KalangParser parser = new KalangParser(tokens);
    final AstBuilder builder = new AstBuilder(source, parser);
    parser.setErrorHandler(new DefaultErrorStrategy() {
        @Override
        public void reportError(Parser recognizer, RecognitionException e) {
            final String message = AntlrErrorString.exceptionString(recognizer, e);
            final Token offending = e.getOffendingToken();
            final RuleContext ctx = e.getCtx();
            // Prefer the start of the offending rule as the range start; fall back
            // to the offending token itself when no parser rule context is available.
            final Token first = (ctx instanceof ParserRuleContext)
                    ? ((ParserRuleContext) ctx).getStart()
                    : offending;
            builder.getDiagnosisReporter().report(Diagnosis.Kind.ERROR, message, first, offending);
        }
    });
    return builder;
}
KalangCompiler.java 文件源码
项目:kalang
阅读 21
收藏 0
点赞 0
评论 0
/**
 * Creates a {@link KalangParser} whose error strategy converts recognition errors
 * into {@link Diagnosis} objects handed to the configured diagnosis handler.
 */
@Override
public KalangParser createParser(CompilationUnit compilationUnit, CommonTokenStream tokenStream) {
    final KalangParser parser = new KalangParser(tokenStream);
    parser.setErrorHandler(new DefaultErrorStrategy() {
        @Override
        public void reportError(Parser recognizer, RecognitionException e) {
            final String message = AntlrErrorString.exceptionString(recognizer, e);
            // Report the error as a diagnosis covering the offending token's offsets.
            diagnosisHandler.handleDiagnosis(new Diagnosis(
                    compilationUnit.getCompileContext(),
                    Diagnosis.Kind.ERROR,
                    OffsetRangeHelper.getOffsetRange(e.getOffendingToken()),
                    message,
                    compilationUnit.getSource()));
        }
    });
    return parser;
}
TwoPhaseParser.java 文件源码
项目:hypertalk-java
阅读 19
收藏 0
点赞 0
评论 0
/**
 * "Second phase" parsing attempt. Accepts any valid HyperTalk script entry, but is
 * less performant for inputs utilizing certain parts of the grammar.
 *
 * @param compilationUnit The unit of work to compile/parse. Represents the grammar's start symbol
 *                        that should be used.
 * @param scriptText A plaintext representation of the HyperTalk script to parse
 * @return The root of the abstract syntax tree associated with the given compilation unit
 *         (i.e., {@link Script}).
 * @throws HtSyntaxException Thrown if an error occurs while parsing the script.
 */
static Object parseLL(CompilationUnit compilationUnit, String scriptText) throws HtSyntaxException {
    final HyperTalkErrorListener errorListener = new HyperTalkErrorListener();
    final HyperTalkLexer lexer = new HyperTalkLexer(new CaseInsensitiveInputStream(scriptText));

    final HyperTalkParser parser = new HyperTalkParser(new CommonTokenStream(lexer));
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    parser.removeErrorListeners(); // don't log to console
    parser.addErrorListener(errorListener);

    try {
        final ParseTree tree = compilationUnit.getParseTree(parser);
        // Syntax errors are collected by the listener rather than thrown; surface the first one.
        if (!errorListener.errors.isEmpty()) {
            throw errorListener.errors.get(0);
        }
        return new HyperTalkTreeVisitor().visit(tree);
    } catch (RecognitionException e) {
        throw new HtSyntaxException(e);
    }
}
ParserFactory.java 文件源码
项目:goworks
阅读 21
收藏 0
点赞 0
评论 0
/**
 * Returns a code-completion parser over the given token stream: full LL prediction,
 * no parse tree, local-context-first simulation, and package-name checks disabled.
 */
@NonNull
public CodeCompletionGoParser getParser(@NonNull TokenStream input) {
    final CodeCompletionGoParser result = createParser(input);

    // Completion never needs a tree or console error output.
    result.setBuildParseTree(false);
    result.removeErrorListeners();
    result.setErrorHandler(new DefaultErrorStrategy());

    result.getInterpreter().setPredictionMode(PredictionMode.LL);
    result.getInterpreter().force_global_context = false;
    result.getInterpreter().always_try_local_context = true;

    result.setCheckPackageNames(false);
    result.setPackageNames(Collections.<String>emptyList());
    return result;
}
GrammarParserFactory.java 文件源码
项目:goworks
阅读 15
收藏 0
点赞 0
评论 0
/**
 * Returns a grammar parser configured for full LL prediction with descriptive
 * error reporting and no parse-tree construction.
 */
@NonNull
public GrammarParser getParser(@NonNull TokenStream input) {
    final GrammarParser parser = createParser(input);
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    return parser;
}
TemplateParserFactory.java 文件源码
项目:goworks
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Returns a template parser configured for full LL prediction with descriptive
 * error reporting and no parse-tree construction.
 */
@NonNull
public TemplateParser getParser(@NonNull TokenStream input) {
    final TemplateParser parser = createParser(input);
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    return parser;
}
GrammarParserFactory.java 文件源码
项目:antlrworks2
阅读 14
收藏 0
点赞 0
评论 0
/**
 * Returns a grammar parser configured for full LL prediction with descriptive
 * error reporting and no parse-tree construction.
 */
@NonNull
public GrammarParser getParser(@NonNull TokenStream input) {
    final GrammarParser parser = createParser(input);
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    return parser;
}
TemplateParserFactory.java 文件源码
项目:antlrworks2
阅读 17
收藏 0
点赞 0
评论 0
/**
 * Returns a template parser configured for full LL prediction with descriptive
 * error reporting and no parse-tree construction.
 */
@NonNull
public TemplateParser getParser(@NonNull TokenStream input) {
    final TemplateParser parser = createParser(input);
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    return parser;
}
PipelineRuleParser.java 文件源码
项目:graylog-plugin-pipeline-processor
阅读 22
收藏 0
点赞 0
评论 0
/**
 * Parses the given rule source and optionally generates a Java class for it if the classloader is not null.
 *
 * @param id the id of the rule, necessary to generate code
 * @param rule rule source code
 * @param silent don't emit status messages during parsing
 * @param ruleClassLoader the classloader to load the generated code into (can be null)
 * @return the parsed rule
 * @throws ParseException if one or more parse errors occur
 */
public Rule parseRule(String id, String rule, boolean silent, PipelineClassloader ruleClassLoader) throws ParseException {
    final ParseContext parseContext = new ParseContext(silent);
    // Collect lexer and parser syntax errors into the shared parse context.
    final SyntaxErrorListener errorListener = new SyntaxErrorListener(parseContext);
    final RuleLangLexer lexer = new RuleLangLexer(new ANTLRInputStream(rule));
    lexer.removeErrorListeners();
    lexer.addErrorListener(errorListener);
    final RuleLangParser parser = new RuleLangParser(new CommonTokenStream(lexer));
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.removeErrorListeners();
    parser.addErrorListener(errorListener);
    final RuleLangParser.RuleDeclarationContext ruleDeclaration = parser.ruleDeclaration();
    // parsing stages:
    // 1. build AST nodes, checks for invalid var, function refs
    // 2. type annotator: infer type information from var refs, func refs
    // 3. checker: static type check w/ coercion nodes
    // 4. optimizer: TODO
    WALKER.walk(new RuleAstBuilder(parseContext), ruleDeclaration);
    WALKER.walk(new RuleTypeAnnotator(parseContext), ruleDeclaration);
    WALKER.walk(new RuleTypeChecker(parseContext), ruleDeclaration);
    if (parseContext.getErrors().isEmpty()) {
        // NOTE(review): assumes an error-free parse yields at least one rule — verify getRules() cannot be empty here.
        Rule parsedRule = parseContext.getRules().get(0).withId(id);
        if (ruleClassLoader != null && ConfigurationStateUpdater.isAllowCodeGeneration()) {
            try {
                final Class<? extends GeneratedRule> generatedClass = codeGenerator.generateCompiledRule(parsedRule, ruleClassLoader);
                if (generatedClass != null) {
                    parsedRule = parsedRule.toBuilder().generatedRuleClass(generatedClass).build();
                }
            } catch (Exception e) {
                // Code generation is an optimization only; fall back to interpreting the rule on any failure.
                log.warn("Unable to compile rule {} to native code, falling back to interpreting it: {}", parsedRule.name(), e.getMessage());
            }
        }
        return parsedRule;
    }
    throw new ParseException(parseContext.getErrors());
}
ParserDebuggerReferenceAnchorsParserTask.java 文件源码
项目:goworks
阅读 21
收藏 0
点赞 0
评论 0
/**
 * Produces (and caches) the debugger's reference parse result for the snapshot: a full
 * LL exact-ambiguity-detection parse driven by the interpreter data stored on the document,
 * publishing both the FILE_PARSE_RESULT and the REFERENCE_PARSE_TREE.
 */
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        // Reuse cached results when available (NO_UPDATE: do not trigger a new computation).
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            // Token source comes from the editor's lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;
            // Grammar/ATN description is attached to the document by the debugger editor kit.
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
            long startTime = System.nanoTime();
            // Most precise (and most expensive) prediction mode; ambiguity reporting enabled for debugging.
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }
        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
CurrentRuleContextParserTask.java 文件源码
项目:goworks
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Locates the grammar rule enclosing the caret, re-parses just that rule, and publishes
 * the result as CURRENT_RULE_CONTEXT. Uses the fast SLL/bail strategy first and falls
 * back to full LL with default error recovery on syntax errors.
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_ruleSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    if (requestedData.contains(GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT)) {
        CurrentRuleContextData data = null;
        if (context.getPosition() != null) {
            int caretOffset = context.getPosition().getOffset();
            Future<ParserData<List<Anchor>>> result =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
            ParserData<List<Anchor>> anchorsData = result.get();
            List<Anchor> anchors = anchorsData.getData();
            GrammarParser.RuleSpecContext ruleContext = null;
            int grammarType = -1;
            // A stale file model is acceptable here (ALLOW_STALE).
            Future<ParserData<FileModel>> fileModelResult =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.ALLOW_STALE, ParserDataOptions.SYNCHRONOUS));
            ParserData<FileModel> fileModelData = fileModelResult.get();
            FileModel fileModel = fileModelData.getData();
            if (anchors != null) {
                Anchor enclosing = null;
                /*
                 * parse the current rule
                 */
                for (Anchor anchor : anchors) {
                    if (anchor instanceof GrammarParserAnchorListener.GrammarTypeAnchor) {
                        grammarType = ((GrammarParserAnchorListener.GrammarTypeAnchor)anchor).getGrammarType();
                        continue;
                    }
                    // Anchors are ordered by position: keep the last span containing the caret,
                    // and stop once anchors start past the caret.
                    if (anchor.getSpan().getStartPosition(snapshot).getOffset() <= caretOffset && anchor.getSpan().getEndPosition(snapshot).getOffset() > caretOffset) {
                        enclosing = anchor;
                    } else if (anchor.getSpan().getStartPosition(snapshot).getOffset() > caretOffset) {
                        break;
                    }
                }
                if (enclosing != null) {
                    CharStream input = new DocumentSnapshotCharStream(snapshot);
                    // Start lexing at the beginning of the enclosing rule, not the file.
                    input.seek(enclosing.getSpan().getStartPosition(snapshot).getOffset());
                    GrammarLexer lexer = new GrammarLexer(input);
                    CommonTokenStream tokens = new TaskTokenStream(lexer);
                    GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokens);
                    try {
                        // Fast path: SLL prediction, bail out on the first syntax error.
                        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                        parser.removeErrorListeners();
                        parser.setBuildParseTree(true);
                        parser.setErrorHandler(new BailErrorStrategy());
                        ruleContext = parser.ruleSpec();
                    } catch (ParseCancellationException ex) {
                        if (ex.getCause() instanceof RecognitionException) {
                            // retry with default error handler
                            tokens.reset();
                            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                            parser.setInputStream(tokens);
                            parser.setErrorHandler(new DefaultErrorStrategy());
                            ruleContext = parser.ruleSpec();
                        } else {
                            throw ex;
                        }
                    }
                }
            }
            data = new CurrentRuleContextData(snapshot, grammarType, fileModel, ruleContext);
        }
        results.addResult(new BaseParserData<>(context, GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT, snapshot, data));
    }
}
ReferenceAnchorsParserTask.java 文件源码
项目:goworks
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Computes (or reuses cached) reference data for a grammar file: the reference parse
 * tree, anchor points, and the code-model FileModel. Parses SLL/bail first, retrying
 * with full LL and default error recovery on syntax errors.
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_grammarSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    // Legacy-mode documents are not parsed; publish an explicit empty anchors result.
    boolean legacyMode = GrammarEditorKit.isLegacyMode(snapshot);
    if (legacyMode) {
        ParserData<List<Anchor>> emptyResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, null);
        results.addResult(emptyResult);
        return;
    }
    synchronized (lock) {
        // Reuse cached results when available (NO_UPDATE: do not trigger a new computation).
        ParserData<GrammarSpecContext> parseTreeResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // Token source comes from the editor's lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, GrammarParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            // DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
            // input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
            // GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            GrammarSpecContext parseResult;
            GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // Fast path: SLL prediction, bail out on the first syntax error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.grammarSpec();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.grammarSpec();
                } else {
                    throw ex;
                }
            }
            parseTreeResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                // Derive anchor points by walking the fresh parse tree.
                GrammarParserAnchorListener listener = new GrammarParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }
            if (fileModelResult == null) {
                FileModelImpl fileModel = null;
                // Only build a file model for snapshots backed by an actual file.
                if (snapshot.getVersionedDocument().getFileObject() != null) {
                    CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                    ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                    fileModel = codeModelBuilderListener.getFileModel();
                    if (fileModel != null) {
                        updateCodeModelCache(fileModel);
                    }
                }
                fileModelResult = new BaseParserData<>(context, GrammarParserDataDefinitions.FILE_MODEL, snapshot, fileModel);
            }
        }
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}
CurrentTemplateContextParserTask.java 文件源码
项目:goworks
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Locates the template group enclosing the caret, re-parses it, and publishes the
 * result as CURRENT_TEMPLATE_CONTEXT. Uses the fast SLL/bail strategy first and
 * falls back to full LL with default error recovery on syntax errors.
 */
@Override
@RuleDependency(recognizer=TemplateParser.class, rule=TemplateParser.RULE_group, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    // No caret position means there is nothing to compute.
    if (context.getPosition() == null) {
        return;
    }
    int caretOffset = context.getPosition().getOffset();
    Future<ParserData<List<Anchor>>> result =
        taskManager.getData(snapshot, TemplateParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
    ParserData<List<Anchor>> anchorsData = result.get();
    List<Anchor> anchors = anchorsData.getData();
    TemplateParser.GroupContext ruleContext = null;
    if (anchors != null) {
        Anchor enclosing = null;
        // int grammarType = -1;
        /*
         * parse the current template
         */
        for (Anchor anchor : anchors) {
            // if (anchor instanceof TemplateParserAnchorListener.TemplateTypeAnchor) {
            //     grammarType = ((TemplateParserAnchorListener.TemplateTypeAnchor)anchor).getGrammarType();
            //     continue;
            // }
            // Anchors are ordered by position: keep the last span containing the caret,
            // and stop once anchors start past the caret.
            if (anchor.getSpan().getStartPosition(snapshot).getOffset() <= caretOffset && anchor.getSpan().getEndPosition(snapshot).getOffset() > caretOffset) {
                enclosing = anchor;
            } else if (anchor.getSpan().getStartPosition(snapshot).getOffset() > caretOffset) {
                break;
            }
        }
        if (enclosing != null) {
            CharStream input = new DocumentSnapshotCharStream(snapshot);
            // Start lexing at the beginning of the enclosing template, not the file.
            input.seek(enclosing.getSpan().getStartPosition(snapshot).getOffset());
            TemplateLexer lexer = new TemplateLexer(input);
            CommonTokenStream tokens = new TaskTokenStream(lexer);
            TemplateParser parser = TemplateParserFactory.DEFAULT.getParser(tokens);
            try {
                // Fast path: SLL prediction, bail out on the first syntax error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                ruleContext = parser.group();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    tokens.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.setInputStream(tokens);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    ruleContext = parser.group();
                } else {
                    throw ex;
                }
            }
        }
    }
    CurrentTemplateContextData data = new CurrentTemplateContextData(snapshot, ruleContext);
    results.addResult(new BaseParserData<>(context, TemplateParserDataDefinitions.CURRENT_TEMPLATE_CONTEXT, snapshot, data));
}
ReferenceAnchorsParserTask.java 文件源码
项目:goworks
阅读 16
收藏 0
点赞 0
评论 0
/**
 * Computes (or reuses cached) reference data for a template group file: the reference
 * parse tree, anchor points, and the code-model FileModel. Parses SLL/bail first,
 * retrying with full LL and default error recovery on syntax errors.
 */
@Override
@RuleDependency(recognizer=TemplateParser.class, rule=TemplateParser.RULE_groupFile, version=4, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results) throws InterruptedException, ExecutionException {
    synchronized (lock) {
        // Reuse cached results when available (NO_UPDATE: do not trigger a new computation).
        ParserData<GroupFileContext> parseTreeResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // Token source comes from the editor's lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, TemplateParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            // DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
            // input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
            // GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            TemplateParser.GroupFileContext parseResult;
            TemplateParser parser = TemplateParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // Fast path: SLL prediction, bail out on the first syntax error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.groupFile();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.groupFile();
                } else {
                    throw ex;
                }
            }
            parseTreeResult = new BaseParserData<>(context, TemplateParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                // Derive anchor points by walking the fresh parse tree.
                TemplateParserAnchorListener listener = new TemplateParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, TemplateParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }
            if (fileModelResult == null) {
                CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                fileModelResult = new BaseParserData<>(context, TemplateParserDataDefinitions.FILE_MODEL, snapshot, codeModelBuilderListener.getFileModel());
            }
        }
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}
GoParserFactory.java 文件源码
项目:goworks
阅读 22
收藏 0
点赞 0
评论 0
/**
 * Applies one of the predefined speed/precision trade-off configurations to the parser.
 * FASTEST/HYBRID use a bail-out error strategy; PRECISE uses full LL with default
 * recovery and descriptive error reporting. The *_SLL variants are unsupported because
 * tail_call_preserves_sll cannot change within a single ATN instance.
 */
protected void configureParser(@NonNull Parser parser, @NonNull ParserConfiguration configuration) {
    ParserATNSimulator interpreter = parser.getInterpreter();
    // common configuration
    interpreter.force_global_context = false;
    interpreter.always_try_local_context = true;
    interpreter.optimize_tail_calls = true;
    parser.setBuildParseTree(true);
    parser.removeErrorListeners();
    switch (configuration) {
    case FASTEST:
        // Cheapest prediction; abort parsing on the first syntax error.
        interpreter.setPredictionMode(PredictionMode.SLL);
        interpreter.tail_call_preserves_sll = false;
        interpreter.treat_sllk1_conflict_as_ambiguity = true;
        parser.setErrorHandler(new BailErrorStrategy());
        break;
    case SLL:
        throw new UnsupportedOperationException("The tail_call_preserves_sll flag cannot change within a single ATN instance.");
        //interpreter.setPredictionMode(PredictionMode.SLL);
        //interpreter.tail_call_preserves_sll = true;
        //interpreter.treat_sllk1_conflict_as_ambiguity = true;
        //parser.setErrorHandler(new BailErrorStrategy<Token>());
        //break;
    case HYBRID:
        // Full LL prediction but still bail on the first syntax error.
        interpreter.setPredictionMode(PredictionMode.LL);
        interpreter.tail_call_preserves_sll = false;
        interpreter.treat_sllk1_conflict_as_ambiguity = true;
        parser.setErrorHandler(new BailErrorStrategy());
        break;
    case HYBRID_SLL:
        throw new UnsupportedOperationException("The tail_call_preserves_sll flag cannot change within a single ATN instance.");
        //interpreter.setPredictionMode(PredictionMode.LL);
        //interpreter.tail_call_preserves_sll = true;
        //interpreter.treat_sllk1_conflict_as_ambiguity = true;
        //parser.setErrorHandler(new BailErrorStrategy<Token>());
        //break;
    case PRECISE:
        // Full LL prediction with standard recovery and descriptive diagnostics.
        interpreter.setPredictionMode(PredictionMode.LL);
        interpreter.tail_call_preserves_sll = false;
        interpreter.treat_sllk1_conflict_as_ambiguity = false;
        parser.setErrorHandler(new DefaultErrorStrategy());
        parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
        break;
    default:
        throw new IllegalArgumentException("Invalid configuration.");
    }
}
ProtoFiles.java 文件源码
项目:protobuf-el
阅读 17
收藏 0
点赞 0
评论 0
/**
 * Creates a builder with ANTLR's default error strategy, a console error listener
 * (empty source name), and a fresh FileDescriptors builder.
 */
Builder() {
    this(new DefaultErrorStrategy(), new ConsoleProtoErrorListener(""), FileDescriptors
        .newBuilder());
}
ParserDebuggerReferenceAnchorsParserTask.java 文件源码
项目:antlrworks2
阅读 23
收藏 0
点赞 0
评论 0
/**
 * Produces (and caches) the debugger's reference parse result for the snapshot: a full
 * LL exact-ambiguity-detection parse driven by the interpreter data stored on the document,
 * publishing both the FILE_PARSE_RESULT and the REFERENCE_PARSE_TREE.
 */
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        // Reuse cached results when available (NO_UPDATE: do not trigger a new computation).
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            // Token source comes from the editor's lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;
            // Grammar/ATN description is attached to the document by the debugger editor kit.
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
            long startTime = System.nanoTime();
            // Most precise (and most expensive) prediction mode; ambiguity reporting enabled for debugging.
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }
        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
CurrentRuleContextParserTask.java 文件源码
项目:antlrworks2
阅读 19
收藏 0
点赞 0
评论 0
/**
 * Locates the grammar rule enclosing the caret, re-parses just that rule, and publishes
 * the result as CURRENT_RULE_CONTEXT. Uses the fast SLL/bail strategy first and falls
 * back to full LL with default error recovery on syntax errors.
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_ruleSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    if (requestedData.contains(GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT)) {
        CurrentRuleContextData data = null;
        if (context.getPosition() != null) {
            int caretOffset = context.getPosition().getOffset();
            Future<ParserData<List<Anchor>>> result =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
            ParserData<List<Anchor>> anchorsData = result.get();
            List<Anchor> anchors = anchorsData.getData();
            GrammarParser.RuleSpecContext ruleContext = null;
            int grammarType = -1;
            // A stale file model is acceptable here (ALLOW_STALE).
            Future<ParserData<FileModel>> fileModelResult =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.ALLOW_STALE, ParserDataOptions.SYNCHRONOUS));
            ParserData<FileModel> fileModelData = fileModelResult.get();
            FileModel fileModel = fileModelData.getData();
            if (anchors != null) {
                Anchor enclosing = null;
                /*
                 * parse the current rule
                 */
                for (Anchor anchor : anchors) {
                    if (anchor instanceof GrammarParserAnchorListener.GrammarTypeAnchor) {
                        grammarType = ((GrammarParserAnchorListener.GrammarTypeAnchor)anchor).getGrammarType();
                        continue;
                    }
                    // Anchors are ordered by position: keep the last span containing the caret,
                    // and stop once anchors start past the caret.
                    if (anchor.getSpan().getStartPosition(snapshot).getOffset() <= caretOffset && anchor.getSpan().getEndPosition(snapshot).getOffset() > caretOffset) {
                        enclosing = anchor;
                    } else if (anchor.getSpan().getStartPosition(snapshot).getOffset() > caretOffset) {
                        break;
                    }
                }
                if (enclosing != null) {
                    CharStream input = new DocumentSnapshotCharStream(snapshot);
                    // Start lexing at the beginning of the enclosing rule, not the file.
                    input.seek(enclosing.getSpan().getStartPosition(snapshot).getOffset());
                    GrammarLexer lexer = new GrammarLexer(input);
                    CommonTokenStream tokens = new TaskTokenStream(lexer);
                    GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokens);
                    try {
                        // Fast path: SLL prediction, bail out on the first syntax error.
                        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                        parser.removeErrorListeners();
                        parser.setBuildParseTree(true);
                        parser.setErrorHandler(new BailErrorStrategy());
                        ruleContext = parser.ruleSpec();
                    } catch (ParseCancellationException ex) {
                        if (ex.getCause() instanceof RecognitionException) {
                            // retry with default error handler
                            tokens.reset();
                            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                            parser.setInputStream(tokens);
                            parser.setErrorHandler(new DefaultErrorStrategy());
                            ruleContext = parser.ruleSpec();
                        } else {
                            throw ex;
                        }
                    }
                }
            }
            data = new CurrentRuleContextData(snapshot, grammarType, fileModel, ruleContext);
        }
        results.addResult(new BaseParserData<>(context, GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT, snapshot, data));
    }
}
ReferenceAnchorsParserTask.java — source code
Project: antlrworks2
Views: 16
Favorites: 0
Likes: 0
Comments: 0
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_grammarSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    // Legacy-mode documents are not handled by this ANTLR 4 based task;
    // publish an empty anchor result so downstream consumers are not left waiting.
    boolean legacyMode = GrammarEditorKit.isLegacyMode(snapshot);
    if (legacyMode) {
        ParserData<List<Anchor>> emptyResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, null);
        results.addResult(emptyResult);
        return;
    }
    synchronized (lock) {
        // NO_UPDATE: read only already-cached data; never trigger a new
        // computation from inside this task.
        ParserData<GrammarSpecContext> parseTreeResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // At least one cached product is missing: re-parse the snapshot.
            // Tokens come from the lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, GrammarParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
//            DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
//            input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
//            GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            GrammarSpecContext parseResult;
            GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // First pass: fast SLL prediction with a bail-out error
                // strategy (throws ParseCancellationException on first error).
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.grammarSpec();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // Retry with the default error handler: rewind the token
                    // stream, switch to full LL prediction, and reattach the
                    // stream so a tree is produced even for invalid input.
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.grammarSpec();
                } else {
                    // Cancellation not caused by a syntax error (e.g. task
                    // interruption) — propagate.
                    throw ex;
                }
            }
            parseTreeResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                // Derive anchor points by walking the freshly built parse tree.
                GrammarParserAnchorListener listener = new GrammarParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }
            if (fileModelResult == null) {
                // Build the code model only for file-backed snapshots.
                FileModelImpl fileModel = null;
                if (snapshot.getVersionedDocument().getFileObject() != null) {
                    CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                    ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                    fileModel = codeModelBuilderListener.getFileModel();
                    if (fileModel != null) {
                        updateCodeModelCache(fileModel);
                    }
                }
                fileModelResult = new BaseParserData<>(context, GrammarParserDataDefinitions.FILE_MODEL, snapshot, fileModel);
            }
        }
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        // Anchor points may legitimately be absent (no open document).
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}
CurrentTemplateContextParserTask.java — source code
Project: antlrworks2
Views: 19
Favorites: 0
Likes: 0
Comments: 0
@Override
@RuleDependency(recognizer=TemplateParser.class, rule=TemplateParser.RULE_group, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {
    // Without a caret position there is no "current" template to locate.
    if (context.getPosition() == null) {
        return;
    }
    int caret = context.getPosition().getOffset();

    // Fetch the dynamic anchor points synchronously; they delimit the
    // top-level regions of the document.
    Future<ParserData<List<Anchor>>> anchorsFuture =
        taskManager.getData(snapshot, TemplateParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));
    List<Anchor> anchors = anchorsFuture.get().getData();

    TemplateParser.GroupContext groupContext = null;
    if (anchors != null) {
        // Find the last anchor whose span [start, end) contains the caret.
        // Anchors are ordered by start offset, so stop scanning at the first
        // anchor that begins past the caret.
        Anchor enclosingAnchor = null;
        for (Anchor candidate : anchors) {
            int start = candidate.getSpan().getStartPosition(snapshot).getOffset();
            if (start > caret) {
                break;
            }
            int end = candidate.getSpan().getEndPosition(snapshot).getOffset();
            if (end > caret) {
                enclosingAnchor = candidate;
            }
        }

        if (enclosingAnchor != null) {
            // Re-parse only the enclosing region, starting at its span.
            CharStream input = new DocumentSnapshotCharStream(snapshot);
            input.seek(enclosingAnchor.getSpan().getStartPosition(snapshot).getOffset());
            TemplateLexer lexer = new TemplateLexer(input);
            CommonTokenStream tokenStream = new TaskTokenStream(lexer);
            TemplateParser parser = TemplateParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // Fast path: SLL prediction with a bail-out strategy that
                // aborts on the first syntax error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                groupContext = parser.group();
            } catch (ParseCancellationException ex) {
                if (!(ex.getCause() instanceof RecognitionException)) {
                    // Not a syntax error — propagate the cancellation.
                    throw ex;
                }
                // Slow path: rewind the token stream, reattach it, and parse
                // again with full LL prediction and standard error recovery.
                tokenStream.reset();
                parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                parser.setInputStream(tokenStream);
                parser.setErrorHandler(new DefaultErrorStrategy());
                groupContext = parser.group();
            }
        }
    }

    CurrentTemplateContextData data = new CurrentTemplateContextData(snapshot, groupContext);
    results.addResult(new BaseParserData<>(context, TemplateParserDataDefinitions.CURRENT_TEMPLATE_CONTEXT, snapshot, data));
}
ReferenceAnchorsParserTask.java — source code
Project: antlrworks2
Views: 15
Favorites: 0
Likes: 0
Comments: 0
@Override
@RuleDependency(recognizer=TemplateParser.class, rule=TemplateParser.RULE_groupFile, version=4, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results) throws InterruptedException, ExecutionException {
    synchronized (lock) {
        // NO_UPDATE: read only already-cached data; never trigger a new
        // computation from inside this task.
        ParserData<GroupFileContext> parseTreeResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, TemplateParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // At least one cached product is missing: re-parse the snapshot.
            // Tokens come from the lexer tagger rather than re-lexing the text.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, TemplateParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
//            DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
//            input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
//            GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            TemplateParser.GroupFileContext parseResult;
            TemplateParser parser = TemplateParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // First pass: fast SLL prediction with a bail-out error
                // strategy (throws ParseCancellationException on first error).
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.groupFile();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // Retry with the default error handler: rewind the token
                    // stream, switch to full LL prediction, and reattach the
                    // stream so a tree is produced even for invalid input.
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.groupFile();
                } else {
                    // Cancellation not caused by a syntax error (e.g. task
                    // interruption) — propagate.
                    throw ex;
                }
            }
            parseTreeResult = new BaseParserData<>(context, TemplateParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                // Derive anchor points by walking the freshly built parse tree.
                TemplateParserAnchorListener listener = new TemplateParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, TemplateParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }
            if (fileModelResult == null) {
                // Build the code model from the same parse tree and token stream.
                CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                fileModelResult = new BaseParserData<>(context, TemplateParserDataDefinitions.FILE_MODEL, snapshot, codeModelBuilderListener.getFileModel());
            }
        }
        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        // Anchor points may legitimately be absent (no open document).
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}
ProtoFiles.java — source code
Project: protobuf-el
Views: 19
Favorites: 0
Likes: 0
Comments: 0
/**
 * Creates a new {@link Builder} configured with a {@link DefaultErrorStrategy}
 * and the supplied error listener.
 *
 * @param errorListener the listener that receives parse errors
 * @return a freshly constructed {@code Builder}
 */
public static Builder newBuilder(final IBaseProtoErrorListener errorListener) {
    final DefaultErrorStrategy errorStrategy = new DefaultErrorStrategy();
    return new Builder(errorStrategy, errorListener);
}