/**
 * Returns the ATN for this parser with rule bypass transitions included.
 * Building that variant of the ATN is expensive, so it is created on first
 * use and cached in {@code bypassAltsAtnCache}, keyed by the serialized ATN.
 *
 * @throws UnsupportedOperationException if the current parser does not
 * implement the {@link #getSerializedATN()} method.
 */
@NotNull
public ATN getATNWithBypassAlts() {
    final String serialized = getSerializedATN();
    if (serialized == null) {
        throw new UnsupportedOperationException("The current parser does not support an ATN with bypass alternatives.");
    }
    // The cache object doubles as the lock guarding its own contents.
    synchronized (bypassAltsAtnCache) {
        ATN cached = bypassAltsAtnCache.get(serialized);
        if (cached != null) {
            return cached;
        }
        ATNDeserializationOptions options = new ATNDeserializationOptions();
        options.setGenerateRuleBypassTransitions(true);
        ATN built = new ATNDeserializer(options).deserialize(serialized.toCharArray());
        bypassAltsAtnCache.put(serialized, built);
        return built;
    }
}
Example source code using instances of the Java class org.antlr.v4.runtime.atn.ATNDeserializer
Parser.java 文件源码
项目:Scratch-ApuC
阅读 21
收藏 0
点赞 0
评论 0
Grammar.java 文件源码
项目:codebuff
阅读 25
收藏 0
点赞 0
评论 0
/**
 * Creates a {@link LexerInterpreter} that tokenizes {@code input} using this
 * grammar. Parser-only grammars are rejected; combined grammars delegate to
 * their implicitly generated lexer.
 */
public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (this.isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }
    if (this.isCombined()) {
        // A combined grammar's lexer rules live in the implicit lexer grammar.
        return implicitLexer.createLexerInterpreter(input);
    }
    // Round-trip the ATN through serialization so the interpreter gets its
    // own deserialized instance rather than this grammar's ATN object.
    ATN atnCopy = new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(atn));
    return new LexerInterpreter(fileName,
                                getVocabulary(),
                                Arrays.asList(getRuleNames()),
                                ((LexerGrammar)this).modes.keySet(),
                                atnCopy,
                                input);
}
Grammar.java 文件源码
项目:codebuff
阅读 36
收藏 0
点赞 0
评论 0
/**
 * Creates a {@link GrammarParserInterpreter} that parses {@code tokenStream}
 * using this grammar; rejects lexer-only grammars.
 *
 * @since 4.5.1
 */
public GrammarParserInterpreter createGrammarParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }
    // Round-trip the ATN through serialization so the interpreter gets its
    // own deserialized instance rather than this grammar's ATN object.
    ATN atnCopy = new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(atn));
    return new GrammarParserInterpreter(this, atnCopy, tokenStream);
}
Grammar.java 文件源码
项目:codebuff
阅读 28
收藏 0
点赞 0
评论 0
/**
 * Creates a {@link ParserInterpreter} that parses {@code tokenStream} using
 * this grammar; rejects lexer-only grammars.
 */
public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }
    // Round-trip the ATN through serialization so the interpreter gets its
    // own deserialized instance rather than this grammar's ATN object.
    ATN atnCopy = new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(atn));
    return new ParserInterpreter(fileName,
                                 getVocabulary(),
                                 Arrays.asList(getRuleNames()),
                                 atnCopy,
                                 tokenStream);
}
AbstractGrammarDebuggerEditorKit.java 文件源码
项目:goworks
阅读 25
收藏 0
点赞 0
评论 0
/**
 * Runs the tracing lexer over the document's full text so that
 * {@code analyzer} observes every tokenization step. The token stream itself
 * is discarded; only the trace side effects matter here.
 */
private void loadTokens(final Document document, LexerInterpreterData interpreterData, LexerTraceAnalyzer analyzer) {
    try {
        String text = document.getText(0, document.getLength());
        TracingCharStream input = new TracingCharStream(analyzer, text);
        TracingLexer tracingLexer = new TracingLexer(interpreterData, analyzer, input);
        // Deserialize a fresh ATN from the interpreter data's serialized form.
        ATN deserialized = new ATNDeserializer().deserialize(interpreterData.serializedAtn.toCharArray());
        tracingLexer.setInterpreter(new TracingLexerATNSimulator(analyzer, tracingLexer, deserialized));
        CommonTokenStream tokens = new CommonTokenStream(tracingLexer);
        // fill() forces the lexer to consume the entire input eagerly.
        tokens.fill();
    } catch (BadLocationException ex) {
        Exceptions.printStackTrace(ex);
    }
}
ParserDebuggerTokensTaskTaggerSnapshot.java 文件源码
项目:goworks
阅读 20
收藏 0
点赞 0
评论 0
/**
 * Builds a debugger lexer wrapper over {@code input} from the cached
 * interpreter data, then restores {@code startState} onto it.
 */
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    // Deserialize a fresh ATN from the interpreter data's serialized form.
    ATN deserialized = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    ParserDebuggerLexerWrapper wrapper = new ParserDebuggerLexerWrapper(
            lexerInterpreterData.grammarFileName,
            lexerInterpreterData.vocabulary,
            lexerInterpreterData.ruleNames,
            lexerInterpreterData.modeNames,
            deserialized,
            input);
    startState.apply(wrapper);
    return wrapper;
}
ParserDebuggerTokensTaskTaggerSnapshot.java 文件源码
项目:goworks
阅读 25
收藏 0
点赞 0
评论 0
/**
 * Wraps the given lexer's input stream in a fresh debugger lexer wrapper
 * built from the cached interpreter data.
 */
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    // Deserialize a fresh ATN from the interpreter data's serialized form.
    ATN deserialized = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    return new ParserDebuggerLexerWrapper(
            lexerInterpreterData.grammarFileName,
            lexerInterpreterData.vocabulary,
            lexerInterpreterData.ruleNames,
            lexerInterpreterData.modeNames,
            deserialized,
            lexer.getInputStream());
}
AbstractGrammarDebuggerEditorKit.java 文件源码
项目:antlrworks2
阅读 18
收藏 0
点赞 0
评论 0
/**
 * Tokenizes the entire document through the tracing lexer so that
 * {@code analyzer} records the lexer's behavior; the produced tokens are
 * not returned or stored.
 */
private void loadTokens(final Document document, LexerInterpreterData interpreterData, LexerTraceAnalyzer analyzer) {
    try {
        String documentText = document.getText(0, document.getLength());
        TracingCharStream tracedInput = new TracingCharStream(analyzer, documentText);
        TracingLexer tracedLexer = new TracingLexer(interpreterData, analyzer, tracedInput);
        // Deserialize a fresh ATN from the serialized form carried in the data.
        ATN lexerAtn = new ATNDeserializer().deserialize(interpreterData.serializedAtn.toCharArray());
        tracedLexer.setInterpreter(new TracingLexerATNSimulator(analyzer, tracedLexer, lexerAtn));
        // Eagerly pull all tokens so the whole document is traced.
        new CommonTokenStream(tracedLexer).fill();
    } catch (BadLocationException ex) {
        Exceptions.printStackTrace(ex);
    }
}
ParserDebuggerTokensTaskTaggerSnapshot.java 文件源码
项目:antlrworks2
阅读 20
收藏 0
点赞 0
评论 0
/**
 * Creates a debugger lexer wrapper for {@code input} from the cached
 * interpreter data and applies the provided {@code startState} to it.
 */
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    // Deserialize a fresh ATN from the serialized form carried in the data.
    ATN lexerAtn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    ParserDebuggerLexerWrapper wrappedLexer = new ParserDebuggerLexerWrapper(
            lexerInterpreterData.grammarFileName,
            lexerInterpreterData.vocabulary,
            lexerInterpreterData.ruleNames,
            lexerInterpreterData.modeNames,
            lexerAtn,
            input);
    startState.apply(wrappedLexer);
    return wrappedLexer;
}
ParserDebuggerTokensTaskTaggerSnapshot.java 文件源码
项目:antlrworks2
阅读 25
收藏 0
点赞 0
评论 0
/**
 * Returns a new debugger lexer wrapper, built from the cached interpreter
 * data, that reads from the given lexer's underlying input stream.
 */
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    // Deserialize a fresh ATN from the serialized form carried in the data.
    ATN lexerAtn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    return new ParserDebuggerLexerWrapper(
            lexerInterpreterData.grammarFileName,
            lexerInterpreterData.vocabulary,
            lexerInterpreterData.ruleNames,
            lexerInterpreterData.modeNames,
            lexerAtn,
            lexer.getInputStream());
}
ParserDebuggerReferenceAnchorsParserTask.java 文件源码
项目:goworks
阅读 21
收藏 0
点赞 0
评论 0
// Produces (or retrieves cached) parse results for the snapshot: a
// FileParseResult with timing/error statistics and the reference parse tree.
// Statement order below is deliberate: the interpreter swap, listener
// registration, and startTime capture all happen before parse() runs.
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
throws InterruptedException, ExecutionException {
//ParserDebuggerEditorKit.LEX
// Serializes the whole parse; presumably guards the cached parser data
// against concurrent recomputation -- TODO confirm lock's intended scope.
synchronized (lock) {
// NO_UPDATE: fetch previously computed data only; do not trigger a new parse.
ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
// Recompute only when either cached artifact is missing.
if (fileParseResultData == null || parseTreeResult == null) {
// Token source is driven by the lexer-tokens tagger for this snapshot.
Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
ParserRuleContext parseResult;
// Interpreter data (grammar name, vocabulary, rule names, serialized ATN)
// is stashed as a document property by the debugger editor kit.
ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
String grammarFileName = parserInterpreterData.grammarFileName;
Vocabulary vocabulary = parserInterpreterData.vocabulary;
List<String> ruleNames = parserInterpreterData.ruleNames;
// Deserialize a fresh ATN instance for this parser from its serialized form.
ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
// Timestamp taken before configuration/parsing; reported in FileParseResult.
long startTime = System.nanoTime();
// Replace the default simulator with one that gathers statistics.
parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
parser.getInterpreter().optimize_ll1 = false;
parser.getInterpreter().reportAmbiguities = true;
// Full-context prediction with exact ambiguity detection (slowest, most precise).
parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
// Drop default listeners, then install diagnostic + statistics + syntax-error listeners.
parser.removeErrorListeners();
parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
parser.addErrorListener(new StatisticsParserErrorListener());
SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
parser.addErrorListener(syntaxErrorListener);
parser.setBuildParseTree(true);
parser.setErrorHandler(new DefaultErrorStrategy());
parseResult = parser.parse(parserInterpreterData.startRuleIndex);
// NOTE(review): `document` is not a parameter of this method -- presumably a
// field of the enclosing task; verify it refers to the same document as `snapshot`.
String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
}
results.addResult(fileParseResultData);
results.addResult(parseTreeResult);
}
}
PreviewParser.java 文件源码
项目:intellij-plugin-v4
阅读 19
收藏 0
点赞 0
评论 0
// Convenience constructor: round-trips the grammar's ATN through
// serialization to obtain an independent deserialized copy, then delegates
// to the main constructor. The copy is built inline because this(...) must
// be the first statement in a Java constructor.
public PreviewParser(Grammar g, TokenStream input) {
this(g, new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(g.getATN())), input);
}
ParserDebuggerReferenceAnchorsParserTask.java 文件源码
项目:antlrworks2
阅读 25
收藏 0
点赞 0
评论 0
// Computes (or reuses cached) parse results for the snapshot, publishing a
// FileParseResult plus the reference parse tree. The setup sequence matters:
// startTime capture, simulator replacement, and listener wiring all precede
// the actual parse() call.
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
throws InterruptedException, ExecutionException {
//ParserDebuggerEditorKit.LEX
// Single lock around the whole operation; presumably prevents two threads
// from recomputing the same snapshot's data -- TODO confirm.
synchronized (lock) {
// NO_UPDATE asks only for already-computed data, without scheduling a parse.
ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
// Only do the expensive work when a cached artifact is absent.
if (fileParseResultData == null || parseTreeResult == null) {
// Tokens come from the snapshot's lexer-tokens tagger.
Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
ParserRuleContext parseResult;
// The editor kit stores the interpreter data on the document as a property.
ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
String grammarFileName = parserInterpreterData.grammarFileName;
Vocabulary vocabulary = parserInterpreterData.vocabulary;
List<String> ruleNames = parserInterpreterData.ruleNames;
// Build a fresh ATN for this parse from the serialized representation.
ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
// Captured before configuration/parsing; passed into FileParseResult below.
long startTime = System.nanoTime();
// Swap in a statistics-collecting ATN simulator.
parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
parser.getInterpreter().optimize_ll1 = false;
parser.getInterpreter().reportAmbiguities = true;
// Exact-ambiguity LL prediction: the most precise (and slowest) mode.
parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
// Replace default error listeners with diagnostic, statistics, and
// snapshot-aware syntax-error listeners.
parser.removeErrorListeners();
parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
parser.addErrorListener(new StatisticsParserErrorListener());
SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
parser.addErrorListener(syntaxErrorListener);
parser.setBuildParseTree(true);
parser.setErrorHandler(new DefaultErrorStrategy());
parseResult = parser.parse(parserInterpreterData.startRuleIndex);
// NOTE(review): `document` is not declared in this method -- presumably an
// enclosing-class field; confirm it matches the document behind `snapshot`.
String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
}
results.addResult(fileParseResultData);
results.addResult(parseTreeResult);
}
}