Example source code for the Java class org.antlr.v4.runtime.Vocabulary

Source file: ReservedIdentifiers.java (project: rainbow)

private static Set<String> possibleIdentifiers()
{
    ImmutableSet.Builder<String> names = ImmutableSet.builder();
    Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;
    for (int i = 0; i <= vocabulary.getMaxTokenType(); i++) {
        String name = nullToEmpty(vocabulary.getLiteralName(i));
        Matcher matcher = IDENTIFIER.matcher(name);
        if (matcher.matches()) {
            names.add(matcher.group(1));
        }
    }
    return names.build();
}
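The snippet above walks the lexer's Vocabulary by token type and keeps the literal names that look like identifiers. A minimal sketch of the same Vocabulary calls, assuming any generated ANTLR 4 lexer (SqlBaseLexer as in the snippet; the VocabularyDump harness class is hypothetical):

import org.antlr.v4.runtime.Vocabulary;

public class VocabularyDump {
    public static void main(String[] args) {
        Vocabulary vocabulary = SqlBaseLexer.VOCABULARY;  // static field on every generated recognizer
        for (int type = 0; type <= vocabulary.getMaxTokenType(); type++) {
            // getLiteralName() returns quoted literals such as 'SELECT' (or null),
            // getSymbolicName() returns the token name declared in the grammar (or null).
            System.out.printf("%3d  literal=%-12s symbolic=%s%n",
                    type, vocabulary.getLiteralName(type), vocabulary.getSymbolicName(type));
        }
    }
}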
Source file: Trainer.java (project: codebuff)
public static TerminalNode getMatchingLeftSymbol(Corpus corpus,
                                                 InputDocument doc,
                                                 TerminalNode node)
{
    ParserRuleContext parent = (ParserRuleContext)node.getParent();
    int curTokensParentRuleIndex = parent.getRuleIndex();
    Token curToken = node.getSymbol();
    if (corpus.ruleToPairsBag != null) {
        String ruleName = doc.parser.getRuleNames()[curTokensParentRuleIndex];
        RuleAltKey ruleAltKey = new RuleAltKey(ruleName, parent.getAltNumber());
        List<Pair<Integer, Integer>> pairs = corpus.ruleToPairsBag.get(ruleAltKey);
        if ( pairs!=null ) {
            // Find appropriate pair given current token
            // If more than one pair (a,b) with b=current token pick first one
            // or if a common pair like ({,}), then give that one preference.
            // or if b is punctuation, prefer a that is punct
            List<Integer> viableMatchingLeftTokenTypes = viableLeftTokenTypes(parent, curToken, pairs);
            Vocabulary vocab = doc.parser.getVocabulary();
            if ( !viableMatchingLeftTokenTypes.isEmpty() ) {
                int matchingLeftTokenType =
                    CollectTokenPairs.getMatchingLeftTokenType(curToken, viableMatchingLeftTokenTypes, vocab);
                List<TerminalNode> matchingLeftNodes = parent.getTokens(matchingLeftTokenType);
                // get matching left node by getting last node to left of current token
                List<TerminalNode> nodesToLeftOfCurrentToken =
                    filter(matchingLeftNodes, n -> n.getSymbol().getTokenIndex()<curToken.getTokenIndex());
                TerminalNode matchingLeftNode = nodesToLeftOfCurrentToken.get(nodesToLeftOfCurrentToken.size()-1);
                if (matchingLeftNode == null) {
                    System.err.println("can't find matching node for "+node.getSymbol());
                }
                return matchingLeftNode;
            }
        }
    }
    return null;
}
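Note that the final lookup calls get(size()-1) on the filtered list, which throws if the list is empty, so the null check below it cannot catch that case. A minimal sketch of the same "last terminal of a given type to the left of the current token" lookup with an explicit empty path (LeftTokenFinder and lastLeftOf are hypothetical names; only the ANTLR runtime calls are real):

import java.util.List;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.tree.TerminalNode;

final class LeftTokenFinder {
    /** Returns the right-most terminal of the given type left of {@code current}, or null. */
    static TerminalNode lastLeftOf(ParserRuleContext parent, int tokenType,
                                   Token current, Vocabulary vocab) {
        TerminalNode result = null;
        List<TerminalNode> candidates = parent.getTokens(tokenType);  // all terminal children of that type
        for (TerminalNode n : candidates) {
            if (n.getSymbol().getTokenIndex() < current.getTokenIndex()) {
                result = n;  // candidates come in child order, so the last hit is the right-most one
            }
        }
        if (result == null) {
            System.err.println("no " + vocab.getDisplayName(tokenType)
                    + " to the left of " + current.getText());
        }
        return result;
    }
}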
Source file: GrammarParserInterpreter.java (project: codebuff)
public GrammarParserInterpreter(Grammar g,
                                String grammarFileName,
                                Vocabulary vocabulary,
                                Collection<String> ruleNames,
                                ATN atn,
                                TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
    this.g = g;
}
Source file: ParserDebuggerTokensTaskTaggerSnapshot.java (project: goworks)
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Source file: ParserDebuggerTokensTaskTaggerSnapshot.java (project: goworks)
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Source file: ParserDebuggerTokensTaskTaggerSnapshot.java (project: antlrworks2)
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Source file: ParserDebuggerTokensTaskTaggerSnapshot.java (project: antlrworks2)
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Source file: FunctionExpressionLexer.java (project: rapidminer)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
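Every generated lexer and parser exposes the same static VOCABULARY through getVocabulary(), so token types can be rendered with grammar names instead of raw integers. A minimal, generic sketch (the TokenPrinter helper is hypothetical; only the ANTLR runtime calls are real):

import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.Vocabulary;

final class TokenPrinter {
    /** Prints every token produced by the lexer, labelled with its vocabulary display name. */
    static void printTokens(Lexer lexer) {
        Vocabulary vocabulary = lexer.getVocabulary();
        for (Token token : lexer.getAllTokens()) {
            System.out.printf("%-15s %s%n",
                    vocabulary.getDisplayName(token.getType()), token.getText());
        }
    }
    // Usage with any generated lexer, e.g. (hypothetical input):
    //   printTokens(new FunctionExpressionLexer(CharStreams.fromString("1 + 2")));
}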
Source file: ExprParser.java (project: Expr3)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: Trainer.java (project: codebuff)
public static String _toString(FeatureMetaData[] FEATURES, InputDocument doc, int[] features,
                               boolean showInfo) {
    Vocabulary v = doc.parser.getVocabulary();
    String[] ruleNames = doc.parser.getRuleNames();
    StringBuilder buf = new StringBuilder();
    for (int i=0; i<FEATURES.length; i++) {
        if ( FEATURES[i].type.equals(UNUSED) ) continue;
        if ( i>0 ) buf.append(" ");
        if ( i==INDEX_CUR_TOKEN_TYPE ) {
            buf.append("| "); // separate prev from current tokens
        }
        int displayWidth = FEATURES[i].type.displayWidth;
        switch ( FEATURES[i].type ) {
            case TOKEN :
                String tokenName = v.getDisplayName(features[i]);
                String abbrev = StringUtils.abbreviateMiddle(tokenName, "*", displayWidth);
                String centered = StringUtils.center(abbrev, displayWidth);
                buf.append(String.format("%"+displayWidth+"s", centered));
                break;
            case RULE :
                if ( features[i]>=0 ) {
                    String ruleName = ruleNames[unrulealt(features[i])[0]];
                    int ruleAltNum = unrulealt(features[i])[1];
                    ruleName += ":"+ruleAltNum;
                    abbrev = StringUtils.abbreviateMiddle(ruleName, "*", displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", abbrev));
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            case INT :
            case INFO_LINE:
            case INFO_CHARPOS:
                if ( showInfo ) {
                    if ( features[i]>=0 ) {
                        buf.append(String.format("%"+displayWidth+"s", StringUtils.center(String.valueOf(features[i]), displayWidth)));
                    }
                    else {
                        buf.append(Tool.sequence(displayWidth, " "));
                    }
                }
                break;
            case INFO_FILE:
                if ( showInfo ) {
                    String fname = new File(doc.fileName).getName();
                    fname = StringUtils.abbreviate(fname, displayWidth);
                    buf.append(String.format("%"+displayWidth+"s", fname));
                }
                break;
            case BOOL :
                if ( features[i]!=-1 ) {
                    buf.append(features[i] == 1 ? "true " : "false");
                }
                else {
                    buf.append(Tool.sequence(displayWidth, " "));
                }
                break;
            default :
                System.err.println("NO STRING FOR FEATURE TYPE: "+ FEATURES[i].type);
        }
    }
    return buf.toString();
}
Source file: CollectTokenPairs.java (project: codebuff)
public CollectTokenPairs(Vocabulary vocab, String[] ruleNames) {
    this.vocab = vocab;
    this.ruleNames = ruleNames;
}
Source file: Grammar.java (project: codebuff)
/**
 * Gets a {@link Vocabulary} instance describing the vocabulary used by the
 * grammar.
 */
public Vocabulary getVocabulary() {
    return new VocabularyImpl(getTokenLiteralNames(), getTokenSymbolicNames());
}
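VocabularyImpl simply pairs a literal-name array with a symbolic-name array indexed by token type, and getDisplayName() falls back from literal name to symbolic name to the numeric type. A minimal sketch with made-up token names (the VocabularyFromNames class is hypothetical):

import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.VocabularyImpl;

public class VocabularyFromNames {
    public static void main(String[] args) {
        // Index 0 is unused: real token types start at 1 (0 is Token.INVALID_TYPE).
        String[] literalNames  = { null, "'+'", "'-'", null };
        String[] symbolicNames = { null, "PLUS", "MINUS", "ID" };
        Vocabulary vocab = new VocabularyImpl(literalNames, symbolicNames);

        System.out.println(vocab.getDisplayName(1)); // '+'   (literal name wins)
        System.out.println(vocab.getDisplayName(3)); // ID    (no literal, symbolic name used)
        System.out.println(vocab.getMaxTokenType()); // 3
    }
}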
Source file: Verilog2001Parser.java (project: netlist-graph)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
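Parsers expose the same vocabulary, which is handy in error listeners: the offending token's numeric type can be reported with its grammar name. A minimal sketch (the listener class and message format are illustrative; the syntaxError signature is the standard ANTLRErrorListener one):

import org.antlr.v4.runtime.BaseErrorListener;
import org.antlr.v4.runtime.RecognitionException;
import org.antlr.v4.runtime.Recognizer;
import org.antlr.v4.runtime.Token;

final class NamedTokenErrorListener extends BaseErrorListener {
    @Override
    public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol,
                            int line, int charPositionInLine, String msg, RecognitionException e) {
        String tokenName = "<none>";
        if (offendingSymbol instanceof Token) {
            // Resolve the token type through the recognizer's own Vocabulary.
            tokenName = recognizer.getVocabulary().getDisplayName(((Token) offendingSymbol).getType());
        }
        System.err.printf("line %d:%d near %s: %s%n", line, charPositionInLine, tokenName, msg);
    }
}
// Usage: parser.removeErrorListeners(); parser.addErrorListener(new NamedTokenErrorListener());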
Source file: Verilog2001Lexer.java (project: netlist-graph)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: HelloLexer.java (project: KeepTry)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: HelloParser.java (project: KeepTry)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: FunctionExpressionLexer.java (project: rapidminer-studio)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: ExpressionConstraintParser.java (project: UMLS-Terminology-Server)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: ExpressionConstraintLexer.java (project: UMLS-Terminology-Server)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: EditorConfigLexer.java (project: netbeans-editorconfig-editor)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: EditorConfigParser.java (project: netbeans-editorconfig-editor)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: SkinnyParser.java (project: SkinnyAssembler)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: SkinnyLexer.java (project: SkinnyAssembler)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: SoqlParser.java (project: components)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: ParserDebuggerReferenceAnchorsParserTask.java (project: goworks)
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
        throws InterruptedException, ExecutionException {
    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);
            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }
        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
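The method above drives a ParserInterpreter subclass from a deserialized ATN plus the grammar's Vocabulary and rule names. A stripped-down sketch of the same construction with the stock org.antlr.v4.runtime.ParserInterpreter (the InterpreterRunner helper and its parameters are assumptions; the serialized-ATN handling mirrors the snippet above):

import java.util.Arrays;
import org.antlr.v4.runtime.ParserInterpreter;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.TokenStream;
import org.antlr.v4.runtime.Vocabulary;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;

final class InterpreterRunner {
    static ParserRuleContext run(String grammarFileName, Vocabulary vocabulary,
                                 String[] ruleNames, String serializedAtn,
                                 TokenStream tokens, int startRuleIndex) {
        // Rebuild the ATN from its serialized form, then interpret the grammar directly.
        ATN atn = new ATNDeserializer().deserialize(serializedAtn.toCharArray());
        ParserInterpreter parser = new ParserInterpreter(
                grammarFileName, vocabulary, Arrays.asList(ruleNames), atn, tokens);
        parser.setBuildParseTree(true);
        return parser.parse(startRuleIndex);  // parse tree rooted at the given rule index
    }
}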
Source file: ParserDebuggerReferenceAnchorsParserTask.java (project: goworks)
public TracingParserInterpreter(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, ATN atn, TokenStream input) {
    super(grammarFileName, vocabulary, ruleNames, atn, input);
}
Source file: ParserDebuggerTokensTaskTaggerSnapshot.java (project: goworks)
public ParserDebuggerLexerWrapper(String grammarFileName, Vocabulary vocabulary, Collection<String> ruleNames, Collection<String> modeNames, ATN atn, CharStream input) {
    super(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
}
Source file: MemoryModelLexer.java (project: org.pshdl)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: MemoryModelParser.java (project: org.pshdl)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}
Source file: PSHDLLangLexer.java (project: org.pshdl)
@Override
public Vocabulary getVocabulary() {
    return VOCABULARY;
}