Source code examples for the Java class org.antlr.v4.runtime.misc.IntervalSet

LL1OptionalBlockSingleAlt.java source code (project: codebuff)

public LL1OptionalBlockSingleAlt(OutputModelFactory factory,
GrammarAST blkAST,
List<CodeBlockForAlt> alts)
{
super(factory, blkAST, alts);
this.decision = ((DecisionState)blkAST.atnState).decision;
// Lookahead for each alt 1..n
// IntervalSet[] altLookSets = LinearApproximator.getLL1LookaheadSets(dfa);
IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
altLook = getAltLookaheadAsStringLists(altLookSets);
IntervalSet look = altLookSets[0];
IntervalSet followLook = altLookSets[1];
IntervalSet expecting = look.or(followLook);
this.error = getThrowNoViableAlt(factory, blkAST, expecting);
expr = addCodeForLookaheadTempVar(look);
followExpr = factory.getLL1Test(followLook, blkAST);
}
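Before the rest of the examples, here is a minimal standalone sketch of the IntervalSet operations these snippets lean on most: of and add for construction, or and and for combination, contains, isNil and size for queries. The token type constants are invented for illustration only.

import org.antlr.v4.runtime.misc.IntervalSet;

public class IntervalSetBasics {
    public static void main(String[] args) {
        // hypothetical token types, for illustration only
        int ID = 5, INT = 6, SEMI = 7, COMMA = 8;

        IntervalSet look = IntervalSet.of(ID);            // single element
        look.add(INT);                                     // add another element
        IntervalSet follow = IntervalSet.of(SEMI, COMMA);  // inclusive range SEMI..COMMA

        IntervalSet expecting = look.or(follow);           // union, as in the LL1 constructors
        System.out.println(expecting.contains(ID));        // true
        System.out.println(expecting.size());              // 4
        System.out.println(look.and(follow).isNil());      // true: look and follow are disjoint
    }
}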

TestSetInline.java source code (project: codebuff)

private static Bitset[] createBitsets(OutputModelFactory factory,
IntervalSet set,
int wordSize,
boolean useZeroOffset) {
List<Bitset> bitsetList = new ArrayList<Bitset>();
for (int ttype : set.toArray()) {
Bitset current = !bitsetList.isEmpty() ? bitsetList.get(bitsetList.size() - 1) : null;
if (current == null || ttype > (current.shift + wordSize-1)) {
current = new Bitset();
if (useZeroOffset && ttype >= 0 && ttype < wordSize-1) {
current.shift = 0;
}
else {
current.shift = ttype;
}
bitsetList.add(current);
}
current.ttypes.add(factory.getGenerator().getTarget().getTokenTypeAsTargetLabel(factory.getGrammar(), ttype));
}
return bitsetList.toArray(new Bitset[bitsetList.size()]);
}
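createBitsets packs the members of an IntervalSet into word-sized bitsets so a generated parser can test membership with a shift and a mask. The sketch below re-creates only the grouping decision with plain collections (the Bitset model object and the target-specific token labels are left out), to show which token types end up sharing a word:

import java.util.ArrayList;
import java.util.List;

public class BitsetGroupingSketch {
    // Simplified re-creation of the grouping in createBitsets: consecutive token
    // types share a word as long as they fit within wordSize bits of that word's
    // shift (the lowest token type the word can represent).
    static List<List<Integer>> group(int[] sortedTtypes, int wordSize, boolean useZeroOffset) {
        List<List<Integer>> words = new ArrayList<>();
        int shift = Integer.MIN_VALUE;
        for (int ttype : sortedTtypes) {
            if (words.isEmpty() || ttype > shift + wordSize - 1) {
                shift = (useZeroOffset && ttype >= 0 && ttype < wordSize - 1) ? 0 : ttype;
                words.add(new ArrayList<>());
            }
            words.get(words.size() - 1).add(ttype);
        }
        return words;
    }

    public static void main(String[] args) {
        // with wordSize 64, types 3 and 40 share a word; 200 starts a new one
        System.out.println(group(new int[]{3, 40, 200}, 64, true));  // [[3, 40], [200]]
    }
}

The zero-offset case anchors small token types at shift 0, which appears to let the generated membership test reuse the lookahead value without subtracting a shift.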

GoCompletionQuery.java source code (project: goworks)

public static RuleContext getTopContext(Parser parser, RuleContext context, IntervalSet values, boolean checkTop) {
if (checkTop && context instanceof ParserRuleContext) {
if (values.contains(context.getRuleIndex())) {
return context;
}
}
if (context.isEmpty()) {
return null;
}
if (values.contains(parser.getATN().states.get(context.invokingState).ruleIndex)) {
return context.parent;
}
return getTopContext(parser, context.parent, values, false);
}

DefaultErrorStrategy.java source code (project: Scratch-ApuC)

/**
* This method implements the single-token deletion inline error recovery
* strategy. It is called by {@link #recoverInline} to attempt to recover
* from mismatched input. If this method returns null, the parser and error
* handler state will not have changed. If this method returns non-null,
* {@code recognizer} will <em>not</em> be in error recovery mode since the
* returned token was a successful match.
*
* <p>If the single-token deletion is successful, this method calls
* {@link #reportUnwantedToken} to report the error, followed by
* {@link Parser#consume} to actually "delete" the extraneous token. Then,
* before returning {@link #reportMatch} is called to signal a successful
* match.</p>
*
* @param recognizer the parser instance
* @return the successfully matched {@link Token} instance if single-token
* deletion successfully recovers from the mismatched input, otherwise
* {@code null}
*/
@Nullable
protected Token singleTokenDeletion(@NotNull Parser recognizer) {
int nextTokenType = recognizer.getInputStream().LA(2);
IntervalSet expecting = getExpectedTokens(recognizer);
if ( expecting.contains(nextTokenType) ) {
reportUnwantedToken(recognizer);
/*
System.err.println("recoverFromMismatchedToken deleting "+
((TokenStream)recognizer.getInputStream()).LT(1)+
" since "+((TokenStream)recognizer.getInputStream()).LT(2)+
" is what we want");
*/
recognizer.consume(); // simply delete extra token
// we want to return the token we're actually matching
Token matchedSymbol = recognizer.getCurrentToken();
reportMatch(recognizer); // we know current token is correct
return matchedSymbol;
}
return null;
}
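A minimal sketch of observing this recovery path by subclassing DefaultErrorStrategy; the class name and the logging are illustrative, only the overridden signatures come from the code above. It could be installed with parser.setErrorHandler(new LoggingErrorStrategy()).

import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.Token;
import org.antlr.v4.runtime.misc.IntervalSet;

// Illustrative subclass: log the expected set whenever single-token deletion succeeds.
public class LoggingErrorStrategy extends DefaultErrorStrategy {
    @Override
    protected Token singleTokenDeletion(Parser recognizer) {
        IntervalSet expecting = getExpectedTokens(recognizer);
        Token matched = super.singleTokenDeletion(recognizer);
        if (matched != null) {
            System.err.println("deleted an extra token; matched " + matched.getText() +
                    " from expected set " + expecting.toString(recognizer.getTokenNames()));
        }
        return matched;
    }
}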

DefaultErrorStrategy.java source code (project: Scratch-ApuC)

/** Conjure up a missing token during error recovery.
*
* The recognizer attempts to recover from single missing
* symbols. But, actions might refer to that missing symbol.
* For example, x=ID {f($x);}. The action clearly assumes
* that there has been an identifier matched previously and that
* $x points at that token. If that token is missing, but
the next token in the stream is what we want, we assume that
* this token is missing and we keep going. Because we
* have to return some token to replace the missing token,
* we have to conjure one up. This method gives the user control
* over the tokens returned for missing tokens. Mostly,
* you will want to create something special for identifier
* tokens. For literals such as '{' and ',', the default
* action in the parser or tree parser works. It simply creates
* a CommonToken of the appropriate type. The text will be the token.
* If you change what tokens must be created by the lexer,
* override this method to create the appropriate tokens.
*/
@NotNull
protected Token getMissingSymbol(@NotNull Parser recognizer) {
Token currentSymbol = recognizer.getCurrentToken();
IntervalSet expecting = getExpectedTokens(recognizer);
int expectedTokenType = expecting.getMinElement(); // get any element
String tokenText;
if ( expectedTokenType== Token.EOF ) tokenText = "<missing EOF>";
else tokenText = "<missing "+recognizer.getTokenNames()[expectedTokenType]+">";
Token current = currentSymbol;
Token lookback = recognizer.getInputStream().LT(-1);
if ( current.getType() == Token.EOF && lookback!=null ) {
current = lookback;
}
return
recognizer.getTokenFactory().create(new Pair<TokenSource, CharStream>(current.getTokenSource(), current.getTokenSource().getInputStream()), expectedTokenType, tokenText,
Token.DEFAULT_CHANNEL,
-1, -1,
current.getLine(), current.getCharPositionInLine());
}
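The comment above suggests overriding this method when identifiers need special handling. A hedged sketch of such an override follows; HYPOTHETICAL_ID_TYPE is a placeholder for the real ID token type of whatever generated parser this strategy is installed on.

import org.antlr.v4.runtime.CommonToken;
import org.antlr.v4.runtime.DefaultErrorStrategy;
import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.Token;

// Illustrative subclass: give conjured identifier tokens a recognizable text.
public class MissingIdStrategy extends DefaultErrorStrategy {
    static final int HYPOTHETICAL_ID_TYPE = 1; // assumption: replace with MyParser.ID

    @Override
    protected Token getMissingSymbol(Parser recognizer) {
        Token missing = super.getMissingSymbol(recognizer);
        if (missing.getType() == HYPOTHETICAL_ID_TYPE) {
            CommonToken replacement = new CommonToken(missing);
            replacement.setText("<auto-generated identifier>");
            return replacement;
        }
        return missing;
    }
}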

ParserATNSimulator.java source code (project: Scratch-ApuC)

/**
* Return a configuration set containing only the configurations from
* {@code configs} which are in a {@link RuleStopState}. If all
* configurations in {@code configs} are already in a rule stop state, this
* method simply returns {@code configs}.
*
* <p>When {@code lookToEndOfRule} is true, this method uses
* {@link ATN#nextTokens} for each configuration in {@code configs} which is
* not already in a rule stop state to see if a rule stop state is reachable
* from the configuration via epsilon-only transitions.</p>
*
* @param configs the configuration set to update
* @param lookToEndOfRule when true, this method checks for rule stop states
* reachable by epsilon-only transitions from each configuration in
* {@code configs}.
*
* @return {@code configs} if all configurations in {@code configs} are in a
* rule stop state, otherwise return a new configuration set containing only
* the configurations from {@code configs} which are in a rule stop state
*/
@NotNull
protected ATNConfigSet removeAllConfigsNotInRuleStopState(@NotNull ATNConfigSet configs, boolean lookToEndOfRule) {
if (PredictionMode.allConfigsInRuleStopStates(configs)) {
return configs;
}
ATNConfigSet result = new ATNConfigSet(configs.fullCtx);
for (ATNConfig config : configs) {
if (config.state instanceof RuleStopState) {
result.add(config, mergeCache);
continue;
}
if (lookToEndOfRule && config.state.onlyHasEpsilonTransitions()) {
IntervalSet nextTokens = atn.nextTokens(config.state);
if (nextTokens.contains(Token.EPSILON)) {
ATNState endOfRuleState = atn.ruleToStopState[config.state.ruleIndex];
result.add(new ATNConfig(config, endOfRuleState), mergeCache);
}
}
}
return result;
}

LL1Analyzer.java source code (project: Scratch-ApuC)

/**
* Calculates the SLL(1) expected lookahead set for each outgoing transition
* of an {@link ATNState}. The returned array has one element for each
* outgoing transition in {@code s}. If the closure from transition
* <em>i</em> leads to a semantic predicate before matching a symbol, the
* element at index <em>i</em> of the result will be {@code null}.
*
* @param s the ATN state
* @return the expected symbols for each outgoing transition of {@code s}.
*/
@Nullable
public IntervalSet[] getDecisionLookahead(@Nullable ATNState s) {
// System.out.println("LOOK("+s.stateNumber+")");
if ( s==null ) {
return null;
}
IntervalSet[] look = new IntervalSet[s.getNumberOfTransitions()];
for (int alt = 0; alt < s.getNumberOfTransitions(); alt++) {
look[alt] = new IntervalSet();
Set<ATNConfig> lookBusy = new HashSet<ATNConfig>();
boolean seeThruPreds = false; // fail to get lookahead upon pred
_LOOK(s.transition(alt).target, null, PredictionContext.EMPTY,
look[alt], lookBusy, new BitSet(), seeThruPreds, false);
// Wipe out lookahead for this alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if ( look[alt].size()==0 || look[alt].contains(HIT_PRED) ) {
look[alt] = null;
}
}
return look;
}
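A usage sketch: computing the SLL(1) lookahead sets for every decision in a parser's ATN and printing them. Parser.getATN() and ATN.decisionToState are standard runtime members; the dump format is made up.

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.DecisionState;
import org.antlr.v4.runtime.atn.LL1Analyzer;
import org.antlr.v4.runtime.misc.IntervalSet;

public class DecisionLookaheadDump {
    // Print the lookahead set of each alternative for every decision state.
    static void dump(Parser parser) {
        ATN atn = parser.getATN();
        for (DecisionState s : atn.decisionToState) {
            IntervalSet[] look = new LL1Analyzer(atn).getDecisionLookahead(s);
            for (int alt = 0; alt < look.length; alt++) {
                String set = look[alt] == null ? "<pred>" : look[alt].toString(parser.getTokenNames());
                System.out.println("decision " + s.decision + " alt " + (alt + 1) + ": " + set);
            }
        }
    }
}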

CSSErrorStrategy.java source code (project: jStyleParser)

/**
* Consumes tokens until the lexer state is function-balanced and
* a token from the follow set is matched. The matched token is consumed as well.
*/
protected void consumeUntilGreedy(Parser recognizer, IntervalSet set, CSSLexerState.RecoveryMode mode) {
CSSToken t;
do {
Token next = recognizer.getInputStream().LT(1);
if (next instanceof CSSToken) {
t = (CSSToken) recognizer.getInputStream().LT(1);
if (t.getType() == Token.EOF) {
logger.trace("token eof ");
break;
}
} else
break; /* not a CSSToken, probably EOF */
logger.trace("Skipped greedy: {}", t.getText());
// consume token even if it will match
recognizer.consume();
}
while (!(t.getLexerState().isBalanced(mode, null, t) && set.contains(t.getType())));
}

CSSErrorStrategy.java source code (project: jStyleParser)

/**
* Consumes tokens until the lexer state is function-balanced and
* a token from the follow set is matched.
*/
public void consumeUntil(Parser recognizer, IntervalSet follow, CSSLexerState.RecoveryMode mode, CSSLexerState ls) {
CSSToken t;
boolean finish;
TokenStream input = recognizer.getInputStream();
do {
Token next = input.LT(1);
if (next instanceof CSSToken) {
t = (CSSToken) input.LT(1);
if (t.getType() == Token.EOF) {
logger.trace("token eof ");
break;
}
} else
break; /* not a CSSToken, probably EOF */
// consume the token if it does not match
finish = (t.getLexerState().isBalanced(mode, ls, t) && follow.contains(t.getType()));
if (!finish) {
logger.trace("Skipped: {}", t);
input.consume();
}
} while (!finish);
}

ANTLRv4ExternalAnnotator.java source code (project: intellij-plugin-v4)

public static Map<String,GrammarAST> getUnusedParserRules(Grammar g) {
if ( g.ast==null || g.isLexer() ) return null;
List<GrammarAST> ruleNodes = g.ast.getNodesWithTypePreorderDFS(IntervalSet.of(ANTLRParser.RULE_REF));
// in case of errors, we walk AST ourselves
// ANTLR's Grammar object might have bailed on rule defs etc...
Set<String> ruleRefs = new HashSet<String>();
Map<String,GrammarAST> ruleDefs = new HashMap<String,GrammarAST>();
for (GrammarAST x : ruleNodes) {
if ( x.getParent().getType()==ANTLRParser.RULE ) {
// System.out.println("def "+x);
ruleDefs.put(x.getText(), x);
}
else if ( x instanceof RuleRefAST ) {
RuleRefAST r = (RuleRefAST) x;
// System.out.println("ref "+r);
ruleRefs.add(r.getText());
}
}
ruleDefs.keySet().removeAll(ruleRefs);
return ruleDefs;
}

BeetlAntlrErrorStrategy.java source code (project: beetl2.0)

protected void reportUnwantedToken(@NotNull Parser recognizer)
{
if (inErrorRecoveryMode(recognizer))
{
return;
}
beginErrorCondition(recognizer);
Token t = recognizer.getCurrentToken();
String tokenName = getTokenErrorDisplay(t);
IntervalSet expecting = getExpectedTokens(recognizer);
String msg = "多余输入 " + tokenName + " 期望 " + expecting.toString(recognizer.getTokenNames());
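// message text (Chinese): "多余输入" = "extraneous input", "期望" = "expecting"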
BeetlException exception = new BeetlParserException(BeetlException.PARSER_MISS_ERROR, msg);
// exception.token = this.getGrammarToken(t);
exception.pushToken(this.getGrammarToken(t));
throw exception;
}

GraphParser.java source code (project: digraph-parser)

@Override
public void reportMissingToken(Parser recognizer) {
beginErrorCondition(recognizer);
Token t = recognizer.getCurrentToken();
IntervalSet expecting = getExpectedTokens(recognizer);
String msg = "missing " + expecting.toString(recognizer.getTokenNames()) + " at " + getTokenErrorDisplay(t);
throw new RecognitionException(msg, recognizer, recognizer.getInputStream(), recognizer.getContext());
}

ExpressionInterpreter.java source code (project: arma-dialog-creator)

@Override
public void reportMissingToken(Parser recognizer) {
beginErrorCondition(recognizer);
Token t = recognizer.getCurrentToken();
IntervalSet expecting = getExpectedTokens(recognizer);
String msg = "missing " + expecting.toString(recognizer.getTokenNames()) + " at " + getTokenErrorDisplay(t);
throw new RecognitionException(msg, recognizer, recognizer.getInputStream(), recognizer.getContext());
}

CursorContextListener.java source code (project: ftc)

@Override
public void notifyOnError(Token offendingToken, Token missingToken, IntervalSet tokensExpected) {
super.notifyOnError(offendingToken, missingToken, tokensExpected);
if (offendingToken != null)
offendingSymbol = offendingToken;
if (tokensExpected.size() > 0)
expectedSymbols = getTokenNames(tokensExpected);
debugOnError();
}

GrammarAST.java source code (project: codebuff)

public List<GrammarAST> getNodesWithType(IntervalSet types) {
List<GrammarAST> nodes = new ArrayList<GrammarAST>();
List<GrammarAST> work = new LinkedList<GrammarAST>();
work.add(this);
GrammarAST t;
while ( !work.isEmpty() ) {
t = work.remove(0);
if ( types==null || types.contains(t.getType()) ) nodes.add(t);
if ( t.children!=null ) {
work.addAll(Arrays.asList(t.getChildrenAsArray()));
}
}
return nodes;
}

GrammarAST.java source code (project: codebuff)

public void getNodesWithTypePreorderDFS_(List<GrammarAST> nodes, IntervalSet types) {
if ( types.contains(this.getType()) ) nodes.add(this);
// walk all children of root.
for (int i= 0; i < getChildCount(); i++) {
GrammarAST child = (GrammarAST)getChild(i);
child.getNodesWithTypePreorderDFS_(nodes, types);
}
}

Grammar.java source code (project: codebuff)

/** Return a set of all possible token or char types for this grammar */
public IntSet getTokenTypes() {
if ( isLexer() ) {
return getAllCharValues();
}
return IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, getMaxTokenType());
}

Grammar.java source code (project: codebuff)

public static Map<Integer, Interval> getStateToGrammarRegionMap(GrammarRootAST ast, IntervalSet grammarTokenTypes) {
Map<Integer, Interval> stateToGrammarRegionMap = new HashMap<Integer, Interval>();
if ( ast==null ) return stateToGrammarRegionMap;
List<GrammarAST> nodes = ast.getNodesWithType(grammarTokenTypes);
for (GrammarAST n : nodes) {
if (n.atnState != null) {
Interval tokenRegion = Interval.of(n.getTokenStartIndex(), n.getTokenStopIndex());
org.antlr.runtime.tree.Tree ruleNode = null;
// RULEs, BLOCKs of transformed recursive rules point to original token interval
switch ( n.getType() ) {
case ANTLRParser.RULE :
ruleNode = n;
break;
case ANTLRParser.BLOCK :
case ANTLRParser.CLOSURE :
ruleNode = n.getAncestor(ANTLRParser.RULE);
break;
}
if ( ruleNode instanceof RuleAST ) {
String ruleName = ((RuleAST) ruleNode).getRuleName();
Rule r = ast.g.getRule(ruleName);
if ( r instanceof LeftRecursiveRule ) {
RuleAST originalAST = ((LeftRecursiveRule) r).getOriginalAST();
tokenRegion = Interval.of(originalAST.getTokenStartIndex(), originalAST.getTokenStopIndex());
}
}
stateToGrammarRegionMap.put(n.atnState.stateNumber, tokenRegion);
}
}
return stateToGrammarRegionMap;
}

LexerATNFactory.java source code (project: codebuff)

/** [Aa\t \u1234a-z\]\-] char sets */
@Override
public Handle charSetLiteral(GrammarAST charSetAST) {
ATNState left = newState(charSetAST);
ATNState right = newState(charSetAST);
IntervalSet set = getSetFromCharSetLiteral(charSetAST);
left.addTransition(new SetTransition(right, set));
charSetAST.atnState = left;
return new Handle(left, right);
}

LexerATNFactory.java source code (project: codebuff)

public IntervalSet getSetFromCharSetLiteral(GrammarAST charSetAST) {
String chars = charSetAST.getText();
chars = chars.substring(1, chars.length()-1);
String cset = '"'+ chars +'"';
IntervalSet set = new IntervalSet();
// unescape all valid escape chars like \n, leaving escaped dashes as '\-'
// so we can avoid seeing them as '-' range ops.
chars = CharSupport.getStringFromGrammarStringLiteral(cset);
// now make x-y become set of char
int n = chars.length();
for (int i=0; i< n; i++) {
int c = chars.charAt(i);
if ( c=='\\' && (i+1)<n && chars.charAt(i+1)=='-' ) { // \-
set.add('-');
i++;
}
else if ( (i+2)<n && chars.charAt(i+1)=='-' ) { // range x-y
int x = c;
int y = chars.charAt(i+2);
if ( x<=y ) set.add(x,y);
i+=2;
}
else {
set.add(c);
}
}
return set;
}
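As a worked illustration, the char set literal [a-z0-9\-] ends up producing the same set as the explicit calls below (an assumed example input, not ANTLR tool code):

import org.antlr.v4.runtime.misc.IntervalSet;

public class CharSetLiteralSketch {
    public static void main(String[] args) {
        // Equivalent of what getSetFromCharSetLiteral builds for the literal [a-z0-9\-]
        IntervalSet set = new IntervalSet();
        set.add('a', 'z');   // range a-z
        set.add('0', '9');   // range 0-9
        set.add('-');        // escaped dash kept as a literal '-'

        System.out.println(set.contains('m'));  // true
        System.out.println(set.contains('-'));  // true
        System.out.println(set.contains('_'));  // false
    }
}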

AnalysisPipeline.java source code (project: codebuff)

protected void processLexer() {
// make sure every non-fragment lexer rule matches at least one symbol
for (Rule rule : g.rules.values()) {
if (rule.isFragment()) {
continue;
}
LL1Analyzer analyzer = new LL1Analyzer(g.atn);
IntervalSet look = analyzer.LOOK(g.atn.ruleToStartState[rule.index], null);
if (look.contains(Token.EPSILON)) {
g.tool.errMgr.grammarError(ErrorType.EPSILON_TOKEN, g.fileName, ((GrammarAST)rule.ast.getChild(0)).getToken(), rule.name);
}
}
}

AnalysisPipeline.java source code (project: codebuff)

/** Return whether lookahead sets are disjoint; no lookahead ⇒ not disjoint */
public static boolean disjoint(IntervalSet[] altLook) {
boolean collision = false;
IntervalSet combined = new IntervalSet();
if ( altLook==null ) return false;
for (IntervalSet look : altLook) {
if ( look==null ) return false; // lookahead computation must have failed
if ( !look.and(combined).isNil() ) {
collision = true;
break;
}
combined.addAll(look);
}
return !collision;
}
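A small illustration of the collision check, calling the method above. It assumes the ANTLR tool class org.antlr.v4.analysis.AnalysisPipeline is on the classpath; the token type values are invented.

import org.antlr.v4.analysis.AnalysisPipeline;
import org.antlr.v4.runtime.misc.IntervalSet;

public class DisjointSketch {
    public static void main(String[] args) {
        // hypothetical token types
        int A = 10, B = 11, C = 12;

        IntervalSet[] separate = { IntervalSet.of(A), IntervalSet.of(B) };
        IntervalSet[] colliding = { IntervalSet.of(A, B), IntervalSet.of(B, C) };

        System.out.println(AnalysisPipeline.disjoint(separate));   // true: no shared lookahead
        System.out.println(AnalysisPipeline.disjoint(colliding));  // false: both alts can start with B
    }
}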

LeftRecursiveRuleAnalyzer.java source code (project: codebuff)

public AltAST addPrecedenceArgToRules(AltAST t, int prec) {
if ( t==null ) return null;
// get all top-level rule refs from ALT
List<GrammarAST> outerAltRuleRefs = t.getNodesWithTypePreorderDFS(IntervalSet.of(RULE_REF));
for (GrammarAST x : outerAltRuleRefs) {
RuleRefAST rref = (RuleRefAST)x;
boolean recursive = rref.getText().equals(ruleName);
boolean rightmost = rref == outerAltRuleRefs.get(outerAltRuleRefs.size()-1);
if ( recursive && rightmost ) {
GrammarAST dummyValueNode = new GrammarAST(new CommonToken(ANTLRParser.INT, ""+prec));
rref.setOption(LeftRecursiveRuleTransformer.PRECEDENCE_OPTION_NAME, dummyValueNode);
}
}
return t;
}

LL1PlusBlockSingleAlt.java source code (project: codebuff)

public LL1PlusBlockSingleAlt(OutputModelFactory factory, GrammarAST plusRoot, List<CodeBlockForAlt> alts) {
super(factory, plusRoot, alts);
BlockAST blkAST = (BlockAST)plusRoot.getChild(0);
PlusBlockStartState blkStart = (PlusBlockStartState)blkAST.atnState;
stateNumber = blkStart.loopBackState.stateNumber;
blockStartStateNumber = blkStart.stateNumber;
PlusBlockStartState plus = (PlusBlockStartState)blkAST.atnState;
this.decision = plus.loopBackState.decision;
IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
IntervalSet loopBackLook = altLookSets[0];
loopExpr = addCodeForLoopLookaheadTempVar(loopBackLook);
}

TestSetInline.java source code (project: codebuff)

public TestSetInline(OutputModelFactory factory, GrammarAST ast, IntervalSet set, int wordSize) {
super(factory, ast);
bitsetWordSize = wordSize;
Bitset[] withZeroOffset = createBitsets(factory, set, wordSize, true);
Bitset[] withoutZeroOffset = createBitsets(factory, set, wordSize, false);
this.bitsets = withZeroOffset.length <= withoutZeroOffset.length ? withZeroOffset : withoutZeroOffset;
this.varName = "_la";
}

Choice.java source code (project: codebuff)

public List<String[]> getAltLookaheadAsStringLists(IntervalSet[] altLookSets) {
List<String[]> altLook = new ArrayList<String[]>();
for (IntervalSet s : altLookSets) {
altLook.add(factory.getGenerator().getTarget().getTokenTypesAsTargetLabels(factory.getGrammar(), s.toArray()));
}
return altLook;
}

Choice.java source code (project: codebuff)

public TestSetInline addCodeForLookaheadTempVar(IntervalSet look) {
List<SrcOp> testOps = factory.getLL1Test(look, ast);
TestSetInline expr = Utils.find(testOps, TestSetInline.class);
if (expr != null) {
Decl d = new TokenTypeDecl(factory, expr.varName);
factory.getCurrentRuleFunction().addLocalDecl(d);
CaptureNextTokenType nextType = new CaptureNextTokenType(factory,expr.varName);
addPreambleOp(nextType);
}
return expr;
}

ThrowRecognitionException.java source code (project: codebuff)

public ThrowRecognitionException(OutputModelFactory factory, GrammarAST ast, IntervalSet expecting) {
super(factory, ast);
//this.decision = ((BlockStartState)ast.ATNState).decision;
grammarLine = ast.getLine();
grammarLine = ast.getCharPositionInLine();
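// note: grammarLine is assigned twice above; the second assignment presumably was meant for a char-position field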
grammarFile = factory.getGrammar().fileName;
//this.expecting = factory.createExpectingBitSet(ast, decision, expecting, "error");
// factory.defineBitSet(this.expecting);
}

LL1AltBlock.java source code (project: codebuff)

public LL1AltBlock(OutputModelFactory factory, GrammarAST blkAST, List<CodeBlockForAlt> alts) {
super(factory, blkAST, alts);
this.decision = ((DecisionState)blkAST.atnState).decision;
// Lookahead for each alt 1..n
IntervalSet[] altLookSets = factory.getGrammar().decisionLOOK.get(decision);
altLook = getAltLookaheadAsStringLists(altLookSets);
IntervalSet expecting = IntervalSet.or(altLookSets); // combine alt sets
this.error = getThrowNoViableAlt(factory, blkAST, expecting);
}

LL1Loop.java source code (project: codebuff)

public SrcOp addCodeForLoopLookaheadTempVar(IntervalSet look) {
TestSetInline expr = addCodeForLookaheadTempVar(look);
if (expr != null) {
CaptureNextTokenType nextType = new CaptureNextTokenType(factory, expr.varName);
addIterationOp(nextType);
}
return expr;
}