@Override
public Token nextToken() {
if (stashedNext != null) {
previous = stashedNext;
stashedNext = null;
return previous;
}
Token next = super.nextToken();
if (insertSemicolon(previous, next)) {
stashedNext = next;
previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine());
return previous;
} else {
previous = next;
return next;
}
}
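The override above implements automatic semicolon insertion: when insertSemicolon signals a missing terminator, a synthetic SEMICOLON token is fabricated through the inherited token factory (_factory and _input are fields inherited from org.antlr.v4.runtime.Lexer) and the real token is stashed for the next call. A minimal sketch of the surrounding members it relies on; the insertSemicolon heuristic shown here is a placeholder, not the actual Painless rule:

private Token stashedNext = null; // real token buffered while a synthetic semicolon is returned
private Token previous = null;    // last token handed out by nextToken()

private boolean insertSemicolon(Token previous, Token next) {
    // Placeholder heuristic only: the real implementation inspects the types of
    // the previous and next tokens to decide whether a terminator is missing.
    return false;
}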
(The nextToken override above is from EnhancedPainlessLexer.java, project: elasticsearch_my.)
Example source code for the Java class org.antlr.v4.runtime.Lexer
Source file: AntlrParser.java
Project: pgcodekeeper
private static <T extends Parser> T makeBasicParser(Class<T> parserClass,
ANTLRInputStream stream, String parsedObjectName, List<AntlrError> errors) {
Lexer lexer;
Parser parser;
if (parserClass.isAssignableFrom(SQLParser.class)) {
lexer = new SQLLexer(stream);
parser = new SQLParser(new CommonTokenStream(lexer));
} else if (parserClass.isAssignableFrom(IgnoreListParser.class)) {
lexer = new IgnoreListLexer(stream);
parser = new IgnoreListParser(new CommonTokenStream(lexer));
} else {
throw new IllegalArgumentException("Unknown parser class: " + parserClass);
}
CustomAntlrErrorListener err = new CustomAntlrErrorListener(parsedObjectName, errors);
lexer.removeErrorListeners();
lexer.addErrorListener(err);
parser.removeErrorListeners();
parser.addErrorListener(err);
return parserClass.cast(parser);
}
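A usage sketch for the factory above; the SQL text, object name, and error list are made up for illustration:

List<AntlrError> errors = new ArrayList<>();
SQLParser parser = makeBasicParser(SQLParser.class,
        new ANTLRInputStream("SELECT 1;"), "inline.sql", errors);
// parser now reads tokens from a SQLLexer and reports syntax problems into errors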
Source file: ParserUtil.java
Project: antlr-examples
public static <L extends Lexer, P extends Parser> P newParser(
Function<CharStream, L> lexerFactory,
Function<TokenStream, P> parserFactory,
String input,
boolean useBailErrorStrategy,
boolean removeErrorListeners) {
CharStream charStream = new ANTLRInputStream(input);
L lexer = lexerFactory.apply(charStream);
if (removeErrorListeners) {
lexer.removeErrorListeners();
}
TokenStream tokenStream = new CommonTokenStream(lexer);
P parser = parserFactory.apply(tokenStream);
if (useBailErrorStrategy) {
parser.setErrorHandler(new BailErrorStrategy());
}
if (removeErrorListeners) {
parser.removeErrorListeners();
}
return parser;
}
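A usage sketch, assuming hypothetical generated classes MyLexer and MyParser with a start rule named startRule; constructor references supply the two factories, and ParseCancellationException comes from org.antlr.v4.runtime.misc:

MyParser parser = newParser(MyLexer::new, MyParser::new,
        "some input", true /* useBailErrorStrategy */, true /* removeErrorListeners */);
try {
    ParseTree tree = parser.startRule(); // hypothetical start rule
} catch (ParseCancellationException e) {
    // thrown by BailErrorStrategy on the first syntax error
}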
Source file: LangDescriptor.java
Project: codebuff
public LangDescriptor(String name,
String corpusDir,
String fileRegex,
Class<? extends Lexer> lexerClass,
Class<? extends Parser> parserClass,
String startRuleName,
int indentSize,
int singleLineCommentType)
{
this.name = name;
this.corpusDir = corpusDir;
this.fileRegex = fileRegex;
this.lexerClass = lexerClass;
this.parserClass = parserClass;
this.startRuleName = startRuleName;
this.indentSize = indentSize;
this.singleLineCommentType = singleLineCommentType;
}
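An illustrative construction; every concrete value here is hypothetical (made-up corpus path, generated classes, and token constant):

LangDescriptor javaDescriptor = new LangDescriptor(
        "java",                 // name
        "corpus/java/training", // corpusDir (hypothetical path)
        ".*\\.java",            // fileRegex
        Java8Lexer.class,       // hypothetical generated lexer
        Java8Parser.class,      // hypothetical generated parser
        "compilationUnit",      // startRuleName
        4,                      // indentSize
        Java8Lexer.LINE_COMMENT // singleLineCommentType (hypothetical token type)
);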
Source file: Grammar.java
Project: codebuff
/**
* Gets the name by which a token can be referenced in the generated code.
* For tokens defined in a {@code tokens{}} block or via a lexer rule, this
* is the declared name of the token. For token types generated by the use
* of a string literal within a parser rule of a combined grammar, this is
* the automatically generated token type which includes the
* {@link #AUTO_GENERATED_TOKEN_NAME_PREFIX} prefix. For types which are not
* associated with a defined token, this method returns
* {@link #INVALID_TOKEN_NAME}.
*
* @param ttype The token type.
* @return The name of the token with the specified type.
*/
public String getTokenName(int ttype) {
// inside any target's char range and is lexer grammar?
if ( isLexer() &&
ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE )
{
return CharSupport.getANTLRCharLiteralForChar(ttype);
}
if ( ttype==Token.EOF ) {
return "EOF";
}
if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
return typeToTokenList.get(ttype);
}
return INVALID_TOKEN_NAME;
}
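Illustrative calls (a sketch, not output from a particular grammar; the T__0 name shows the auto-generated prefix mentioned in the Javadoc):

// g.getTokenName(Token.EOF)        -> "EOF"
// g.getTokenName(MyLexer.ID)       -> "ID"    (declared lexer rule)
// g.getTokenName(someLiteralType)  -> "T__0"  (auto-generated for a string literal)
// g.getTokenName(12345)            -> INVALID_TOKEN_NAME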
Source file: CharSupport.java
Project: codebuff
/** Return a string representing the escaped char for code c. E.g., If c
* has value 0x100, you will get "\u0100". ASCII gets the usual
* char (non-hex) representation. Control characters are spit out
* as unicode. While this is specially set up for returning Java strings,
* it can be used by any language target that has the same syntax. :)
*/
public static String getANTLRCharLiteralForChar(int c) {
if ( c< Lexer.MIN_CHAR_VALUE ) {
return "'<INVALID>'";
}
if ( c<ANTLRLiteralCharValueEscape.length && ANTLRLiteralCharValueEscape[c]!=null ) {
return '\''+ANTLRLiteralCharValueEscape[c]+'\'';
}
if ( Character.UnicodeBlock.of((char)c)==Character.UnicodeBlock.BASIC_LATIN &&
!Character.isISOControl((char)c) ) {
if ( c=='\\' ) {
return "'\\\\'";
}
if ( c=='\'') {
return "'\\''";
}
return '\''+Character.toString((char)c)+'\'';
}
// turn on the bit above max "\uFFFF" value so that we pad with zeros
// then only take last 4 digits
String hex = Integer.toHexString(c|0x10000).toUpperCase().substring(1,5);
String unicodeStr = "'\\u"+hex+"'";
return unicodeStr;
}
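Tracing the final branch for the Javadoc's own example, c = 0x100 (just the arithmetic from the code above):

// c            = 0x100
// c | 0x10000  = 0x10100          -> Integer.toHexString gives "10100"
// substring(1, 5)                 -> "0100"  (always four hex digits, zero padded)
// returned literal                -> '\u0100'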
Source file: ThriftListener.java
Project: thrifty
private List<Token> getLeadingComments(Token token) {
List<Token> hiddenTokens = tokenStream.getHiddenTokensToLeft(token.getTokenIndex(), Lexer.HIDDEN);
if (hiddenTokens == null || hiddenTokens.isEmpty()) {
return Collections.emptyList();
}
List<Token> comments = new ArrayList<>(hiddenTokens.size());
for (Token hiddenToken : hiddenTokens) {
if (isComment(hiddenToken) && !trailingDocTokenIndexes.get(hiddenToken.getTokenIndex())) {
comments.add(hiddenToken);
}
}
return comments;
}
Source file: ThriftListener.java
Project: thrifty
/**
* Read comments following the given token, until the first newline is encountered.
*
* INVARIANT:
* Assumes that the parse tree is being walked top-down, left to right!
*
* Trailing-doc tokens are marked as such, so that subsequent searches for "leading"
* doc don't grab tokens already used as "trailing" doc. If the walk order is *not*
* top-down, left-to-right, then the assumption underpinning the separation of leading
* and trailing comments is broken.
*
* @param endToken the token from which to search for trailing comment tokens.
* @return a list, possibly empty, of all trailing comment tokens.
*/
private List<Token> getTrailingComments(Token endToken) {
List<Token> hiddenTokens = tokenStream.getHiddenTokensToRight(endToken.getTokenIndex(), Lexer.HIDDEN);
if (hiddenTokens == null || hiddenTokens.isEmpty()) {
return Collections.emptyList();
}
Token maybeTrailingDoc = hiddenTokens.get(0); // only one trailing comment is possible
if (isComment(maybeTrailingDoc)) {
trailingDocTokenIndexes.set(maybeTrailingDoc.getTokenIndex());
return Collections.singletonList(maybeTrailingDoc);
}
return Collections.emptyList();
}
Source file: ParseUtils.java
Project: mdetect
public static Pair<Parser, Lexer> parsePHP(String filePath) {
AntlrCaseInsensitiveFileStream input;
try {
input = new AntlrCaseInsensitiveFileStream(filePath);
} catch (IOException e) {
e.printStackTrace();
return null;
}
PHPLexer lexer = new PHPLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
PHPParser parser = new InterruptablePHPParser(tokens, filePath);
/* turn on prediction mode to speed up parsing */
parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
Pair<Parser, Lexer> retval = new Pair<Parser, Lexer>(parser, lexer);
return retval;
}
Source file: ParseUtils.java
Project: mdetect
public static Document processFile(String filePath) {
Pair<Parser, Lexer> pl = parsePHP(filePath);
PHPParser parser = (PHPParser) pl.a;
parser.setBuildParseTree(true);
/*
* htmlDocument is the start rule (the top-level rule)
* for the PHP grammar
*/
ParserRuleContext tree = parser.htmlDocument();
List<String> ruleNames = Arrays.asList(parser.getRuleNames());
Map<Integer, String> invTokenMap = getInvTokenMap(parser);
TokenStream tokenStream = parser.getTokenStream();
ParseTreeDOMSerializer ptSerializer = new ParseTreeDOMSerializer(ruleNames, invTokenMap, tokenStream);
ParseTreeWalker.DEFAULT.walk(ptSerializer, tree);
Document result= ptSerializer.getDOMDocument();
return result;
}
Source file: OTLDListener.java
Project: transportlanguage
/**
* Parses the supplied input using the OTLDListener and returns it after walking it
* @param reader input to parse
* @return walked OTLDListener
* @throws IOException
*/
public static OTLDListener parseFile(InputStream reader) throws IOException {
OTLDErrorListener errorListener = new OTLDErrorListener();
ANTLRInputStream stream = new ANTLRInputStream(reader);
Lexer lexer = new otldLexer(stream);
lexer.removeErrorListeners();
lexer.addErrorListener(errorListener);
TokenStream tokens = new CommonTokenStream(lexer);
otldParser parser = new otldParser(tokens);
parser.removeErrorListeners();
parser.addErrorListener(errorListener);
ParseTree tree = parser.program();
OTLDListener railroad = new OTLDListener();
if (errorListener.getErrors().isEmpty()) {
ParseTreeWalker walker = new ParseTreeWalker();
walker.walk(railroad, tree);
} else {
railroad.errors.addAll(errorListener.getErrors());
}
return railroad;
}
Source file: ECParser.java
Project: netbeans-editorconfig-editor
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
this.snapshot = snapshot;
String text = snapshot.getText().toString();
ANTLRInputStream input = new ANTLRInputStream(text);
Lexer lexer = new EditorConfigLexer(input);
lexer.removeErrorListeners();
CommonTokenStream tokens = new CommonTokenStream(lexer);
parser = new EditorConfigParser(tokens);
parser.removeErrorListeners();
syntaxErrors = new ArrayList<>();
EditorConfigErrorListener errorListener = new EditorConfigErrorListener(syntaxErrors);
parser.addErrorListener(errorListener);
EditorConfigParser.FileContext root = parser.file();
result = new ECParserResult(snapshot, parser, root);
}
Source file: LexerErrorListener.java
Project: jetbrains-plugin-st4
@Override
public void syntaxError(Recognizer<?, ?> recognizer,
Object offendingSymbol,
int line,
int charPositionInLine,
String msg,
RecognitionException e)
{
if ( offendingSymbol==null ) {
final Lexer lexer = (Lexer) recognizer;
int i = lexer.getCharIndex();
final int n = lexer.getInputStream().size();
if (i >= n) {
i = n - 1;
}
final String text = lexer.getInputStream().getText(new Interval(i, i));
CommonToken t = (CommonToken) lexer.getTokenFactory().create(Token.INVALID_TYPE, text);
t.setStartIndex(i);
t.setStopIndex(i);
t.setLine(line);
t.setCharPositionInLine(charPositionInLine);
offendingSymbol = t;
}
// System.out.println("lex error: " + offendingSymbol);
issues.add(new Issue(msg, (Token)offendingSymbol));
}
Source file: GrammarCompletionProvider.java
Project: goworks
boolean isContext(Token token, int offset, boolean allowInStrings, boolean allowInActions) {
if (token == null) {
return false;
}
switch (token.getType()) {
case GrammarLexer.LEXER_CHAR_SET:
case GrammarLexer.ACTION_COMMENT:
return false;
case GrammarLexer.STRING_LITERAL:
case GrammarLexer.DOUBLE_QUOTE_STRING_LITERAL:
return allowInStrings;
case GrammarLexer.ARG_ACTION_WORD:
case GrammarLexer.ACTION_WORD:
return allowInActions;
case GrammarLexer.WS:
return true;
default:
return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}
}
Source file: GoCompletionProvider.java
Project: goworks
static boolean isGoContext(Token token, int offset, boolean allowInStrings) {
if (token == null) {
return false;
}
switch (token.getType()) {
case GoLexer.COMMENT:
return false;
case GoLexer.CharLiteral:
case GoLexer.StringLiteral:
return allowInStrings;
case GoLexer.WS:
case GoLexer.NEWLINE:
return true;
default:
return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}
}
Source file: ThrowExceptionErrorListener.java
Project: criteria
@Override
public final void syntaxError(final Recognizer<?, ?> recognizer,
final Object offendingSymbol, final int line,
final int charPositionInLine, final String msg,
final RecognitionException e) {
String input;
if (recognizer instanceof Lexer) {
final CharStream cs = ((Lexer) recognizer).getInputStream();
input = cs.getText(new Interval(0, cs.size()));
} else if (recognizer instanceof Parser) {
final TokenStream tokens = ((Parser) recognizer).getInputStream();
if (tokens != null) {
input = tokens.getText();
} else {
input = "<unknown input>";
}
} else {
input = "<unknown input>";
}
throw new AntlrParseException(input, line, charPositionInLine, msg);
}
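To get the fail-fast behavior, the listener is typically installed on both the lexer and the parser; a sketch with placeholder generated classes (XyzLexer, XyzParser) and assuming the listener has a no-argument constructor:

XyzLexer lexer = new XyzLexer(new ANTLRInputStream(input));
lexer.removeErrorListeners();
lexer.addErrorListener(new ThrowExceptionErrorListener());

XyzParser parser = new XyzParser(new CommonTokenStream(lexer));
parser.removeErrorListeners();
parser.addErrorListener(new ThrowExceptionErrorListener());
// any lexer or parser error now surfaces as AntlrParseException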
Source file: LexerActionExecutor.java
Project: Scratch-ApuC
/**
* Execute the actions encapsulated by this executor within the context of a
* particular {@link Lexer}.
*
* <p>This method calls {@link IntStream#seek} to set the position of the
* {@code input} {@link CharStream} prior to calling
* {@link LexerAction#execute} on a position-dependent action. Before the
* method returns, the input position will be restored to the same position
* it was in when the method was invoked.</p>
*
* @param lexer The lexer instance.
* @param input The input stream which is the source for the current token.
* When this method is called, the current {@link IntStream#index} for
* {@code input} should be the start of the following token, i.e. 1
* character past the end of the current token.
* @param startIndex The token start index. This value may be passed to
* {@link IntStream#seek} to set the {@code input} position to the beginning
* of the token.
*/
public void execute(@NotNull Lexer lexer, CharStream input, int startIndex) {
boolean requiresSeek = false;
int stopIndex = input.index();
try {
for (LexerAction lexerAction : lexerActions) {
if (lexerAction instanceof LexerIndexedCustomAction) {
int offset = ((LexerIndexedCustomAction)lexerAction).getOffset();
input.seek(startIndex + offset);
lexerAction = ((LexerIndexedCustomAction)lexerAction).getAction();
requiresSeek = (startIndex + offset) != stopIndex;
}
else if (lexerAction.isPositionDependent()) {
input.seek(stopIndex);
requiresSeek = false;
}
lexerAction.execute(lexer);
}
}
finally {
if (requiresSeek) {
input.seek(stopIndex);
}
}
}
Source file: ECParser.java
Project: editorconfig-netbeans
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) throws ParseException {
this.snapshot = snapshot;
String text = snapshot.getText().toString();
ANTLRInputStream input = new ANTLRInputStream(text);
Lexer lexer = new EditorConfigLexer(input);
lexer.removeErrorListeners();
CommonTokenStream tokens = new CommonTokenStream(lexer);
parser = new EditorConfigParser(tokens);
parser.removeErrorListeners();
syntaxErrors = new ArrayList<>();
EditorConfigErrorListener errorListener = new EditorConfigErrorListener(syntaxErrors);
parser.addErrorListener(errorListener);
EditorConfigParser.FileContext root = parser.file();
result = new ECParserResult(snapshot, parser, root);
}
Source file: AdlDeserializer.java
Project: adl2-core
public Archetype parse(Reader reader) throws IOException {
try {
CharStream charStream = new ANTLRInputStream(reader);
Lexer lexer = new adlLexer(charStream);
adlParser parser = new adlParser(new BufferedTokenStream(lexer));
AccumulatingErrorListener errorHandler = new AccumulatingErrorListener();
parser.removeErrorListeners();
parser.addErrorListener(errorHandler);
adlParser.AdlContext context = parser.adl();
if (!errorHandler.getErrors().isEmpty()) {
throw new AdlParserException(Joiner.on("\n").join(errorHandler.getErrors()));
}
AdlTreeParser treeParser = new AdlTreeParser();
return treeParser.parseAdl(context);
} finally {
reader.close();
}
}
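A calling sketch; the file name is made up, and the deserializer is assumed to be constructible with a default constructor (parse closes the reader itself, as the finally block above shows):

Reader reader = new InputStreamReader(new FileInputStream("example.adls"), StandardCharsets.UTF_8);
Archetype archetype = new AdlDeserializer().parse(reader); // parse() closes the reader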
Source file: CSSTokenRecovery.java
Project: jStyleParser
public CSSTokenRecovery(Lexer lexer,
CharStream input,
CSSLexerState ls,
Logger log) {
this.lexer = lexer;
this.input = input;
// this.state = state;
this.ls = ls;
this.log = log;
this.expectedToken = new Stack<Integer>();
this.eof = false;
lexerTypeMapper = CSSToken.createDefaultTypeMapper(lexer.getClass());
typeMapper = new CSSToken.TypeMapper(CSSTokenRecovery.class, lexer.getClass(),
"APOS", "QUOT", "RPAREN", "RCURLY", "IMPORT",
"CHARSET", "STRING", "INVALID_STRING");
}
Source file: ParserUtils.java
Project: antlr-denter
public static <P extends Parser> P getParser(Class<? extends Lexer> lexerClass, Class<P> parserClass, String source) {
Lexer lexer = getLexer(lexerClass, source);
TokenStream tokens = new CommonTokenStream(lexer);
P parser;
try {
parser = parserClass.getConstructor(TokenStream.class).newInstance(tokens);
} catch (Exception e) {
throw new IllegalArgumentException("couldn't invoke parser constructor", e);
}
parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
parser.removeErrorListeners(); // don't spit to stderr
parser.addErrorListener(new DiagnosticErrorListener());
parser.addErrorListener(new AntlrFailureListener());
return parser;
}
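A usage sketch with hypothetical generated classes; the source string uses the indentation-as-blocks style this project (antlr-denter) targets:

MyDentParser parser = getParser(MyDentLexer.class, MyDentParser.class,
        "if x:\n    y()\n");
ParseTree tree = parser.file(); // hypothetical start rule; problems are reported by the listeners added above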
Source file: BenchGramBenchmark.java
Project: antlr-denter
private <L extends Lexer> void warmupAndRun(Class<L> lexerClass, String source,
Function<? super L, Tokens> lexerToIter,
double multiplier)
{
System.out.printf("[%s]: %d tokens in %d chars%n",
lexerClass.getSimpleName(),
countTokens(lexerClass, source),
source.toCharArray().length);
for (int i = 0; i < WARMUP_REPS; ++i) {
timedRuns(lexerClass, source, "warmup " + i, Math.round(WARMUP * multiplier * globalMultiplier), lexerToIter);
}
System.out.println();
System.out.println("Starting main runs...");
double time = timedRuns(lexerClass, source, "runs", Math.round(RUNS * multiplier * globalMultiplier), lexerToIter);
System.out.println();
System.out.println();
fail(time + " ms per run"); // easy reporting.
}
Source file: macEditorParser.java
Project: NBStudio
@Override
public void parse(Snapshot snapshot, Task task, SourceModificationEvent event) {
this.snapshot = snapshot;
Document document = snapshot.getSource().getDocument(true);
syntaxErrors = (List<SyntaxError>) document.getProperty("syntaxErrors");
if (syntaxErrors == null) {
syntaxErrors = new ArrayList<>();
document.putProperty("syntaxErrors", syntaxErrors);
}
embeddedOffset = snapshot.getOriginalOffset(0);
// Logger.Log("macparse: " + syntaxErrors.size() + " - " + embeddedOffset);
if (embeddedOffset <= 0) {
syntaxErrors.clear();
}
ANTLRInputStream input = new ANTLRInputStream(snapshot.getText().toString());
Lexer lexer = new macLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
macParser = new macParser(tokens);
macParser.removeErrorListeners();
macParser.addErrorListener(new ErrorListener(syntaxErrors, embeddedOffset));
try {
ProgContext prog = macParser.prog();
} catch (RecognitionException ex) {
ex.printStackTrace();
}
}
Source file: ClassFile.java
Project: NBStudio
@Override
public void save(byte[] data) {
ANTLRInputStream input = new ANTLRInputStream(new String(data, charset));
Lexer lexer = new clsLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
clsParser clsParser = new clsParser(tokens);
try {
clsParser.ProgContext prog = clsParser.prog();
ParseTreeWalker walker = new ParseTreeWalker();
CLSParserListerer4Save listener = new CLSParserListerer4Save(clsParser, db, cls);
walker.walk(listener, prog);
cls._save();
db.utilities().compileCacheClass(getName(), "cdfk-u");
} catch (CacheException | RecognitionException ex) {
System.out.println("SaveException: " + ex.getLocalizedMessage());
// ex.printStackTrace();
}
}
Source file: GrammarCompletionProvider.java
Project: antlrworks2
boolean isContext(Token token, int offset, boolean allowInStrings, boolean allowInActions) {
if (token == null) {
return false;
}
switch (token.getType()) {
case GrammarLexer.LEXER_CHAR_SET:
case GrammarLexer.ACTION_COMMENT:
return false;
case GrammarLexer.STRING_LITERAL:
case GrammarLexer.DOUBLE_QUOTE_STRING_LITERAL:
return allowInStrings;
case GrammarLexer.ARG_ACTION_WORD:
case GrammarLexer.ACTION_WORD:
return allowInActions;
case GrammarLexer.WS:
return true;
default:
return token.getChannel() == Lexer.DEFAULT_TOKEN_CHANNEL;
}
}
Source file: ANTLRAssistBehavior.java
Project: gitplex-mit
public ANTLRAssistBehavior(Class<? extends Parser> parserClass, Class<? extends Lexer> lexerClass,
String grammarFiles[], String tokenFile, String ruleName) {
this.lexerClass = lexerClass;
this.parserClass = parserClass;
codeAssist = new CodeAssist(lexerClass, grammarFiles, tokenFile) {
@Override
protected List<InputSuggestion> suggest(ParentedElement element, String matchWith) {
return ANTLRAssistBehavior.this.suggest(element, matchWith);
}
@Override
protected List<String> getHints(ParentedElement expectedElement, String matchWith) {
return ANTLRAssistBehavior.this.getHints(expectedElement, matchWith);
}
@Override
protected InputSuggestion wrapAsSuggestion(ParentedElement expectedElement, String suggestedLiteral,
boolean complete) {
return ANTLRAssistBehavior.this.wrapAsSuggestion(expectedElement, suggestedLiteral, complete);
}
@Override
protected int getEndOfMatch(ElementSpec spec, String content) {
return ANTLRAssistBehavior.this.getEndOfMatch(spec, content);
}
};
this.ruleName = ruleName;
}
Source file: ANTLRAssistBehavior.java
Project: gitplex-mit
private Constructor<? extends Lexer> getLexerCtor() {
if (lexerCtor == null) {
try {
lexerCtor = lexerClass.getConstructor(CharStream.class);
} catch (NoSuchMethodException | SecurityException e) {
throw new RuntimeException(e);
}
}
return lexerCtor;
}
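A companion helper showing how the cached constructor would typically be used (a sketch, not code from the project):

private Lexer newLexer(String content) {
    try {
        return getLexerCtor().newInstance(new ANTLRInputStream(content));
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
    }
}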
Source file: LexerErrorListener.java
Project: elasticsearch-jdbc
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
int charPositionInLine, String msg, RecognitionException e) {
String position = "line " + line + ", pos " + charPositionInLine;
String charText = "";
String hint = "";
if (recognizer != null && recognizer instanceof Lexer) {
Lexer lexer = (Lexer) recognizer;
String fullText = lexer.getInputStream().toString();
charText = String.valueOf(fullText.charAt(lexer.getCharIndex()));
hint = AntlrUtils.underlineError(fullText, charText, line, charPositionInLine);
}
throw new LexicalErrorException(position + " near " + charText + " : " + msg + "\n" + hint, e);
}
Source file: ProtobufEditorParser.java
Project: protobuf-netbeans-plugin
@Override
public void parse(
Snapshot snapshot,
Task task,
SourceModificationEvent event) throws ParseException {
Assert.notNull(snapshot);
this.snapshot = snapshot;
CharStream input =
new ANTLRInputStream(snapshot.getText().toString());
Lexer lexer = new ProtobufLexer(input);
CommonTokenStream tokens = new CommonTokenStream(lexer);
ProtobufParser parser = new ProtobufParser(tokens);
parser.removeErrorListeners();
ParsingErrorListener listener = new ParsingErrorListener();
parser.addErrorListener(listener);
ParseTree tree = parser.proto();
// TODO def and ref phases
this.errors.clear();
this.errors.addAll(listener.errors());
}