org.antlr.v4.runtime.TokenSource Java Examples
The following examples show how to use
org.antlr.v4.runtime.TokenSource.
You can go to the original project or source file by following the links above each example.
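Before the project examples, here is a minimal, self-contained sketch of the two usual ways a TokenSource is consumed: pulling tokens directly with nextToken() until Token.EOF, or wrapping the source in a CommonTokenStream for a parser. MyLexer is a placeholder for any ANTLR-generated lexer and is not part of the examples below.

    import org.antlr.v4.runtime.CharStreams;
    import org.antlr.v4.runtime.CommonTokenStream;
    import org.antlr.v4.runtime.Token;
    import org.antlr.v4.runtime.TokenSource;

    public class TokenSourceDemo {
        public static void main(String[] args) {
            // MyLexer is a hypothetical generated lexer; every ANTLR Lexer implements TokenSource.
            TokenSource source = new MyLexer(CharStreams.fromString("SELECT 1;"));

            // Pattern 1: drain the source manually until EOF.
            Token token;
            while ((token = source.nextToken()).getType() != Token.EOF) {
                System.out.println(token.getType() + " -> '" + token.getText() + "'");
            }

            // Pattern 2: buffer the source for a parser.
            CommonTokenStream tokens = new CommonTokenStream(new MyLexer(CharStreams.fromString("SELECT 1;")));
            // parser.setTokenStream(tokens); // with the matching generated parser
        }
    }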
Example #1
Source File: StatementSplitter.java From presto with Apache License 2.0 | 6 votes |
public StatementSplitter(String sql, Set<String> delimiters) {
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
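For context, a possible usage of this constructor is sketched below. The getCompleteStatements() and getPartialStatement() accessors are assumptions inferred from the two fields assigned above, not taken from the source file.

    // Hypothetical usage; accessor names are assumed, not shown in the example above.
    StatementSplitter splitter = new StatementSplitter("SELECT 1; SELECT 2", ImmutableSet.of(";"));
    // splitter.getCompleteStatements() -> one Statement: "SELECT 1" terminated by ";"
    // splitter.getPartialStatement()   -> "SELECT 2" (no trailing delimiter yet)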
Example #2
Source File: StatementSplitter.java From presto with Apache License 2.0 | 6 votes |
public static String squeezeStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
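As a quick illustration of the loop above (assuming the lexer's WS rule matches each run of whitespace as a single token), a call like this collapses internal whitespace to single spaces and trims the ends:

    String squeezed = StatementSplitter.squeezeStatement("SELECT *\n    FROM t\n    WHERE x = 1  ");
    // squeezed -> "SELECT * FROM t WHERE x = 1"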
Example #3
Source File: StatementSplitter.java From macrobase with Apache License 2.0 | 6 votes |
public static String squeezeStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
Example #4
Source File: StatementSplitter.java From macrobase with Apache License 2.0 | 6 votes |
public StatementSplitter(String sql, Set<String> delimiters) {
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
Example #5
Source File: PSITokenSource.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License | 6 votes |
/** Create an ANTLR Token from the current token type of the builder
 *  then advance the builder to next token (which ultimately calls an
 *  ANTLR lexer). The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType} and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel.
 *  So, whitespace and comments (typically hidden channel) will look like
 *  real tokens. Jetbrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type = ideaTType != null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: " + t);
    return t;
}
Example #6
Source File: BoaErrorListener.java From compiler with Apache License 2.0 | 6 votes |
public void error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    hasError = true;

    final String filename = tokens.getSourceName();
    System.err.print(filename.substring(filename.lastIndexOf(File.separator) + 1) + ": compilation failed: ");
    System.err.print("Encountered " + kind + " error ");
    if (offendingSymbol != null)
        System.err.print("\"" + offendingSymbol + "\" ");
    System.err.print("at line " + line + ", ");
    if (length > 0)
        System.err.print("columns " + charPositionInLine + "-" + (charPositionInLine + length - 1));
    else
        System.err.print("column " + charPositionInLine);
    System.err.println(". " + msg);

    underlineError(tokens, (Token) offendingSymbol, line, charPositionInLine, length);

    if (e != null)
        for (final StackTraceElement st : e.getStackTrace())
            System.err.println("\tat " + st);
    else
        System.err.println("\tat unknown stack");
}
Example #7
Source File: StatementSplitter.java From rainbow with Apache License 2.0 | 6 votes |
public StatementSplitter(String sql, Set<String> delimiters) {
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
Example #8
Source File: StatementSplitter.java From rainbow with Apache License 2.0 | 6 votes |
public static String squeezeStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
Example #9
Source File: BoaErrorListener.java From compiler with Apache License 2.0 | 6 votes |
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    // Print the offending source line with tabs expanded to four spaces
    // (the flattened original collapsed these whitespace literals).
    System.err.println(errorLine.replaceAll("\t", "    "));

    // Pad up to the error position, keeping column alignment for tabs.
    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");

    // Underline the offending span with carets.
    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");

    System.err.println();
}
Example #10
Source File: StatementSplitter.java From macrobase with Apache License 2.0 | 5 votes |
public static boolean isEmptyStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
Example #11
Source File: StatementSplitter.java From rainbow with Apache License 2.0 | 5 votes |
public static boolean isEmptyStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
Example #12
Source File: LocationRepointCommonTokenFactory.java From Concurnas with MIT License | 5 votes |
@Override
public CommonToken create(Pair<TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line, int charPositionInLine) {
    return super.create(source, type, text, channel, start, stop, /*line +*/ lineoffset, charPositionInLine + coloffset);
}
Example #13
Source File: InputHighlighter.java From presto with Apache License 2.0 | 5 votes |
@Override
public AttributedString highlight(LineReader reader, String buffer) {
    TokenSource tokens = StatementSplitter.getLexer(buffer, STATEMENT_DELIMITERS);
    AttributedStringBuilder builder = new AttributedStringBuilder();
    boolean error = false;
    while (true) {
        Token token = tokens.nextToken();
        int type = token.getType();
        if (type == Token.EOF) {
            break;
        }
        String text = token.getText();
        if (error || (type == SqlBaseLexer.UNRECOGNIZED)) {
            error = true;
            builder.styled(ERROR_STYLE, text);
        }
        else if (isKeyword(text)) {
            builder.styled(KEYWORD_STYLE, text);
        }
        else if (isString(type)) {
            builder.styled(STRING_STYLE, text);
        }
        else if (isNumber(type)) {
            builder.styled(NUMBER_STYLE, text);
        }
        else if (isComment(type)) {
            builder.styled(COMMENT_STYLE, text);
        }
        else {
            builder.append(text);
        }
    }
    return builder.toAttributedString();
}
Example #14
Source File: StatementSplitter.java From presto with Apache License 2.0 | 5 votes |
public static boolean isEmptyStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
Example #15
Source File: ErrorDetailsTest.java From batfish with Apache License 2.0 | 4 votes |
@Override
public TokenSource getTokenSource() {
    return null;
}
Example #16
Source File: UnrecognizedLineToken.java From batfish with Apache License 2.0 | 4 votes |
@Override
public TokenSource getTokenSource() {
    return null;
}
Example #17
Source File: CSSTokenFactory.java From jStyleParser with GNU Lesser General Public License v3.0 | 4 votes |
public CSSTokenFactory(Pair<TokenSource, CharStream> input, Lexer lexer, CSSLexerState ls, Class<? extends Lexer> lexerClass) {
    this.input = input;
    this.lexer = lexer;
    this.ls = ls;
    this.typeMapper = CSSToken.createDefaultTypeMapper(lexerClass);
}
Example #18
Source File: TokenStreamSubset.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 4 votes |
public TokenStreamSubset(TokenSource tokenSource) {
    super(tokenSource);
}
Example #19
Source File: StatementSplitter.java From macrobase with Apache License 2.0 | 4 votes |
private static TokenSource getLexer(String sql, Set<String> terminators) {
    requireNonNull(sql, "sql is null");
    CharStream stream = new CaseInsensitiveStream(new ANTLRInputStream(sql));
    return new DelimiterLexer(stream, terminators);
}
Example #20
Source File: ANTLRParserAdaptor.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License | 4 votes |
@NotNull
@Override
public ASTNode parse(IElementType root, PsiBuilder builder) {
    ProgressIndicatorProvider.checkCanceled();

    TokenSource source = new PSITokenSource(builder);
    TokenStream tokens = new CommonTokenStream(source);
    parser.setTokenStream(tokens);
    parser.setErrorHandler(new ErrorStrategyAdaptor()); // tweaks missing tokens
    parser.removeErrorListeners();
    parser.addErrorListener(new SyntaxErrorListener()); // trap errors
    ParseTree parseTree = null;

    PsiBuilder.Marker rollbackMarker = builder.mark();
    try {
        parseTree = parse(parser, root);
    }
    finally {
        rollbackMarker.rollbackTo();
    }

    // Now convert ANTLR parser tree to PSI tree by mimicking subtree
    // enter/exit with mark/done calls. I *think* this creates their parse
    // tree (AST as they call it) when you call {@link PsiBuilder#getTreeBuilt}
    ANTLRParseTreeToPSIConverter listener = createListener(parser, root, builder);
    PsiBuilder.Marker rootMarker = builder.mark();
    ParseTreeWalker.DEFAULT.walk(listener, parseTree);
    while (!builder.eof()) {
        ProgressIndicatorProvider.checkCanceled();
        builder.advanceLexer();
    }

    // NOTE: the parse tree returned from parse will be the usual ANTLR tree;
    // ANTLRParseTreeToPSIConverter will convert that to the analogous
    // jetbrains AST nodes. When parsing an entire file, the root IElementType
    // will be a IFileElementType.
    //
    // When trying to rename IDs and so on, you get a
    // dummy root and a type arg identifier IElementType.
    // This results in a weird tree that has for example
    // (ID (expr (primary ID))) with the ID IElementType
    // as a subtree root as well as the appropriate leaf
    // all the way at the bottom. The dummy ID root is a
    // CompositeElement and created by
    // ParserDefinition.createElement() despite being TokenIElementType.
    rootMarker.done(root);

    return builder.getTreeBuilt(); // calls the ASTFactory.createComposite() etc...
}
Example #21
Source File: CodeBuffTokenStream.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
public CodeBuffTokenStream(TokenSource tokenSource) {
    super(tokenSource);
}
Example #22
Source File: TokenStreamFactory.java From kalang with MIT License | 4 votes |
public static CommonTokenStream createTokenStream(TokenSource lexer) {
    return new CommonTokenStream(lexer);
}
Example #23
Source File: StatementSplitter.java From rainbow with Apache License 2.0 | 4 votes |
private static TokenSource getLexer(String sql, Set<String> terminators) {
    requireNonNull(sql, "sql is null");
    CharStream stream = new CaseInsensitiveStream(new ANTLRInputStream(sql));
    return new DelimiterLexer(stream, terminators);
}
Example #24
Source File: TokenSubStream.java From fuzzyc2cpg with GNU Lesser General Public License v3.0 | 4 votes |
public TokenSubStream(TokenSource tokenSource) {
    super(tokenSource);
}
Example #25
Source File: StatementSplitter.java From presto with Apache License 2.0 | 4 votes |
public static TokenSource getLexer(String sql, Set<String> terminators) {
    requireNonNull(sql, "sql is null");
    CharStream stream = new CaseInsensitiveStream(CharStreams.fromString(sql));
    return new DelimiterLexer(stream, terminators);
}
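Note the design difference from the otherwise identical getLexer variants in Examples #19 and #23: this one builds the CharStream with CharStreams.fromString(sql), while the others use new ANTLRInputStream(sql). ANTLRInputStream is deprecated in recent ANTLR 4 releases in favor of the CharStreams factory methods, but both feed the same DelimiterLexer and yield a TokenSource that can be drained or wrapped in a CommonTokenStream as shown throughout this page.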