Java Code Examples for org.antlr.v4.runtime.Token#EOF
The following examples show how to use org.antlr.v4.runtime.Token#EOF.
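Most of the examples below share one basic pattern: read tokens from a TokenSource or TokenStream and stop at, or special-case, the token whose type equals Token.EOF. The following is a minimal sketch of that loop; the class name EofLoopSketch and the printing are illustrative and not taken from any project below.

import org.antlr.v4.runtime.Lexer;
import org.antlr.v4.runtime.Token;

public class EofLoopSketch {
    // Pull tokens one at a time; Token.EOF marks the end of the character stream,
    // so it serves as the loop's termination sentinel rather than a token to process.
    public static void printTokens(Lexer lexer) {
        for (Token t = lexer.nextToken(); t.getType() != Token.EOF; t = lexer.nextToken()) {
            System.out.println(t.getType() + " '" + t.getText() + "'");
        }
    }
}

Examples 1, 6, and 11 below are project-specific variants of this loop.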
Example 1
Source File: StatementSplitter.java From macrobase with Apache License 2.0
public StatementSplitter(String sql, Set<String> delimiters) {
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
Example 2
Source File: AntlrContextTest.java From sonar-tsql-plugin with GNU General Public License v3.0
@Test
public void compareLinesWithAntrl() throws Throwable {
    String s = "select " + "*" + "from dbo.test";
    AntlrContext result = AntlrUtils.getRequest(s);
    for (Token t : result.getStream().getTokens()) {
        if (t.getType() == Token.EOF) {
            continue;
        }
        int[] start = result.getLineAndColumn(t.getStartIndex());
        int[] end = result.getLineAndColumn(t.getStopIndex());
        Assert.assertNotNull(start);
        Assert.assertNotNull(end);
        Assert.assertEquals(t.getLine(), start[0]);
        Assert.assertEquals(t.getCharPositionInLine(), start[1]);
    }
}
Example 3
Source File: Grammar.java From codebuff with BSD 2-Clause "Simplified" License
/**
 * Gets the name by which a token can be referenced in the generated code.
 * For tokens defined in a {@code tokens{}} block or via a lexer rule, this
 * is the declared name of the token. For token types generated by the use
 * of a string literal within a parser rule of a combined grammar, this is
 * the automatically generated token type which includes the
 * {@link #AUTO_GENERATED_TOKEN_NAME_PREFIX} prefix. For types which are not
 * associated with a defined token, this method returns
 * {@link #INVALID_TOKEN_NAME}.
 *
 * @param ttype The token type.
 * @return The name of the token with the specified type.
 */
public String getTokenName(int ttype) {
    // inside any target's char range and is lexer grammar?
    if ( isLexer() && ttype >= Lexer.MIN_CHAR_VALUE && ttype <= Lexer.MAX_CHAR_VALUE ) {
        return CharSupport.getANTLRCharLiteralForChar(ttype);
    }
    if ( ttype==Token.EOF ) {
        return "EOF";
    }
    if (ttype >= 0 && ttype < typeToTokenList.size() && typeToTokenList.get(ttype) != null) {
        return typeToTokenList.get(ttype);
    }
    return INVALID_TOKEN_NAME;
}
Example 4
Source File: Trainer.java From codebuff with BSD 2-Clause "Simplified" License
public void computeFeatureVectorForToken(int i) {
    Token curToken = tokens.get(i);
    if ( curToken.getType()==Token.EOF ) return;

    int[] features = getFeatures(i);
    int injectNL_WS = getInjectWSCategory(tokens, i);

    int aligned = -1; // "don't care"
    if ( (injectNL_WS&0xFF)==CAT_INJECT_NL ) {
        TerminalNode node = tokenToNodeMap.get(curToken);
        aligned = getAlignmentCategory(doc, node, indentSize);
    }

    // track feature -> injectws, align decisions for token i
    corpus.addExemplar(doc, features, injectNL_WS, aligned);
}
Example 5
Source File: CQLErrorStrategy.java From PoseidonX with Apache License 2.0
@NotNull
private String getText(TokenStream tokens, Interval interval) {
    int start = interval.a;
    int stop = interval.b;
    if (start < 0 || stop < 0) return "";
    if (stop >= tokens.size()) stop = tokens.size() - 1;
    StringBuilder buf = new StringBuilder();
    for (int i = start; i <= stop; i++) {
        Token t = tokens.get(i);
        if (t.getType() == Token.EOF) break;
        buf.append(t.getText());
        if (i != stop) {
            buf.append(" ");
        }
    }
    return buf.toString();
}
Example 6
Source File: StatementSplitter.java From macrobase with Apache License 2.0
public static String squeezeStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
Example 7
Source File: PSITokenSource.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License
/** Create an ANTLR Token from the current token type of the builder
 *  then advance the builder to next token (which ultimately calls an
 *  ANTLR lexer). The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType} and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel.
 *  So, whitespace and comments (typically hidden channel) will look like
 *  real tokens. Jetbrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType)builder.getTokenType();
    int type = ideaTType!=null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
Example 8
Source File: ParseTreeToXml.java From netbeans with Apache License 2.0
@Override
public Document visitTerminal(TerminalNode node) {
    if (node.getSymbol().getType() != Token.EOF) {
        copyAttrs(getTerminalNodeElement(node), node);
    }
    super.visitTerminal(node);
    return doc;
}
Example 9
Source File: XpathParser.java From JsoupXpath with Apache License 2.0
public final AbbreviatedStepContext abbreviatedStep() throws RecognitionException {
    AbbreviatedStepContext _localctx = new AbbreviatedStepContext(_ctx, getState());
    enterRule(_localctx, 16, RULE_abbreviatedStep);
    int _la;
    try {
        enterOuterAlt(_localctx, 1);
        {
        setState(103);
        _la = _input.LA(1);
        if ( !(_la==DOT || _la==DOTDOT) ) {
            _errHandler.recoverInline(this);
        }
        else {
            if ( _input.LA(1)==Token.EOF ) matchedEOF = true;
            _errHandler.reportMatch(this);
            consume();
        }
        }
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }
    finally {
        exitRule();
    }
    return _localctx;
}
Example 10
Source File: XpathParser.java From JsoupXpath with Apache License 2.0
public final NCNameContext nCName() throws RecognitionException {
    NCNameContext _localctx = new NCNameContext(_ctx, getState());
    enterRule(_localctx, 52, RULE_nCName);
    int _la;
    try {
        enterOuterAlt(_localctx, 1);
        {
        setState(224);
        _la = _input.LA(1);
        if ( !(_la==AxisName || _la==NCName) ) {
            _errHandler.recoverInline(this);
        }
        else {
            if ( _input.LA(1)==Token.EOF ) matchedEOF = true;
            _errHandler.reportMatch(this);
            consume();
        }
        }
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }
    finally {
        exitRule();
    }
    return _localctx;
}
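In the generated parser code of Examples 9 and 10, the line "if ( _input.LA(1)==Token.EOF ) matchedEOF = true;" records whether the token just matched was the end-of-file token. The same one-token lookahead against Token.EOF is also handy in hand-written code to verify that a parser consumed its entire input. Below is a minimal sketch under that assumption; the class and method names are made up for illustration and do not come from any project on this page.

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.Token;

public class EofCheck {
    // After invoking a start rule, the next lookahead token should be EOF
    // if the rule consumed all input (unless the grammar's start rule
    // already matches EOF explicitly).
    public static boolean consumedAllInput(Parser parser) {
        return parser.getInputStream().LA(1) == Token.EOF;
    }
}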
Example 11
Source File: StatementSplitter.java From rainbow with Apache License 2.0
public static boolean isEmptyStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
Example 12
Source File: Dbg.java From codebuff with BSD 2-Clause "Simplified" License
/** Compute a document difference metric 0-1.0 between two documents that
 *  are identical other than (likely) the whitespace and comments.
 *
 *  1.0 means the docs are maximally different and 0 means docs are identical.
 *
 *  The Levenshtein distance between the docs counts only
 *  whitespace diffs as the non-WS content is identical.
 *  Levenshtein distance is bounded by 0..max(len(doc1),len(doc2)) so
 *  we normalize the distance by dividing by max WS count.
 *
 *  TODO: can we simplify this to a simple walk with two
 *  cursors through the original vs formatted counting
 *  mismatched whitespace? real text are like anchors.
 */
public static double docDiff(String original, String formatted, Class<? extends Lexer> lexerClass) throws Exception {
    // Grammar must strip all but real tokens and whitespace (and put that on hidden channel)
    CodeBuffTokenStream original_tokens = Tool.tokenize(original, lexerClass);
    // String s = original_tokens.getText();
    CodeBuffTokenStream formatted_tokens = Tool.tokenize(formatted, lexerClass);
    // String t = formatted_tokens.getText();

    // walk token streams and examine whitespace in between tokens
    int i = -1;
    int ws_distance = 0;
    int original_ws = 0;
    int formatted_ws = 0;
    while ( true ) {
        Token ot = original_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ot==null || ot.getType()==Token.EOF ) break;
        List<Token> ows = original_tokens.getHiddenTokensToLeft(ot.getTokenIndex());
        original_ws += tokenText(ows).length();

        Token ft = formatted_tokens.LT(i); // TODO: FIX THIS! can't use LT()
        if ( ft==null || ft.getType()==Token.EOF ) break;
        List<Token> fws = formatted_tokens.getHiddenTokensToLeft(ft.getTokenIndex());
        formatted_ws += tokenText(fws).length();

        ws_distance += whitespaceEditDistance(tokenText(ows), tokenText(fws));
        i++;
    }
    // it's probably ok to ignore ws diffs after last real token
    int max_ws = Math.max(original_ws, formatted_ws);
    double normalized_ws_distance = ((float) ws_distance)/max_ws;
    return normalized_ws_distance;
}
Example 13
Source File: PSIElementTypeFactory.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License
public static TokenIElementType getEofElementType(Language language) {
    TokenIElementType result = eofIElementTypesCache.get(language);
    if (result == null) {
        result = new TokenIElementType(Token.EOF, "EOF", language);
        eofIElementTypesCache.put(language, result);
    }
    return result;
}
Example 14
Source File: BeetlAntlrErrorStrategy.java From beetl2.0 with BSD 3-Clause "New" or "Revised" License
protected void reportNoViableAlternative(@NotNull Parser recognizer, @NotNull NoViableAltException e) {
    TokenStream tokens = recognizer.getInputStream();
    String input;
    if (tokens instanceof TokenStream) {
        if (e.getStartToken().getType() == Token.EOF)
            input = "<文件尾>"; // "<end of file>"
        else
            input = tokens.getText(e.getStartToken(), e.getOffendingToken());
    }
    else {
        input = "<未知输入>"; // "<unknown input>"
    }
    BeetlException exception = null;
    if (keys.contains(e.getOffendingToken().getText())) {
        // message: "keyword <token> is not allowed to appear here: <input>"
        exception = new BeetlParserException(BeetlException.PARSER_VIABLE_ERROR,
                "不允许" + e.getOffendingToken().getText() + "关键出现在这里" + ":" + escapeWSAndQuote(input), e);
    }
    else {
        exception = new BeetlParserException(BeetlException.PARSER_VIABLE_ERROR, escapeWSAndQuote(input), e);
    }
    // String msg = "no viable alternative at input " + escapeWSAndQuote(input);
    exception.pushToken(this.getGrammarToken(e.getOffendingToken()));

    throw exception;
}
Example 15
Source File: Grammar.java From codebuff with BSD 2-Clause "Simplified" License
public void setTokenForType(int ttype, String text) {
    if (ttype == Token.EOF) {
        // ignore EOF, it will be reported as an error separately
        return;
    }

    if ( ttype>=typeToTokenList.size() ) {
        Utils.setSize(typeToTokenList, ttype+1);
    }
    String prevToken = typeToTokenList.get(ttype);
    if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
        // only record if nothing there before or if thing before was a literal
        typeToTokenList.set(ttype, text);
    }
}
Example 16
Source File: ANTLRLexerAdaptor.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License
@Nullable
public IElementType getTokenType(int antlrTokenType) {
    if ( antlrTokenType==Token.EOF ) {
        // return null when lexing is finished
        return null;
    }

    return tokenElementTypes.get(antlrTokenType);
}
Example 17
Source File: ParseTreePrettyPrinter.java From batfish with Apache License 2.0
@Override
public void visitTerminal(TerminalNode ctx) {
    String nodeText = BatfishCombinedParser.escape(ctx.getText());
    _ptSentences.getSentences().add("");
    for (int i = 0; i < _indent; i++) {
        _ptSentences.appendToLastSentence(" ");
    }
    Token t = ctx.getSymbol();
    int tokenType = t.getType();
    int modeAsInt = _combinedParser.getTokenMode(t);
    String mode;
    if (modeAsInt == -1) {
        mode = "<MANUAL/UNKNOWN>";
    } else {
        mode = _combinedParser.getLexer().getModeNames()[modeAsInt];
    }

    String tokenName = (tokenType == Token.EOF) ? "EOF" : _vocabulary.getSymbolicName(tokenType);

    // If the parent context has a named field pointing to the token, it is because the user
    // has a defined name. Add it to the output message.
    for (Field f : ctx.getParent().getClass().getFields()) {
        if (f.getName().equals("start")
            || f.getName().equals("stop")
            || f.getName().startsWith("_t")
            || f.getName().equals(tokenName)) {
            continue;
        }
        try {
            if (f.get(ctx.getParent()) == ctx.getSymbol()) {
                _ptSentences.appendToLastSentence(f.getName() + " = ");
            }
        } catch (Throwable thrown) {
            // Ignore the error and continue.
        }
    }

    if (tokenType == Token.EOF) {
        _ptSentences.appendToLastSentence(tokenName + ":" + nodeText);
    } else {
        _ptSentences.appendToLastSentence(tokenName + ":'" + nodeText + "'");
    }
    if (!mode.equals("DEFAULT_MODE")) {
        _ptSentences.appendToLastSentence(" <== mode:" + mode);
    }
    if (_printLineNumbers) {
        _ptSentences.appendToLastSentence(String.format(" line:%s", _combinedParser.getLine(t)));
    }
}
Example 18
Source File: XpathParser.java From JsoupXpath with Apache License 2.0
public final PathExprNoRootContext pathExprNoRoot() throws RecognitionException {
    PathExprNoRootContext _localctx = new PathExprNoRootContext(_ctx, getState());
    enterRule(_localctx, 26, RULE_pathExprNoRoot);
    int _la;
    try {
        setState(147);
        _errHandler.sync(this);
        switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) {
        case 1:
            enterOuterAlt(_localctx, 1);
            {
            setState(141);
            locationPath();
            }
            break;
        case 2:
            enterOuterAlt(_localctx, 2);
            {
            setState(142);
            filterExpr();
            setState(145);
            _errHandler.sync(this);
            _la = _input.LA(1);
            if (_la==PATHSEP || _la==ABRPATH) {
                {
                setState(143);
                ((PathExprNoRootContext)_localctx).op = _input.LT(1);
                _la = _input.LA(1);
                if ( !(_la==PATHSEP || _la==ABRPATH) ) {
                    ((PathExprNoRootContext)_localctx).op = (Token)_errHandler.recoverInline(this);
                }
                else {
                    if ( _input.LA(1)==Token.EOF ) matchedEOF = true;
                    _errHandler.reportMatch(this);
                    consume();
                }
                setState(144);
                relativeLocationPath();
                }
            }
            }
            break;
        }
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }
    finally {
        exitRule();
    }
    return _localctx;
}
Example 19
Source File: XpathParser.java From JsoupXpath with Apache License 2.0
public final AdditiveExprContext additiveExpr() throws RecognitionException {
    AdditiveExprContext _localctx = new AdditiveExprContext(_ctx, getState());
    enterRule(_localctx, 38, RULE_additiveExpr);
    int _la;
    try {
        enterOuterAlt(_localctx, 1);
        {
        setState(188);
        multiplicativeExpr();
        setState(193);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==MINUS || _la==PLUS) {
            {
            {
            setState(189);
            ((AdditiveExprContext)_localctx).op = _input.LT(1);
            _la = _input.LA(1);
            if ( !(_la==MINUS || _la==PLUS) ) {
                ((AdditiveExprContext)_localctx).op = (Token)_errHandler.recoverInline(this);
            }
            else {
                if ( _input.LA(1)==Token.EOF ) matchedEOF = true;
                _errHandler.reportMatch(this);
                consume();
            }
            setState(190);
            multiplicativeExpr();
            }
            }
            setState(195);
            _errHandler.sync(this);
            _la = _input.LA(1);
        }
        }
    }
    catch (RecognitionException re) {
        _localctx.exception = re;
        _errHandler.reportError(this, re);
        _errHandler.recover(this, re);
    }
    finally {
        exitRule();
    }
    return _localctx;
}
Example 20
Source File: SwiftSupport.java From swift-js-transpiler with MIT License
public static boolean isRightOperatorWS(Token t) {
    return rightWS.get(t.getType()) || t.getType()==Token.EOF;
}