Java Code Examples for org.antlr.v4.runtime.IntStream#EOF
The following examples show how to use org.antlr.v4.runtime.IntStream#EOF. All of them are taken from open-source projects; the source file, project, and license are noted above each example.
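IntStream.EOF is the sentinel value (-1) that ANTLR streams return from LA() once the input is exhausted, so it is the usual loop guard when a stream is consumed by hand. As a quick orientation before the examples, here is a minimal, self-contained sketch of that pattern (the class name EofLoopSketch is illustrative and not taken from any of the projects below):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;

public class EofLoopSketch {
    public static void main(String[] args) {
        // Build a CharStream over an in-memory string.
        CharStream cs = CharStreams.fromString("a + b");
        // LA(1) peeks at the next character without consuming it;
        // it returns IntStream.EOF (-1) once the stream is exhausted.
        while (cs.LA(1) != IntStream.EOF) {
            System.out.print((char) cs.LA(1));
            cs.consume();
        }
    }
}

ANTLR's own Lexer.nextToken() relies on the same convention: when LA(1) returns IntStream.EOF it sets _hitEOF and emits the EOF token, which is exactly the check several of the lexer examples below reproduce.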
Example 1
Source File: CaseInsensitiveInputStream.java From flink-cep-dsl with Apache License 2.0
@Override
public int LA(int i) {
    if (i == 0) {
        return 0;
    }
    if (i < 0) {
        i++;
        if ((p + i - 1) < 0) {
            return IntStream.EOF;
        }
    }
    if ((p + i - 1) >= n) {
        return IntStream.EOF;
    }
    return la[p + i - 1];
}
Example 2
Source File: CharBufferStream.java From antsdb with GNU Lesser General Public License v3.0
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // undefined
    }
    if (i < 0) {
        i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
        if ((p + i - 1) < 0) {
            return IntStream.EOF; // invalid; no char before first char
        }
    }
    if ((p + i - 1) >= n) {
        return IntStream.EOF;
    }
    int ch = this.buf.get(p + i - 1);
    return ch;
}
Example 3
Source File: Session.java From antsdb with GNU Lesser General Public License v3.0
private void skipComments(CharStream cs) {
    int idx = cs.index();
    if (cs.LA(1) == '/') {
        cs.consume();
        if (cs.LA(1) == '*') {
            cs.consume();
            for (;;) {
                int ch = cs.LA(1);
                cs.consume();
                if (ch == IntStream.EOF) break;
                if (ch == '/') {
                    break;
                }
            }
            return;
        }
    }
    cs.seek(idx);
}
Example 4
Source File: CaseInsensitiveInputStream.java From mobi with GNU Affero General Public License v3.0
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // undefined
    }
    if (i < 0) {
        i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
        if ((p + i - 1) < 0) {
            return IntStream.EOF; // invalid; no char before first char
        }
    }
    if ((p + i - 1) >= n) {
        return IntStream.EOF;
    }
    return lookaheadData[p + i - 1];
}
Example 5
Source File: TokenUtilTest.java From kalang with MIT License
@Test
public void test() {
    CommonTokenStream ts = TokenStreamFactory.createTokenStream("class{ }");
    int tokenSize = ts.size();
    assertEquals(0, tokenSize);
    List<Token> tokens = ts.getTokens();
    assertEquals(0, tokens.size());
    ts.consume();
    ts.consume();
    assertEquals("}", ts.LT(1).getText());
    assertEquals("{", ts.LT(-1).getText());
    assertEquals("class", ts.LT(-2).getText());
    //why is it 4?
    assertEquals(4, ts.size());
    int consumeSize = 2;
    while (ts.LA(1) != IntStream.EOF) {
        ts.consume();
        consumeSize++;
    }
    tokens = ts.getTokens();
    assertEquals(5, tokens.size());
    assertEquals(3, consumeSize);
}
Example 6
Source File: CaseInsensitiveStream.java From presto with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example 7
Source File: CaseInsensitiveStream.java From crate with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example 8
Source File: BatfishLexerRecoveryStrategy.java From batfish with Apache License 2.0
/**
 * Wrap current unmatchable char up to next char in provided separator chars in a {@link
 * BatfishLexer#UNMATCHABLE_TOKEN} and emit it.
 */
public void recover() {
    // Always recover in the default mode -- otherwise, the parser can get stuck in an infinite
    // loop, e.g. if separator is not valid in the current mode.
    _lexer._mode = Lexer.DEFAULT_MODE;
    int tokenStartMarker = _lexer._input.mark();
    try {
        _lexer._token = null;
        _lexer._channel = Token.DEFAULT_CHANNEL;
        _lexer._tokenStartCharIndex = _lexer._input.index();
        _lexer._tokenStartCharPositionInLine = _lexer.getInterpreter().getCharPositionInLine();
        _lexer._tokenStartLine = _lexer.getInterpreter().getLine();
        _lexer._text = null;
        _lexer._type = BatfishLexer.UNMATCHABLE_TOKEN;
        for (int nextChar = _lexer._input.LA(1);
            !_separatorChars.contains(nextChar);
            nextChar = _lexer._input.LA(1)) {
            if (nextChar == IntStream.EOF) {
                _lexer._hitEOF = true;
                _lexer.emitEOF();
                return;
            }
            _lexer.getInterpreter().consume(_lexer._input);
        }
        _lexer.emit();
    } finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _lexer._input.release(tokenStartMarker);
    }
}
Example 9
Source File: CaseInsensitiveStream.java From macrobase with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example 10
Source File: CaseInsensitiveStream.java From rainbow with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example 11
Source File: CaseInsensitiveStream.java From sylph with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example 12
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0
private int nextChar(CharStream inputTape) {
    int ch = currentChar(inputTape);
    if (ch != IntStream.EOF) {
        inputTape.consume();
        ch = currentChar(inputTape);
    }
    return ch;
}
Example 13
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0
private int currentChar(CharStream inputTape) {
    int index = inputTape.index();
    int ch;
    if (index < inputTape.size()) {
        ch = inputTape.getText(Interval.of(index, index + 1)).charAt(0);
    } else {
        ch = IntStream.EOF;
    }
    return ch;
}
Example 14
Source File: CaseInsensitiveStream.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
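The CaseInsensitiveStream variants above (Examples 6, 7, 9, 10, 11 and 14) all follow the same pattern: delegate LA() to a wrapped stream and upper-case the result, while passing 0 and IntStream.EOF through unchanged, since neither is a real character. For context, a self-contained sketch of such a wrapper plus an EOF-terminated driver loop could look as follows (the class name CaseInsensitiveStreamSketch and the main method are illustrative; the projects above plug the wrapper into a generated lexer instead):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;
import org.antlr.v4.runtime.misc.Interval;

// Minimal delegating wrapper in the spirit of the examples above: every call is
// forwarded to the underlying stream, and only LA() rewrites its result, leaving
// 0 and IntStream.EOF untouched.
public class CaseInsensitiveStreamSketch implements CharStream {
    private final CharStream stream;

    public CaseInsensitiveStreamSketch(CharStream stream) {
        this.stream = stream;
    }

    @Override
    public int LA(int i) {
        int result = stream.LA(i);
        if (result == 0 || result == IntStream.EOF) {
            return result; // not real characters; must pass through unchanged
        }
        return Character.toUpperCase(result);
    }

    // Pure delegation for the rest of the CharStream contract.
    @Override public String getText(Interval interval) { return stream.getText(interval); }
    @Override public void consume() { stream.consume(); }
    @Override public int mark() { return stream.mark(); }
    @Override public void release(int marker) { stream.release(marker); }
    @Override public int index() { return stream.index(); }
    @Override public void seek(int index) { stream.seek(index); }
    @Override public int size() { return stream.size(); }
    @Override public String getSourceName() { return stream.getSourceName(); }

    public static void main(String[] args) {
        CharStream cs = new CaseInsensitiveStreamSketch(CharStreams.fromString("select"));
        while (cs.LA(1) != IntStream.EOF) {
            System.out.print((char) cs.LA(1)); // prints SELECT
            cs.consume();
        }
    }
}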
Example 15
Source File: DelimiterLexer.java From rainbow with Apache License 2.0
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            } while (_type == MORE);

            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
Example 16
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0
private boolean isEndBlockComment(int ch) {
    return ch == '*' && inputTape.LA(+2) == '/' || ch == IntStream.EOF;
}
Example 17
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0
private boolean isEndLineComment(int ch) {
    return ch == '\n' || ch == IntStream.EOF;
}
Example 18
Source File: DelimiterLexer.java From macrobase with Apache License 2.0
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            } while (_type == MORE);

            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
Example 19
Source File: DelimiterLexer.java From presto with Apache License 2.0
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            } while (_type == MORE);

            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}