org.antlr.v4.runtime.IntStream Java Examples
The following examples show how to use org.antlr.v4.runtime.IntStream.
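Most of the snippets below revolve around two members of the interface: the EOF sentinel (-1) that LA() returns at the end of input, and the UNKNOWN_SOURCE_NAME constant used when a stream has no file name. A minimal, self-contained sketch of the common lookahead idiom (not taken from any of the projects below):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;

public class IntStreamEofDemo {
    public static void main(String[] args) {
        CharStream in = CharStreams.fromString("abc");
        // LA(1) peeks at the next character; IntStream.EOF (-1) marks the end of input.
        while (in.LA(1) != IntStream.EOF) {
            int ch = in.LA(1);
            System.out.printf("index=%d char=%c%n", in.index(), (char) ch);
            in.consume();
            // LA(-1) looks back at the character that was just consumed.
            assert in.LA(-1) == ch;
        }
    }
}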
Example #1
Source File: CaseInsensitiveInputStream.java From flink-cep-dsl with Apache License 2.0 | 6 votes |
@Override
public int LA(int i) {
    if (i == 0) {
        return 0;
    }
    if (i < 0) {
        i++;
        if ((p + i - 1) < 0) {
            return IntStream.EOF;
        }
    }
    if ((p + i - 1) >= n) {
        return IntStream.EOF;
    }
    return la[p + i - 1];
}
Example #2
Source File: ANTLRLexerAdaptor.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License | 6 votes |
@Override
public void start(CharSequence buffer, int startOffset, int endOffset, int initialState) {
    this.buffer = buffer;
    this.endOffset = endOffset;
    CharStream in = new CharSequenceCharStream(buffer, endOffset, IntStream.UNKNOWN_SOURCE_NAME);
    in.seek(startOffset);
    ANTLRLexerState state;
    if (startOffset == 0 && initialState == 0) {
        state = getInitialState();
    } else {
        state = toLexerState(initialState);
    }
    applyLexerState(in, state);
    advance();
}
Example #3
Source File: CharBufferStream.java From antsdb with GNU Lesser General Public License v3.0 | 6 votes |
@Override
public int LA(int i) {
    if ( i==0 ) {
        return 0; // undefined
    }
    if ( i<0 ) {
        i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
        if ( (p+i-1) < 0 ) {
            return IntStream.EOF; // invalid; no char before first char
        }
    }
    if ( (p+i-1) >= n ) {
        return IntStream.EOF;
    }
    int ch = this.buf.get(p+i-1);
    return ch;
}
Example #4
Source File: Session.java From antsdb with GNU Lesser General Public License v3.0 | 6 votes |
private void skipComments(CharStream cs) {
    int idx = cs.index();
    if (cs.LA(1) == '/') {
        cs.consume();
        if (cs.LA(1) == '*') {
            cs.consume();
            for (;;) {
                int ch = cs.LA(1);
                cs.consume();
                if (ch == IntStream.EOF) break;
                if (ch == '/') {
                    break;
                }
            }
            return;
        }
    }
    cs.seek(idx);
}
Example #5
Source File: CaseInsensitiveInputStream.java From mobi with GNU Affero General Public License v3.0 | 6 votes |
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // undefined
    }
    if (i < 0) {
        i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
        if ((p + i - 1) < 0) {
            return IntStream.EOF; // invalid; no char before first char
        }
    }
    if ((p + i - 1) >= n) {
        return IntStream.EOF;
    }
    return lookaheadData[p + i - 1];
}
Example #6
Source File: TokenUtilTest.java From kalang with MIT License | 6 votes |
@Test
public void test(){
    CommonTokenStream ts = TokenStreamFactory.createTokenStream("class{ }");
    int tokenSize = ts.size();
    assertEquals(0, tokenSize);
    List<Token> tokens = ts.getTokens();
    assertEquals(0, tokens.size());
    ts.consume();
    ts.consume();
    assertEquals("}", ts.LT(1).getText());
    assertEquals("{", ts.LT(-1).getText());
    assertEquals("class", ts.LT(-2).getText());
    //why is it 4?
    assertEquals(4, ts.size());
    int consumeSize = 2;
    while(ts.LA(1)!=IntStream.EOF){
        ts.consume();
        consumeSize++;
    }
    tokens = ts.getTokens();
    assertEquals(5, tokens.size());
    assertEquals(3, consumeSize);
}
Example #7
Source File: CaseInsensitiveStream.java From presto with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
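A hedged usage sketch, not taken from the Presto sources: a wrapper like this is typically placed between the raw CharStream and a generated lexer so that keyword matching is case-insensitive while token text keeps its original casing. The one-argument constructor is an assumption here, since the example above only shows the LA override.

// Assumed for illustration: CaseInsensitiveStream(CharStream delegate) wraps the underlying stream.
CharStream upper = new CaseInsensitiveStream(CharStreams.fromString("select"));
while (upper.LA(1) != IntStream.EOF) {
    System.out.print((char) upper.LA(1)); // prints SELECT: lookahead is upper-cased
    upper.consume();
}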
Example #8
Source File: CaseInsensitiveStream.java From crate with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example #9
Source File: BatfishLexerRecoveryStrategy.java From batfish with Apache License 2.0 | 5 votes |
/**
 * Wrap current unmatchable char up to next char in provided separator chars in a {@link
 * BatfishLexer#UNMATCHABLE_TOKEN} and emit it.
 */
public void recover() {
    // Always recover in the default mode -- otherwise, the parser can get stuck in an infinite
    // loop, e.g. if separator is not valid in the current mode.
    _lexer._mode = Lexer.DEFAULT_MODE;
    int tokenStartMarker = _lexer._input.mark();
    try {
        _lexer._token = null;
        _lexer._channel = Token.DEFAULT_CHANNEL;
        _lexer._tokenStartCharIndex = _lexer._input.index();
        _lexer._tokenStartCharPositionInLine = _lexer.getInterpreter().getCharPositionInLine();
        _lexer._tokenStartLine = _lexer.getInterpreter().getLine();
        _lexer._text = null;
        _lexer._type = BatfishLexer.UNMATCHABLE_TOKEN;
        for (int nextChar = _lexer._input.LA(1);
            !_separatorChars.contains(nextChar);
            nextChar = _lexer._input.LA(1)) {
            if (nextChar == IntStream.EOF) {
                _lexer._hitEOF = true;
                _lexer.emitEOF();
                return;
            }
            _lexer.getInterpreter().consume(_lexer._input);
        }
        _lexer.emit();
    } finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _lexer._input.release(tokenStartMarker);
    }
}
Example #10
Source File: CaseInsensitiveStream.java From macrobase with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example #11
Source File: LexerATNFactory.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
@Override
public Handle tokenRef(TerminalAST node) {
    // Ref to EOF in lexer yields char transition on -1
    if ( node.getText().equals("EOF") ) {
        ATNState left = newState(node);
        ATNState right = newState(node);
        left.addTransition(new AtomTransition(right, IntStream.EOF));
        return new Handle(left, right);
    }
    return _ruleRef(node);
}
Example #12
Source File: CaseInsensitiveStream.java From rainbow with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example #13
Source File: CaseInsensitiveStream.java From sylph with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example #14
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0 | 5 votes |
private int nextChar(CharStream inputTape) {
    int ch = currentChar(inputTape);
    if (ch != IntStream.EOF) {
        inputTape.consume();
        ch = currentChar(inputTape);
    }
    return ch;
}
Example #15
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0 | 5 votes |
private int currentChar(CharStream inputTape) {
    int index = inputTape.index();
    int ch;
    if (index < inputTape.size()) {
        ch = inputTape.getText(Interval.of(index, index + 1)).charAt(0);
    } else {
        ch = IntStream.EOF;
    }
    return ch;
}
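Examples #14 and #15 work as a pair: currentChar peeks at the character under the stream index without consuming anything, and nextChar advances only when the tape is not already at IntStream.EOF. A simplified, standalone sketch of the same pattern (not taken from the jdmn sources; the helpers are restated as static methods):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;
import org.antlr.v4.runtime.misc.Interval;

public class TapeScanDemo {
    // Peek at the character under the index, or EOF when the tape is exhausted.
    static int currentChar(CharStream tape) {
        int index = tape.index();
        return index < tape.size()
            ? tape.getText(Interval.of(index, index)).charAt(0)
            : IntStream.EOF;
    }

    // Advance one character unless already at EOF, then peek again.
    static int nextChar(CharStream tape) {
        if (currentChar(tape) != IntStream.EOF) {
            tape.consume();
        }
        return currentChar(tape);
    }

    public static void main(String[] args) {
        CharStream tape = CharStreams.fromString("abc");
        for (int ch = currentChar(tape); ch != IntStream.EOF; ch = nextChar(tape)) {
            System.out.println((char) ch);
        }
    }
}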
Example #16
Source File: CaseInsensitiveStream.java From ksql-fork-with-deep-learning-function with Apache License 2.0 | 5 votes |
@Override
public int LA(int i) {
    int result = stream.LA(i);
    switch (result) {
        case 0:
        case IntStream.EOF:
            return result;
        default:
            return Character.toUpperCase(result);
    }
}
Example #17
Source File: LocatableTest.java From gyro with Apache License 2.0 | 4 votes |
@Test
void getFile() {
    assertThat(new TestLocatable(stream, 0, 0, 0, 0).getFile())
        .isEqualTo(IntStream.UNKNOWN_SOURCE_NAME);
}
Example #18
Source File: DelimiterLexer.java From rainbow with Apache License 2.0 | 4 votes |
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
Example #19
Source File: GyroCharStreamTest.java From gyro with Apache License 2.0 | 4 votes |
@Test
void getSourceName() {
    assertThat(stream.getSourceName()).isEqualTo(IntStream.UNKNOWN_SOURCE_NAME);
}
Example #20
Source File: GyroCharStream.java From gyro with Apache License 2.0 | 4 votes |
private CharStream createStream(List<String> lines, String file) {
    return CharStreams.fromString(
        String.join("\n", lines),
        file != null ? file : IntStream.UNKNOWN_SOURCE_NAME);
}
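A related sketch, not from the gyro sources: CharStreams.fromString defaults the source name when none is given, so getSourceName() reports IntStream.UNKNOWN_SOURCE_NAME, mirroring the null-file fallback in createStream above. The file name "example.gyro" is made up for illustration.

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;

public class SourceNameDemo {
    public static void main(String[] args) {
        CharStream named = CharStreams.fromString("a: 1\n", "example.gyro");
        CharStream unnamed = CharStreams.fromString("a: 1\n");
        System.out.println(named.getSourceName()); // example.gyro
        // Without an explicit name, ANTLR falls back to IntStream.UNKNOWN_SOURCE_NAME.
        System.out.println(unnamed.getSourceName().equals(IntStream.UNKNOWN_SOURCE_NAME)); // true
    }
}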
Example #21
Source File: Locatable.java From gyro with Apache License 2.0 | 4 votes |
default String getFile() {
    return Optional.ofNullable(getStream())
        .map(GyroCharStream::getSourceName)
        .orElse(IntStream.UNKNOWN_SOURCE_NAME);
}
Example #22
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0 | 4 votes |
private boolean isEndBlockComment(int ch) {
    return ch == '*' && inputTape.LA(+2) == '/' || ch == IntStream.EOF;
}
Example #23
Source File: ContextDependentFEELLexer.java From jdmn with Apache License 2.0 | 4 votes |
private boolean isEndLineComment(int ch) {
    return ch == '\n' || ch == IntStream.EOF;
}
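Examples #22 and #23 are end-of-comment predicates; treating IntStream.EOF as a terminator keeps an unterminated comment from looping forever. A minimal sketch of a line-comment skip loop built on the same idea (not taken from the jdmn sources):

import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.IntStream;

public class LineCommentSkipDemo {
    public static void main(String[] args) {
        CharStream tape = CharStreams.fromString("// a trailing comment with no newline");
        tape.consume(); // skip '/'
        tape.consume(); // skip '/'
        // Stop at the end of the line or at EOF, whichever comes first.
        while (tape.LA(1) != '\n' && tape.LA(1) != IntStream.EOF) {
            tape.consume();
        }
        System.out.println(tape.LA(1) == IntStream.EOF); // true: the comment ran to EOF
    }
}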
Example #24
Source File: DelimiterLexer.java From macrobase with Apache License 2.0 | 4 votes |
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
Example #25
Source File: BatfishANTLRErrorStrategy.java From batfish with Apache License 2.0 | 4 votes |
public BatfishRecognitionException(
    Recognizer<?, ?> recognizer, IntStream input, ParserRuleContext ctx) {
    super(null, recognizer, input, ctx);
}
Example #26
Source File: DelimiterLexer.java From presto with Apache License 2.0 | 4 votes |
@Override
public Token nextToken() {
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e); // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
Example #27
Source File: BatfishLexerRecoveryStrategy.java From batfish with Apache License 2.0 | 2 votes |
/** * Construct a {@link BatfishLexerRecoveryStrategy} for given {@code lexer} using {@code * separatorChars} to mark end of invalid chars to be consumed and discarded. * * @param lexer The {@link BatfishLexer} using this strategy * @param separatorChars The chars used to mark the end (non-inclusive) of any string of invalid * chars */ public BatfishLexerRecoveryStrategy(BatfishLexer lexer, Set<Integer> separatorChars) { _lexer = lexer; _separatorChars = ImmutableSet.copyOf(Sets.union(separatorChars, Collections.singleton(IntStream.EOF))); }