org.antlr.v4.runtime.misc.Pair Java Examples
The following examples show how to use
org.antlr.v4.runtime.misc.Pair.
Each example is taken from an open-source project; the project, source file, and license are noted above the code.
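Before the project examples, here is a minimal sketch of the Pair API the examples rely on: a simple immutable holder with a two-argument constructor and public final fields a and b. The class and method names below (PairDemo, nameAndLength) are hypothetical and exist only for illustration; only the antlr4-runtime dependency is assumed.

import org.antlr.v4.runtime.misc.Pair;

public class PairDemo {
    // Hypothetical helper for illustration: return two related values at once.
    static Pair<String, Integer> nameAndLength(String s) {
        return new Pair<>(s, s.length());
    }

    public static void main(String[] args) {
        Pair<String, Integer> p = nameAndLength("token");
        // Components are read through the public final fields a and b.
        System.out.println(p.a + " has length " + p.b); // prints: token has length 5
    }
}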
Example #1
Source File: IdentifyOversizeLists.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
public void visitNonSingletonWithSeparator(ParserRuleContext ctx,
                                           List<? extends ParserRuleContext> siblings,
                                           Token separator)
{
    boolean oversize = isOversizeList(ctx, siblings, separator);
    Map<Token, Pair<Boolean, Integer>> tokenInfo =
        getInfoAboutListTokens(ctx, tokens, tokenToNodeMap, siblings, oversize);

    // copy sibling list info for associated tokens into overall list
    // but don't overwrite existing so that the most general (largest construct)
    // list information is used/retained (i.e., not overwritten).
    for (Token t : tokenInfo.keySet()) {
        if ( !tokenToListInfo.containsKey(t) ) {
            tokenToListInfo.put(t, tokenInfo.get(t));
        }
    }
}
Example #2
Source File: SqlParser.java From crate with Apache License 2.0 | 6 votes |
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context) {
    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    context.getParent().addChild(new TerminalNodeImpl(
        new CommonToken(
            new Pair<>(token.getTokenSource(), token.getInputStream()),
            SqlBaseLexer.IDENTIFIER,
            token.getChannel(),
            token.getStartIndex(),
            token.getStopIndex())));
}
Example #3
Source File: PreviewPanel.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 6 votes |
private static Pair<UberTreeViewer, JPanel> createParseTreePanel() {
    // wrap tree and slider in panel
    JPanel treePanel = new JPanel(new BorderLayout(0, 0));
    treePanel.setBackground(JBColor.white);

    final UberTreeViewer viewer =
        isTrackpadZoomSupported
            ? new TrackpadZoomingTreeView(null, null, false)
            : new UberTreeViewer(null, null, false);

    JSlider scaleSlider = createTreeViewSlider(viewer);

    // Wrap tree viewer component in scroll pane
    JScrollPane scrollPane = new JBScrollPane(viewer); // use Intellij's scroller

    treePanel.add(scrollPane, BorderLayout.CENTER);
    treePanel.add(scaleSlider, BorderLayout.SOUTH);
    return new Pair<>(viewer, treePanel);
}
Example #4
Source File: PreviewPanel.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 6 votes |
private JTabbedPane createParseTreeAndProfileTabbedPanel() {
    JBTabbedPane tabbedPane = new JBTabbedPane();

    LOG.info("createParseTreePanel" + " " + project.getName());
    Pair<UberTreeViewer, JPanel> pair = createParseTreePanel();
    treeViewer = pair.a;
    setupContextMenu(treeViewer);
    tabbedPane.addTab("Parse tree", pair.b);

    hierarchyViewer = new HierarchyViewer(null, this);
    tabbedPane.addTab("Hierarchy", hierarchyViewer);

    profilerPanel = new ProfilerPanel(project, this);
    tabbedPane.addTab("Profiler", profilerPanel.getComponent());

    return tabbedPane;
}
Example #5
Source File: Namespace.java From mumbler with GNU General Public License v3.0 | 6 votes |
public Pair<Integer, FrameSlot> getIdentifier(String id) {
    int depth = 0;
    Namespace current = this;
    FrameSlot slot = current.frameDescriptor.findFrameSlot(id);
    while (slot == null) {
        depth++;
        current = current.parent;
        if (current == null) {
            return new Pair<>(LEVEL_UNDEFINED, null);
        }
        slot = current.frameDescriptor.findFrameSlot(id);
    }
    if (current.parent == null) {
        return new Pair<>(LEVEL_GLOBAL, slot);
    }
    return new Pair<>(depth, slot);
}
Example #6
Source File: Converter.java From mumbler with GNU General Public License v3.0 | 6 votes |
public SymbolNode convert(SymbolSyntax syntax, Namespace ns) {
    SymbolNode node;
    MumblerSymbol sym = syntax.getValue();
    Pair<Integer, FrameSlot> pair = ns.getIdentifier(sym.name);
    if (pair.a == Namespace.LEVEL_UNDEFINED) {
        throwReaderException(sym.name + " undefined", syntax, ns);
        return null;
    } else if (pair.a == 0) {
        node = LocalSymbolNodeGen.create(pair.b);
    } else if (pair.a == Namespace.LEVEL_GLOBAL) {
        node = GlobalSymbolNodeGen.create(pair.b, this.context.getGlobalFrame());
    } else {
        node = ClosureSymbolNodeGen.create(pair.b, pair.a);
    }
    node.setSourceSection(syntax.getSourceSection());
    return node;
}
Example #7
Source File: SqlParser.java From macrobase with Apache License 2.0 | 6 votes |
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context) {
    // we can't modify the tree during rule enter/exit event handling unless we're dealing with a terminal.
    // Otherwise, ANTLR gets confused and fires spurious notifications.
    if (!(context.getChild(0) instanceof TerminalNode)) {
        int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex();
        throw new AssertionError(
            "nonReserved can only contain tokens. Found nested rule: " + ruleNames.get(rule));
    }

    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    context.getParent().addChild(new CommonToken(
        new Pair<>(token.getTokenSource(), token.getInputStream()),
        SqlBaseLexer.IDENTIFIER,
        token.getChannel(),
        token.getStartIndex(),
        token.getStopIndex()));
}
Example #8
Source File: Antlr4ParserTest.java From openCypher with Apache License 2.0 | 6 votes |
@Test
public void shouldReportInvalidCypher() throws FileNotFoundException, URISyntaxException {
    List<String> queries = getQueries( "/cypher-error.txt" );

    Stream<Pair<Boolean, String>> results = queries.stream().map( query -> {
        SyntaxError lexerListener = new SyntaxError();
        SyntaxError parserListener = new SyntaxError();
        Antlr4TestUtils.parseLegacyWithListeners( query, lexerListener, parserListener );
        return new Pair<>( parserListener.errorFound, query );
    } );

    results.forEach( r -> {
        if ( !r.a ) {
            fail( "Expected query to raise syntax error, but it did not: " + r.b );
        }
    } );
}
Example #9
Source File: PSITokenSource.java From antlr4-intellij-adaptor with BSD 2-Clause "Simplified" License | 6 votes |
/** Create an ANTLR Token from the current token type of the builder
 *  then advance the builder to next token (which ultimately calls an
 *  ANTLR lexer). The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType} and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel.
 *  So, whitespace and comments (typically hidden channel) will look like
 *  real tokens. Jetbrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type = ideaTType != null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    // System.out.println("TOKEN: "+t);
    return t;
}
Example #10
Source File: VisitorFile.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
public VisitorFile(OutputModelFactory factory, String fileName) {
    super(factory, fileName);
    Grammar g = factory.getGrammar();
    parserName = g.getRecognizerName();
    grammarName = g.name;
    for (Rule r : g.rules.values()) {
        Map<String, List<Pair<Integer, AltAST>>> labels = r.getAltLabels();
        if ( labels!=null ) {
            for (Map.Entry<String, List<Pair<Integer, AltAST>>> pair : labels.entrySet()) {
                visitorNames.add(pair.getKey());
                visitorLabelRuleNames.put(pair.getKey(), r.name);
            }
        }
        else {
            // if labels, must label all. no need for generic rule visitor then
            visitorNames.add(r.name);
        }
    }
    ActionAST ast = g.namedActions.get("header");
    if ( ast!=null ) header = new Action(factory, ast);
    genPackage = factory.getGrammar().tool.genPackage;
}
Example #11
Source File: ListenerFile.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
public ListenerFile(OutputModelFactory factory, String fileName) {
    super(factory, fileName);
    Grammar g = factory.getGrammar();
    parserName = g.getRecognizerName();
    grammarName = g.name;
    for (Rule r : g.rules.values()) {
        Map<String, List<Pair<Integer, AltAST>>> labels = r.getAltLabels();
        if ( labels!=null ) {
            for (Map.Entry<String, List<Pair<Integer, AltAST>>> pair : labels.entrySet()) {
                listenerNames.add(pair.getKey());
                listenerLabelRuleNames.put(pair.getKey(), r.name);
            }
        }
        else {
            // only add rule context if no labels
            listenerNames.add(r.name);
        }
    }
    ActionAST ast = g.namedActions.get("header");
    if ( ast!=null ) header = new Action(factory, ast);
    genPackage = factory.getGrammar().tool.genPackage;
}
Example #12
Source File: LeftRecursiveRuleAnalyzer.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
@Override
public void suffixAlt(AltAST originalAltTree, int alt) {
    AltAST altTree = (AltAST)originalAltTree.dupTree();
    String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;

    String label = null;
    boolean isListLabel = false;
    GrammarAST lrlabel = stripLeftRecursion(altTree);
    if ( lrlabel!=null ) {
        label = lrlabel.getText();
        isListLabel = lrlabel.getParent().getType() == PLUS_ASSIGN;
        leftRecursiveRuleRefLabels.add(new Pair<GrammarAST,String>(lrlabel, altLabel));
    }

    stripAltLabel(altTree);
    String altText = text(altTree);
    altText = altText.trim();
    LeftRecursiveRuleAltInfo a =
        new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, isListLabel, originalAltTree);
    suffixAlts.put(alt, a);
    // System.out.println("suffixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
}
Example #13
Source File: Rule.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
/**
 * Get {@code #} labels. The keys of the map are the labels applied to outer
 * alternatives of a lexer rule, and the values are collections of pairs
 * (alternative number and {@link AltAST}) identifying the alternatives with
 * this label. Unlabeled alternatives are not included in the result.
 */
public Map<String, List<Pair<Integer, AltAST>>> getAltLabels() {
    Map<String, List<Pair<Integer, AltAST>>> labels =
        new LinkedHashMap<String, List<Pair<Integer, AltAST>>>();
    for (int i=1; i<=numberOfAlts; i++) {
        GrammarAST altLabel = alt[i].ast.altLabel;
        if ( altLabel!=null ) {
            List<Pair<Integer, AltAST>> list = labels.get(altLabel.getText());
            if (list == null) {
                list = new ArrayList<Pair<Integer, AltAST>>();
                labels.put(altLabel.getText(), list);
            }
            list.add(new Pair<Integer, AltAST>(i, alt[i].ast));
        }
    }
    if ( labels.isEmpty() ) return null;
    return labels;
}
Example #14
Source File: SqlParser.java From presto with Apache License 2.0 | 6 votes |
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context) {
    // we can't modify the tree during rule enter/exit event handling unless we're dealing with a terminal.
    // Otherwise, ANTLR gets confused and fires spurious notifications.
    if (!(context.getChild(0) instanceof TerminalNode)) {
        int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex();
        throw new AssertionError("nonReserved can only contain tokens. Found nested rule: " + ruleNames.get(rule));
    }

    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    Token newToken = new CommonToken(
        new Pair<>(token.getTokenSource(), token.getInputStream()),
        SqlBaseLexer.IDENTIFIER,
        token.getChannel(),
        token.getStartIndex(),
        token.getStopIndex());

    context.getParent().addChild(parser.createTerminalNode(context.getParent(), newToken));
}
Example #15
Source File: CollectTokenPairs.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
/** Return a new map from rulename to List of (a,b) pairs stripped of
 *  tuples (a,b) where a or b is in rule repeated token set.
 *  E.g., before removing repeated token ',', we see:
 *
 *      elementValueArrayInitializer: 4:'{',',' 1:'{','}' 4:',','}'
 *
 *  After removing tuples containing repeated tokens, we get:
 *
 *      elementValueArrayInitializer: 1:'{','}'
 */
protected Map<RuleAltKey, List<Pair<Integer, Integer>>> stripPairsWithRepeatedTokens() {
    Map<RuleAltKey, List<Pair<Integer, Integer>>> ruleToPairsWoRepeats = new HashMap<>();
    // For each rule
    for (RuleAltKey ruleAltKey : ruleToPairsBag.keySet()) {
        Set<Integer> ruleRepeatedTokens = ruleToRepeatedTokensSet.get(ruleAltKey);
        Set<Pair<Integer, Integer>> pairsBag = ruleToPairsBag.get(ruleAltKey);
        // If there are repeated tokens for this rule
        if ( ruleRepeatedTokens!=null ) {
            // Remove all (a,b) for b in repeated token set
            List<Pair<Integer, Integer>> pairsWoRepeats =
                BuffUtils.filter(pairsBag,
                                 p -> !ruleRepeatedTokens.contains(p.a) && !ruleRepeatedTokens.contains(p.b));
            ruleToPairsWoRepeats.put(ruleAltKey, pairsWoRepeats);
        }
        else {
            ruleToPairsWoRepeats.put(ruleAltKey, new ArrayList<>(pairsBag));
        }
    }
    return ruleToPairsWoRepeats;
}
Example #16
Source File: Trainer.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
/** Walk upwards from node until we find a child of p at t's char position.
 *  Don't see alignment with self, t, or element *after* us.
 *  Return null if there is no such ancestor p.
 */
public static Pair<ParserRuleContext, Integer> earliestAncestorWithChildStartingAtCharPos(ParserRuleContext node, Token t, int charpos) {
    ParserRuleContext p = node;
    while ( p!=null ) {
        // check all children of p to see if one of them starts at charpos
        for (int i = 0; i<p.getChildCount(); i++) {
            ParseTree child = p.getChild(i);
            Token start;
            if ( child instanceof ParserRuleContext ) {
                start = ((ParserRuleContext) child).getStart();
            }
            else { // must be token
                start = ((TerminalNode)child).getSymbol();
            }
            // check that we don't see alignment with self or element *after* us
            if ( start.getTokenIndex()<t.getTokenIndex() && start.getCharPositionInLine()==charpos ) {
                return new Pair<>(p, i);
            }
        }
        p = p.getParent();
    }
    return null;
}
Example #17
Source File: Tool.java From bookish with MIT License | 6 votes |
public void generateArticle(String metadataFilename, Target target) throws IOException {
    String outFilename;
    String inputFilename = metadataFilename;
    Book book = new Book(this, "", "");
    book.entities = new HashMap<>();
    Translator trans = new Translator(book, book.entities, target, outputDir);
    if ( target==Target.HTML || target==Target.HTML_ARTICLE ) {
        outFilename = "index.html";
    }
    else {
        outFilename = stripFileExtension(basename(inputFilename))+".tex";
    }

    Pair<Document, String> results = parseArticle(book, trans, inputDir, basename(inputFilename));
    String output = results.b;
    ParrtIO.save(outputDir+"/"+outFilename, output);
    // System.out.println("Wrote "+outputDir+"/"+outFilename);

    execCommandLine(String.format("cp -r %s/images %s", inputDir, outputDir));
    execCommandLine(String.format("cp -r %s/css %s", inputDir, outputDir));
}
Example #18
Source File: SqlParser.java From rainbow with Apache License 2.0 | 6 votes |
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context) {
    // we can't modify the tree during rule enter/exit event handling unless we're dealing with a terminal.
    // Otherwise, ANTLR gets confused and fires spurious notifications.
    if (!(context.getChild(0) instanceof TerminalNode)) {
        int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex();
        throw new AssertionError("nonReserved can only contain tokens. Found nested rule: " + ruleNames.get(rule));
    }

    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    context.getParent().addChild(new CommonToken(
        new Pair<>(token.getTokenSource(), token.getInputStream()),
        SqlBaseLexer.IDENTIFIER,
        token.getChannel(),
        token.getStartIndex(),
        token.getStopIndex()));
}
Example #19
Source File: AstBuilder.java From graphicsfuzz with Apache License 2.0 | 6 votes |
@Override
public Declaration visitInterface_block(Interface_blockContext ctx) {
    final Optional<LayoutQualifierSequence> maybeLayoutQualifier =
        ctx.layout_qualifier() == null
            ? Optional.empty()
            : Optional.of(visitLayout_qualifier(ctx.layout_qualifier()));
    final Basic_interface_blockContext basicCtx = ctx.basic_interface_block();
    final TypeQualifier interfaceQualifier =
        visitInterface_qualifier(basicCtx.interface_qualifier());
    if (basicCtx.instance_name() != null) {
        throw new UnsupportedLanguageFeatureException("Named interface blocks are not currently "
            + "supported.");
    }
    final Pair<List<String>, List<Type>> members = getMembers(basicCtx.member_list());
    return new InterfaceBlock(
        maybeLayoutQualifier,
        interfaceQualifier,
        basicCtx.IDENTIFIER().getText(),
        members.a,
        members.b,
        Optional.empty());
}
Example #20
Source File: AstBuilder.java From graphicsfuzz with Apache License 2.0 | 6 votes |
private Pair<List<String>, List<Type>> getMembers(Member_listContext memberListContext) {
    final LinkedList<String> fieldNames = new LinkedList<>();
    final LinkedList<Type> fieldTypes = new LinkedList<>();
    for (Member_listContext ctx = memberListContext; ctx != null; ctx = ctx.member_list()) {
        final Type baseType =
            visitFully_specified_type(ctx.member_declaration().fully_specified_type());
        for (Struct_declarator_listContext declarators = ctx.member_declaration().struct_declarator_list();
             declarators != null;
             declarators = declarators.struct_declarator_list()) {
            fieldNames.addFirst(declarators.struct_declarator().IDENTIFIER().getText());
            if (declarators.struct_declarator().array_specifier() == null) {
                fieldTypes.addFirst(baseType);
            } else {
                final ArrayType arrayType = new ArrayType(
                    baseType.getWithoutQualifiers(),
                    getArrayInfo(declarators.struct_declarator().array_specifier()));
                fieldTypes.addFirst(baseType instanceof QualifiedType
                    ? new QualifiedType(arrayType, ((QualifiedType) baseType).getQualifiers())
                    : arrayType);
            }
        }
    }
    return new Pair<>(fieldNames, fieldTypes);
}
Example #21
Source File: SubsetValidator.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/** Select one document at random, then n others w/o replacement as corpus */
public Pair<InputDocument, List<InputDocument>> selectSample(List<InputDocument> documents, int n) {
    int i = random.nextInt(documents.size());
    InputDocument testDoc = documents.get(i);
    List<InputDocument> others = filter(documents, d -> d!=testDoc);
    List<InputDocument> corpusSubset = getRandomDocuments(others, n);
    return new Pair<>(testDoc, corpusSubset);
}
Example #22
Source File: LocationRepointCommonTokenFactory.java From Concurnas with MIT License | 5 votes |
@Override
public CommonToken create(Pair<TokenSource, CharStream> source, int type, String text,
                          int channel, int start, int stop, int line, int charPositionInLine) {
    return super.create(source, type, text, channel, start, stop,
                        /*line +*/ lineoffset,
                        charPositionInLine + coloffset);
}
Example #23
Source File: ParsingUtils.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 5 votes |
public static Token getSkippedTokenUnderCursor(CommonTokenStream tokens, int offset) {
    if ( offset<0 || offset >= tokens.getTokenSource().getInputStream().size() ) return null;
    Token prevToken = null;
    Token tokenUnderCursor = null;
    for (Token t : tokens.getTokens()) {
        int begin = t.getStartIndex();
        int end = t.getStopIndex();
        if ( (prevToken==null || offset > prevToken.getStopIndex()) && offset < begin ) {
            // found in between
            TokenSource tokenSource = tokens.getTokenSource();
            CharStream inputStream = null;
            if ( tokenSource!=null ) {
                inputStream = tokenSource.getInputStream();
            }
            tokenUnderCursor = new org.antlr.v4.runtime.CommonToken(
                new Pair<>(tokenSource, inputStream),
                Token.INVALID_TYPE,
                -1,
                prevToken!=null ? prevToken.getStopIndex()+1 : 0,
                begin-1
            );
            break;
        }
        if ( offset >= begin && offset <= end ) {
            tokenUnderCursor = t;
            break;
        }
        prevToken = t;
    }
    return tokenUnderCursor;
}
Example #24
Source File: AltLabelTextProvider.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 5 votes |
public String[] getAltLabels(Rule r) {
    String[] altLabels = null;
    Map<String, List<Pair<Integer, AltAST>>> altLabelsMap = r.getAltLabels();
    if ( altLabelsMap!=null ) {
        altLabels = new String[r.getOriginalNumberOfAlts() + 1];
        for (String altLabel : altLabelsMap.keySet()) {
            List<Pair<Integer, AltAST>> pairs = altLabelsMap.get(altLabel);
            for (Pair<Integer, AltAST> pair : pairs) {
                altLabels[pair.a] = altLabel;
            }
        }
    }
    return altLabels;
}
Example #25
Source File: Tool.java From bookish with MIT License | 5 votes |
public Pair<BookishParser.DocumentContext, BookishParser> parseChapter(String inputDir, String inputFilename, int chapNumber) throws IOException {
    CharStream input = CharStreams.fromFileName(inputDir+"/"+inputFilename);
    BookishLexer lexer = new BookishLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    BookishParser parser = new BookishParser(tokens, inputFilename, chapNumber);
    BookishParser.DocumentContext doctree = parser.document();
    return new Pair<>(doctree, parser);
}
Example #26
Source File: Tool.java From bookish with MIT License | 5 votes |
public Pair<Document, String> parseArticle(Book book, Translator trans, String inputDir, String inputFilename) throws IOException {
    Pair<BookishParser.DocumentContext, BookishParser> results =
        parseChapter(inputDir, inputFilename, 0);
    trans.entities = results.b.entities;
    Document doc = (Document)trans.visit(results.a); // get single chapter
    doc.chapter.connectContainerTree();

    executeCodeSnippets(book, "/tmp/build-article-code", Arrays.asList(results.b.codeBlocks));

    ModelConverter converter = new ModelConverter(trans.templates);
    ST outputST = converter.walk(doc);

    // walk all OutputModelObjects created as labeled entities to convert those entities
    // unlabeled entities are done in-line
    ArrayList<String> labels = new ArrayList<>(trans.entities.keySet());
    for (String label : labels) {
        EntityDef def = trans.entities.get(label);
        def.template = converter.walk(def.model);
        if ( def.isGloballyVisible() ) { // move to global space
            book.entities.put(label, def);
            trans.entities.remove(label);
        }
    }

    return new Pair<>(doc, outputST.render());
}
Example #27
Source File: SectionDef.java From bookish with MIT License | 5 votes |
public SectionDef(int secNumber, Token titleToken, EntityWithScope enclosingScope) {
    super(secNumber, titleToken, enclosingScope);
    String title = titleToken.getText();
    title = title.substring(title.indexOf(' ')+1).trim();
    Pair<String, String> results = splitSectionTitle(title);
    this.title = results.a;
    this.label = results.b;
    if ( label==null ) {
        label = "sec:"+getContainerNumber();
    }
}
Example #28
Source File: ChapterDef.java From bookish with MIT License | 5 votes |
public ChapterDef(int chapNumber, Token titleToken, EntityWithScope enclosingScope) {
    super(chapNumber, titleToken, enclosingScope);
    String title = titleToken.getText();
    title = title.substring(title.indexOf(' ')+1).trim();
    Pair<String, String> results = splitSectionTitle(title);
    this.title = results.a;
    this.label = results.b;
    if ( label==null ) {
        label = "chp:"+getContainerNumber();
    }
}
Example #29
Source File: Translator.java From bookish with MIT License | 5 votes |
public static Pair<String, String> splitSectionTitle(String title) {
    List<String> anchors = extract(sectionAnchorPattern, title);
    String anchor = null;
    if ( anchors.size()>0 ) {
        anchor = anchors.get(0);
        int lparent = title.indexOf('(');
        title = title.substring(0, lparent).trim();
    }
    return new Pair<>(title, anchor);
}
Example #30
Source File: LeftRecursiveRuleFunction.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
public LeftRecursiveRuleFunction(OutputModelFactory factory, LeftRecursiveRule r) {
    super(factory, r);

    CodeGenerator gen = factory.getGenerator();
    // Since we delete x=lr, we have to manually add decls for all labels
    // on left-recur refs to proper structs
    for (Pair<GrammarAST, String> pair : r.leftRecursiveRuleRefLabels) {
        GrammarAST idAST = pair.a;
        String altLabel = pair.b;
        String label = idAST.getText();
        GrammarAST rrefAST = (GrammarAST)idAST.getParent().getChild(1);
        if ( rrefAST.getType() == ANTLRParser.RULE_REF ) {
            Rule targetRule = factory.getGrammar().getRule(rrefAST.getText());
            String ctxName = gen.getTarget().getRuleFunctionContextStructName(targetRule);
            RuleContextDecl d;
            if (idAST.getParent().getType() == ANTLRParser.ASSIGN) {
                d = new RuleContextDecl(factory, label, ctxName);
            }
            else {
                d = new RuleContextListDecl(factory, label, ctxName);
            }

            StructDecl struct = ruleCtx;
            if ( altLabelCtxs!=null ) {
                StructDecl s = altLabelCtxs.get(altLabel);
                if ( s!=null ) struct = s; // if alt label, use subctx
            }
            struct.addDecl(d); // stick in overall rule's ctx
        }
    }
}