org.antlr.v4.tool.ast.GrammarRootAST Java Examples
The following examples show how to use
org.antlr.v4.tool.ast.GrammarRootAST.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You can also check out the related API usage links in the sidebar.
Example #1
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
/**
 * Processes every grammar named on the command line. Grammars are sorted so
 * that tokenVocab dependencies are handled before their dependents. In
 * dependency-generation mode only the rendered dependency list is printed;
 * otherwise each grammar goes through the full processing pipeline, but only
 * while no errors have been reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> roots = sortGrammarByTokenVocab(grammarFiles);
    for (GrammarRootAST root : roots) {
        final Grammar grammar = createGrammar(root);
        grammar.fileName = root.fileName;
        if ( gen_dependencies ) {
            // -depend mode: print the make-style dependency list instead of generating code
            BuildDependencyGenerator depGen = new BuildDependencyGenerator(this, grammar);
            System.out.println(depGen.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            // run the full pipeline only while the build is still clean
            process(grammar, true);
        }
    }
}
Example #2
Source File: Grammar.java From codebuff with BSD 2-Clause "Simplified" License | 6 votes |
/**
 * Wraps a parsed grammar AST in a Grammar object.
 * The grammar name is taken from the first child of the root node (the ID
 * that follows the "grammar" keyword).
 *
 * @throws NullPointerException     if {@code ast} is null
 * @throws IllegalArgumentException if {@code ast} carries no token stream
 */
public Grammar(Tool tool, GrammarRootAST ast) {
    // fail fast on an unusable AST before touching any state
    if ( ast==null ) {
        throw new NullPointerException("ast");
    }
    if (ast.tokenStream == null) {
        throw new IllegalArgumentException("ast must have a token stream");
    }
    this.tool = tool;
    this.ast = ast;
    this.tokenStream = ast.tokenStream;
    this.originalTokenStream = this.tokenStream;
    this.name = (ast.getChild(0)).getText();
    initTokenSymbolTables();
}
Example #3
Source File: Grammar.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Return list of (TOKEN_NAME node, 'literal' node) pairs.
 *
 * Scans every RULE node in the grammar AST and, for each lexer rule
 * (TOKEN_REF name), tries the tree patterns below in order until one
 * matches; a match records the (name, literal) pair via defAlias().
 *
 * Returns null (not an empty list) when the grammar contains no rules.
 */
public static List<Pair<GrammarAST,GrammarAST>> getStringLiteralAliasesFromLexerRules(GrammarRootAST ast) {
    // Shapes of lexer rules that are pure string-literal aliases,
    // optionally followed by actions/predicates/lexer commands.
    String[] patterns = {
        "(RULE %name:TOKEN_REF (BLOCK (ALT %lit:STRING_LITERAL)))",
        "(RULE %name:TOKEN_REF (BLOCK (ALT %lit:STRING_LITERAL ACTION)))",
        "(RULE %name:TOKEN_REF (BLOCK (ALT %lit:STRING_LITERAL SEMPRED)))",
        "(RULE %name:TOKEN_REF (BLOCK (LEXER_ALT_ACTION (ALT %lit:STRING_LITERAL) .)))",
        "(RULE %name:TOKEN_REF (BLOCK (LEXER_ALT_ACTION (ALT %lit:STRING_LITERAL) . .)))",
        "(RULE %name:TOKEN_REF (BLOCK (LEXER_ALT_ACTION (ALT %lit:STRING_LITERAL) (LEXER_ACTION_CALL . .))))",
        "(RULE %name:TOKEN_REF (BLOCK (LEXER_ALT_ACTION (ALT %lit:STRING_LITERAL) . (LEXER_ACTION_CALL . .))))",
        "(RULE %name:TOKEN_REF (BLOCK (LEXER_ALT_ACTION (ALT %lit:STRING_LITERAL) (LEXER_ACTION_CALL . .) .)))",
        // TODO: allow doc comment in there
    };
    GrammarASTAdaptor adaptor = new GrammarASTAdaptor(ast.token.getInputStream());
    org.antlr.runtime.tree.TreeWizard wiz = new org.antlr.runtime.tree.TreeWizard(adaptor,ANTLRParser.tokenNames);
    List<Pair<GrammarAST,GrammarAST>> lexerRuleToStringLiteral = new ArrayList<Pair<GrammarAST,GrammarAST>>();
    List<GrammarAST> ruleNodes = ast.getNodesWithType(ANTLRParser.RULE);
    if ( ruleNodes==null || ruleNodes.isEmpty() ) return null;
    for (GrammarAST r : ruleNodes) {
        //tool.log("grammar", r.toStringTree());
        // System.out.println("chk: "+r.toStringTree());
        org.antlr.runtime.tree.Tree name = r.getChild(0);
        // only lexer rules (TOKEN_REF names) can alias string literals
        if ( name.getType()==ANTLRParser.TOKEN_REF ) {
            // check rule against patterns; stop at the first match
            boolean isLitRule;
            for (String pattern : patterns) {
                isLitRule = defAlias(r, pattern, wiz, lexerRuleToStringLiteral);
                if ( isLitRule ) break;
            }
            // if ( !isLitRule ) System.out.println("no pattern matched");
        }
    }
    return lexerRuleToStringLiteral;
}
Example #4
Source File: ParsingUtils.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 5 votes |
/**
 * Parses a grammar file into an ANTLR {@code GrammarRootAST}.
 * Prefers the (possibly unsaved) in-editor Document text and falls back to
 * the bytes on disk. Returns null after reporting a tool error when the
 * file cannot be read.
 */
public static GrammarRootAST parseGrammar(Project project, Tool antlr, VirtualFile grammarFile) {
    try {
        Document document = FileDocumentManager.getInstance().getDocument(grammarFile);
        String grammarText;
        if (document != null) {
            grammarText = document.getText();
        }
        else {
            // NOTE(review): decodes with the platform default charset — confirm UTF-8 is intended
            grammarText = new String(grammarFile.contentsToByteArray());
        }
        ANTLRStringStream stream = new ANTLRStringStream(grammarText);
        stream.name = grammarFile.getPath();
        return antlr.parse(grammarFile.getPath(), stream);
    }
    catch (IOException ioe) {
        antlr.errMgr.toolError(ErrorType.CANNOT_OPEN_FILE, ioe, grammarFile);
        return null;
    }
}
Example #5
Source File: ParsingUtils.java From intellij-plugin-v4 with BSD 3-Clause "New" or "Revised" License | 5 votes |
@Nullable private static Grammar loadGrammar(VirtualFile grammarFile, Project project, Tool antlr) { // basically here I am mimicking the loadGrammar() method from Tool // so that I can check for an empty AST coming back. GrammarRootAST grammarRootAST = parseGrammar(project, antlr, grammarFile); if ( grammarRootAST==null ) { return null; } // Create a grammar from the AST so we can figure out what type it is Grammar g = antlr.createGrammar(grammarRootAST); g.fileName = grammarFile.getPath(); return g; }
Example #6
Source File: Antlr4ParserTest.java From openCypher with Apache License 2.0 | 5 votes |
public void investigateTokenStream() throws IOException { // Keep: Not really testing things but quite useful for debugging antlr lexing String query = "CREATE (a)"; org.antlr.v4.Tool tool = new org.antlr.v4.Tool(); GrammarRootAST ast = tool.parseGrammarFromString( new String( Files.readAllBytes(Paths.get("../../grammar/generated/Cypher.g4"))) ); org.antlr.v4.tool.Grammar g = tool.createGrammar( ast ); tool.process( g, false ); LexerInterpreter lexer = g.createLexerInterpreter( new ANTLRInputStream( query ) ); CommonTokenStream tokenStream = new CommonTokenStream( lexer ); }
Example #7
Source File: Grammar.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Builds a map from ATN state number to the token-index region of the
 * grammar AST node that created that state.
 *
 * For RULE/BLOCK/CLOSURE nodes belonging to a transformed left-recursive
 * rule, the region is remapped to the ORIGINAL (pre-transform) rule AST so
 * that locations still point at what the user wrote.
 *
 * Returns an empty map (never null) when {@code ast} is null.
 */
public static Map<Integer, Interval> getStateToGrammarRegionMap(GrammarRootAST ast, IntervalSet grammarTokenTypes) {
    Map<Integer, Interval> stateToGrammarRegionMap = new HashMap<Integer, Interval>();
    if ( ast==null ) return stateToGrammarRegionMap;
    List<GrammarAST> nodes = ast.getNodesWithType(grammarTokenTypes);
    for (GrammarAST n : nodes) {
        // only nodes that actually produced an ATN state are mapped
        if (n.atnState != null) {
            Interval tokenRegion = Interval.of(n.getTokenStartIndex(), n.getTokenStopIndex());
            org.antlr.runtime.tree.Tree ruleNode = null;
            // RULEs, BLOCKs of transformed recursive rules point to original token interval
            switch ( n.getType() ) {
                case ANTLRParser.RULE :
                    ruleNode = n;
                    break;
                case ANTLRParser.BLOCK :
                case ANTLRParser.CLOSURE :
                    // climb to the enclosing rule to find out whether it was transformed
                    ruleNode = n.getAncestor(ANTLRParser.RULE);
                    break;
            }
            if ( ruleNode instanceof RuleAST ) {
                String ruleName = ((RuleAST) ruleNode).getRuleName();
                Rule r = ast.g.getRule(ruleName);
                if ( r instanceof LeftRecursiveRule ) {
                    // use the pre-transform AST so positions match the user's source
                    RuleAST originalAST = ((LeftRecursiveRule) r).getOriginalAST();
                    tokenRegion = Interval.of(originalAST.getTokenStartIndex(), originalAST.getTokenStopIndex());
                }
            }
            stateToGrammarRegionMap.put(n.atnState.stateNumber, tokenRegion);
        }
    }
    return stateToGrammarRegionMap;
}
Example #8
Source File: GrammarTransformPipeline.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Runs the standard AST transforms on the grammar (import integration,
 * block-to-set reduction, parameterized-loop expansion), logging the tree
 * before and after. No-op when the grammar has no AST.
 */
public void process() {
    GrammarRootAST rootAST = g.ast;
    if ( rootAST==null ) {
        return; // nothing to transform
    }
    tool.log("grammar", "before: "+rootAST.toStringTree());
    integrateImportedGrammars(g);
    reduceBlocksToSets(rootAST);
    expandParameterizedLoops(rootAST);
    tool.log("grammar", "after: "+rootAST.toStringTree());
}
Example #9
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Parses a grammar from a character stream into a {@code GrammarRootAST}.
 *
 * Wires up the tool-aware lexer/parser pair, runs the {@code grammarSpec}
 * rule, and — when the result is a proper grammar root — records whether
 * any syntax errors occurred and attaches command-line grammar options.
 * Returns null when the parse does not yield a {@code GrammarRootAST} or
 * when a v3-style tree grammar is detected (reported as a grammar error).
 */
public GrammarRootAST parse(String fileName, CharStream in) {
    try {
        GrammarASTAdaptor adaptor = new GrammarASTAdaptor(in);
        ToolANTLRLexer lexer = new ToolANTLRLexer(in, this);
        CommonTokenStream tokens = new CommonTokenStream(lexer);
        // let the lexer report against the same stream the parser consumes
        lexer.tokens = tokens;
        ToolANTLRParser p = new ToolANTLRParser(tokens, this);
        p.setTreeAdaptor(adaptor);
        try {
            ParserRuleReturnScope r = p.grammarSpec();
            GrammarAST root = (GrammarAST)r.getTree();
            if ( root instanceof GrammarRootAST) {
                // remember whether either phase saw syntax errors
                ((GrammarRootAST)root).hasErrors = lexer.getNumberOfSyntaxErrors()>0 || p.getNumberOfSyntaxErrors()>0;
                assert ((GrammarRootAST)root).tokenStream == tokens;
                if ( grammarOptions!=null ) {
                    // -D options given on the command line override nothing here; just attach them
                    ((GrammarRootAST)root).cmdLineOptions = grammarOptions;
                }
                return ((GrammarRootAST)root);
            }
        }
        catch (v3TreeGrammarException e) {
            // ANTLR v3 tree grammars are unsupported in v4; report location
            errMgr.grammarError(ErrorType.V3_TREE_GRAMMAR, fileName, e.location);
        }
        return null;
    }
    catch (RecognitionException re) {
        // TODO: do we gen errors now?
        ErrorManager.internalError("can't generate this message at moment; antlr recovers");
    }
    return null;
}
Example #10
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Try current dir then dir of g then lib dir.
 *
 * Loads (and caches) the grammar imported under {@code nameNode}'s text.
 * Each known grammar extension is tried in order until a file is found;
 * failure to locate one is reported as a grammar error and yields null.
 *
 * @param g        the importing grammar
 * @param nameNode The node associated with the imported grammar name.
 * @return the imported Grammar, or null if it cannot be found or parsed
 */
public Grammar loadImportedGrammar(Grammar g, GrammarAST nameNode) throws IOException {
    String name = nameNode.getText();
    Grammar imported = importedGrammars.get(name);
    if (imported == null) {
        g.tool.log("grammar", "load " + name + " from " + g.fileName);
        File importedFile = null;
        // probe each grammar extension until a file is found
        for (String extension : ALL_GRAMMAR_EXTENSIONS) {
            importedFile = getImportedGrammarFile(g, name + extension);
            if (importedFile != null) {
                break;
            }
        }
        if ( importedFile==null ) {
            errMgr.grammarError(ErrorType.CANNOT_FIND_IMPORTED_GRAMMAR, g.fileName, nameNode.getToken(), name);
            return null;
        }
        String absolutePath = importedFile.getAbsolutePath();
        ANTLRFileStream in = new ANTLRFileStream(absolutePath, grammarEncoding);
        // NOTE(review): the importer's file name (g.fileName) is passed here, not
        // absolutePath — errors may be attributed to the importing grammar; confirm intended
        GrammarRootAST root = parse(g.fileName, in);
        if (root == null) {
            return null;
        }
        imported = createGrammar(root);
        imported.fileName = absolutePath;
        // cache under the name declared inside the grammar, not the lookup name
        importedGrammars.put(root.getGrammarName(), imported);
    }
    return imported;
}
Example #11
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Convenience method to load and process an ANTLR grammar. Useful when
 * creating interpreters. If you need access to the lexer grammar created
 * while processing a combined grammar, use getImplicitLexer() on the
 * returned grammar.
 */
public Grammar loadGrammar(String fileName) {
    GrammarRootAST rootAST = parseGrammar(fileName);
    // NOTE(review): a failed parse can return null here, making createGrammar
    // throw NullPointerException — confirm callers depend on that behavior
    final Grammar grammar = createGrammar(rootAST);
    grammar.fileName = fileName;
    process(grammar, false);
    return grammar;
}
Example #12
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 5 votes |
/**
 * Manually looks up an option's value node in the grammar tree.
 *
 * @return the value AST of the option named {@code option}, or null when the
 *         grammar has no OPTIONS block or the option is not defined
 */
public static GrammarAST findOptionValueAST(GrammarRootAST root, String option) {
    GrammarAST optionsNode = (GrammarAST)root.getFirstChildWithType(ANTLRParser.OPTIONS);
    if ( optionsNode==null || optionsNode.getChildCount()==0 ) {
        return null;
    }
    for (Object child : optionsNode.getChildren()) {
        GrammarAST assign = (GrammarAST)child;
        // each option is stored as an ASSIGN node: (= name value)
        boolean matches = assign.getType() == ANTLRParser.ASSIGN
                          && assign.getChild(0).getText().equals(option);
        if ( matches ) {
            return (GrammarAST)assign.getChild(1);
        }
    }
    return null;
}
Example #13
Source File: LexerGrammar.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
/** Builds a lexer grammar from a parsed grammar AST; all work is delegated to the Grammar superclass constructor. */
public LexerGrammar(Tool tool, GrammarRootAST ast) {
    super(tool, ast);
}
Example #14
Source File: Tool.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
/**
 * Parses grammar text held in memory. The synthetic file name
 * {@code "<string>"} is used for error reporting.
 */
public GrammarRootAST parseGrammarFromString(String grammar) {
    ANTLRStringStream in = new ANTLRStringStream(grammar);
    return parse("<string>", in);
}
Example #15
Source File: LeftRecursiveRuleTransformer.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
/**
 * Captures the grammar AST, the candidate rules, and the owning grammar for
 * the left-recursion transformation passes.
 */
public LeftRecursiveRuleTransformer(GrammarRootAST ast, Collection<Rule> rules, Grammar g) {
    this.g = g;
    // convenience alias so passes don't have to go through g.tool each time
    this.tool = g.tool;
    this.ast = ast;
    this.rules = rules;
}
Example #16
Source File: LeftRecursiveRuleTransformer.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
/**
 * Return true if successful.
 *
 * Rewrites a directly left-recursive rule into the equivalent
 * operator-precedence form: analyzes the original rule, generates the text
 * of the artificial rule, parses it back into an AST, splices it into the
 * grammar in place of the original, reruns semantic checks on it, and
 * records the recursive-alternative bookkeeping codegen needs.
 */
public boolean translateLeftRecursiveRule(GrammarRootAST ast, LeftRecursiveRule r, String language) {
    //tool.log("grammar", ruleAST.toStringTree());
    GrammarAST prevRuleAST = r.ast;
    String ruleName = prevRuleAST.getChild(0).getText();
    LeftRecursiveRuleAnalyzer leftRecursiveRuleWalker = new LeftRecursiveRuleAnalyzer(prevRuleAST, tool, ruleName, language);
    boolean isLeftRec;
    try {
        // System.out.println("TESTING ---------------\n"+
        // leftRecursiveRuleWalker.text(ruleAST));
        isLeftRec = leftRecursiveRuleWalker.rec_rule();
    }
    catch (RecognitionException re) {
        isLeftRec = false; // didn't match; oh well
    }
    if ( !isLeftRec ) return false;
    // replace old rule's AST; first create text of altered rule
    GrammarAST RULES = (GrammarAST)ast.getFirstChildWithType(ANTLRParser.RULES);
    String newRuleText = leftRecursiveRuleWalker.getArtificialOpPrecRule();
    // System.out.println("created: "+newRuleText);
    // now parse within the context of the grammar that originally created
    // the AST we are transforming. This could be an imported grammar so
    // we cannot just reference this.g because the role might come from
    // the imported grammar and not the root grammar (this.g)
    RuleAST t = parseArtificialRule(prevRuleAST.g, newRuleText);
    // reuse the name token from the original AST since it refers to the
    // proper source location in the original grammar
    ((GrammarAST)t.getChild(0)).token = ((GrammarAST)prevRuleAST.getChild(0)).getToken();
    // update grammar AST and set rule's AST.
    RULES.setChild(prevRuleAST.getChildIndex(), t);
    r.ast = t;
    // Reduce sets in newly created rule tree
    GrammarTransformPipeline transform = new GrammarTransformPipeline(g, g.tool);
    transform.reduceBlocksToSets(r.ast);
    transform.expandParameterizedLoops(r.ast);
    // Rerun semantic checks on the new rule
    RuleCollector ruleCollector = new RuleCollector(g);
    ruleCollector.visit(t, "rule");
    BasicSemanticChecks basics = new BasicSemanticChecks(g, ruleCollector);
    // disable the assoc element option checks because they are already
    // handled for the pre-transformed rule.
    basics.checkAssocElementOption = false;
    basics.visit(t, "rule");
    // track recursive alt info for codegen
    r.recPrimaryAlts = new ArrayList<LeftRecursiveRuleAltInfo>();
    r.recPrimaryAlts.addAll(leftRecursiveRuleWalker.prefixAndOtherAlts);
    if (r.recPrimaryAlts.isEmpty()) {
        // a left-recursive rule needs at least one non-left-recursive alternative
        tool.errMgr.grammarError(ErrorType.NO_NON_LR_ALTS, g.fileName, ((GrammarAST)r.ast.getChild(0)).getToken(), r.name);
    }
    r.recOpAlts = new OrderedHashMap<Integer, LeftRecursiveRuleAltInfo>();
    r.recOpAlts.putAll(leftRecursiveRuleWalker.binaryAlts);
    r.recOpAlts.putAll(leftRecursiveRuleWalker.ternaryAlts);
    r.recOpAlts.putAll(leftRecursiveRuleWalker.suffixAlts);
    // walk alt info records and set their altAST to point to appropriate ALT subtree
    // from freshly created AST
    setAltASTPointers(r, t);
    // update Rule to just one alt and add prec alt
    ActionAST arg = (ActionAST)r.ast.getFirstChildWithType(ANTLRParser.ARG_ACTION);
    if ( arg!=null ) {
        r.args = ScopeParser.parseTypedArgList(arg, arg.getText(), g);
        r.args.type = AttributeDict.DictType.ARG;
        r.args.ast = arg;
        arg.resolver = r.alt[1]; // todo: isn't this Rule or something?
    }
    // define labels on recursive rule refs we delete; they don't point to nodes of course
    // these are so $label in action translation works
    for (Pair<GrammarAST,String> pair : leftRecursiveRuleWalker.leftRecursiveRuleRefLabels) {
        GrammarAST labelNode = pair.a;
        GrammarAST labelOpNode = (GrammarAST)labelNode.getParent();
        GrammarAST elementNode = (GrammarAST)labelOpNode.getChild(1);
        LabelElementPair lp = new LabelElementPair(g, labelNode, elementNode, labelOpNode.getType());
        r.alt[1].labelDefs.map(labelNode.getText(), lp);
    }
    // copy to rule from walker
    r.leftRecursiveRuleRefLabels = leftRecursiveRuleWalker.leftRecursiveRuleRefLabels;
    tool.log("grammar", "added: "+t.toStringTree());
    return true;
}
Example #17
Source File: BasicSemanticChecks.java From codebuff with BSD 2-Clause "Simplified" License | 4 votes |
/** Called when the walk reaches the grammar root; validates the grammar's name token. */
@Override
public void discoverGrammar(GrammarRootAST root, GrammarAST ID) {
    checkGrammarName(ID.token);
}
Example #18
Source File: Antlr4ToolFacade.java From openCypher with Apache License 2.0 | 4 votes |
/**
 * Asserts that the grammar resource can be written, parsed, and processed
 * by the ANTLR tool without errors, failing the test with a diagnostic
 * report at whichever stage breaks first.
 */
public static void assertGeneratesValidParser( String resource ) throws Exception {
    Output.Readable buffer = stringBuilder();
    Tool antlr = new Tool();
    Antlr4ToolFacade facade = new Antlr4ToolFacade( antlr, buffer );

    // Stage 1: render the grammar resource into the buffer.
    try {
        Antlr4.write( Fixture.grammarResource( Antlr4.class, resource ), buffer );
    }
    catch ( Throwable e ) {
        // prefer the facade's detailed failure report; fall back to the
        // original exception if reporting itself asserts
        try {
            facade.reportFailureIn( "generating grammar" );
        }
        catch ( AssertionError x ) {
            throw e;
        }
    }

    // Stage 2: parse the generated grammar text.
    antlr.addListener( facade );
    GrammarRootAST ast = antlr.parse( resource, new ANTLRReaderStream( buffer.reader() ) );
    if ( ast.hasErrors ) {
        // narrow the failure location to the last rule that parsed cleanly
        RuleAST lastGood = lastGoodRule( ast );
        if ( lastGood == null ) {
            facade.reportFailureIn( "parsing grammar" );
        }
        else {
            facade.reportFailureIn( "parsing grammar, after " + lastGood.getRuleName() + " on line " + lastGood.getLine() );
        }
    }

    // Stage 3: run the full tool pipeline and check for collected errors.
    antlr.process( antlr.createGrammar( ast ), false );
    if ( facade.hasErrors() ) {
        facade.reportFailureIn( "processing grammar" );
    }
}