edu.stanford.nlp.trees.TypedDependency Java Examples
The following examples show how to use
edu.stanford.nlp.trees.TypedDependency.
Each example is drawn from an open-source project; the source file, project, and license are noted above each example, and you can consult the original project or source file to see the code in context.
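Before the project examples, here is a minimal sketch of the typical way TypedDependency objects are obtained and read. It assumes CoreNLP 3.x with the English models jar on the classpath; the class name QuickStart and the sample sentence are illustrative only.

import java.io.StringReader;
import java.util.List;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.process.Tokenizer;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.trees.TypedDependency;

public class QuickStart {
    public static void main(String[] args) {
        // Load the default English PCFG model (requires the models jar on the classpath).
        LexicalizedParser parser = LexicalizedParser.loadModel();

        // Tokenize a single sentence.
        TokenizerFactory<CoreLabel> tokenizerFactory =
                PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
        Tokenizer<CoreLabel> tokenizer =
                tokenizerFactory.getTokenizer(new StringReader("The cow jumped over the moon."));
        List<CoreLabel> tokens = tokenizer.tokenize();

        // Parse, then convert the constituency tree into typed dependencies.
        Tree tree = parser.apply(tokens);
        TreebankLanguagePack tlp = parser.treebankLanguagePack();
        GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
        GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);

        // Each TypedDependency is a (relation, governor, dependent) triple.
        for (TypedDependency td : gs.typedDependenciesCCprocessed()) {
            System.out.println(td.reln() + "(" + td.gov() + ", " + td.dep() + ")");
        }
    }
}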
Example #1
Source File: MainTest.java From dependensee with GNU General Public License v2.0
/**
 * Test of writeImage method, of class Main.
 */
@Test
public void testWriteImage() throws Exception {
    String text = "A quick brown fox jumped over the lazy dog.";
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    LexicalizedParser lp = LexicalizedParser.loadModel();
    lp.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(text)).tokenize();
    Tree tree = lp.apply(wordList);
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependenciesCollapsed();
    Main.writeImage(tdl, "image.png", 3);
    assert (new File("image.png").exists());
}
Example #2
Source File: Extract.java From phrases with Apache License 2.0
private static Pattern TryExtractPattern(TypedDependency dependency) {
    String rel = dependency.reln().toString();
    String gov = dependency.gov().value();
    String govTag = dependency.gov().label().tag();
    String dep = dependency.dep().value();
    String depTag = dependency.dep().label().tag();

    Pattern.Relation relation = Pattern.asRelation(rel);
    if (relation != null) {
        Pattern pattern = new Pattern(gov, govTag, dep, depTag, relation);
        if (pattern.isPrimaryPattern()) {
            return pattern;
        }
    }
    return null;
}
Example #3
Source File: NERTool.java From Criteria2Query with Apache License 2.0
/**
 * Word dependency.
 * Author: chi
 * Date: 2017-3-22
 */
public Collection<TypedDependency> outputDependency(Tree t) {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    // tlp.setGenerateOriginalDependencies(true); // Stanford Dependencies
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
    Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    int countforitem = 0;
    int source = 0;
    int target = 0;
    for (TypedDependency item : tdl) {
        System.out.println(item);
    }
    return tdl;
}
Example #4
Source File: NumberOfToken.java From NLIWOD with GNU Affero General Public License v3.0
/***
 * Returns a list of all noun phrases of the question q.
 * @param q a question
 * @return list of noun phrases
 */
private ArrayList<String> getNounPhrases(String q) {
    ArrayList<String> nounP = new ArrayList<String>();
    Annotation annotation = new Annotation(q);
    PIPELINE.annotate(annotation);
    List<CoreMap> question = annotation.get(CoreAnnotations.SentencesAnnotation.class);

    for (CoreMap sentence : question) {
        SemanticGraph basicDeps = sentence.get(BasicDependenciesAnnotation.class);
        Collection<TypedDependency> typedDeps = basicDeps.typedDependencies();

        Iterator<TypedDependency> dependencyIterator = typedDeps.iterator();
        while (dependencyIterator.hasNext()) {
            TypedDependency dependency = dependencyIterator.next();
            String depString = dependency.reln().toString();
            if (depString.equals("compound") || depString.equals("amod")) {
                String dep = dependency.dep().toString();
                String gov = dependency.gov().toString();
                nounP.add(dep.substring(0, dep.lastIndexOf("/"))
                        + " " + gov.substring(0, gov.lastIndexOf("/")));
            }
        }
    }
    return nounP;
}
Example #5
Source File: CoreNLPUtils.java From minie with GNU General Public License v3.0
/**
 * Given the sentence semantic graph and a list of words, get a subgraph containing just the words in the list
 * 'words'. Each typed dependency has each word from the list as a governor.
 * @param sg: sentence semantic graph
 * @param words: list of words which should contain the semantic graph
 * @return subgraph containing the words from 'words'
 * TODO: this needs to be double checked! In some cases we have weird graphs, where there are words missing.
 * E.g. the sentence 120 from NYT "The International ... ". Try this for getting the subgraph when the source is
 * detected.
 */
public static SemanticGraph getSubgraphFromWords(SemanticGraph sg, ObjectArrayList<IndexedWord> words) {
    // Determining the root
    int minInd = Integer.MAX_VALUE;
    IndexedWord root = new IndexedWord();
    for (IndexedWord w : words) {
        if (w.index() < minInd) {
            minInd = w.index();
            root = w;
        }
    }

    // Getting the typed dependencies
    ObjectArrayList<TypedDependency> tds = new ObjectArrayList<TypedDependency>();
    for (TypedDependency td : sg.typedDependencies()) {
        if (words.contains(td.gov()) && words.contains(td.dep()))
            tds.add(td);
    }

    // Create the semantic graph
    TreeGraphNode rootTGN = new TreeGraphNode(new CoreLabel(root));
    EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(tds, rootTGN);
    SemanticGraph phraseSg = SemanticGraphFactory.generateUncollapsedDependencies(gs);

    return phraseSg;
}
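A possible way to call getSubgraphFromWords is sketched below. It assumes MinIE's CoreNLPUtils class and the fastutil library are on the classpath (the project-specific import for CoreNLPUtils is omitted), and the pipeline settings, class name, and sample sentence are illustrative assumptions rather than MinIE code.

import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
// import of CoreNLPUtils from the MinIE project omitted (project-specific package)

public class SubgraphFromWordsDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit,pos,lemma,depparse");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

        Annotation doc = new Annotation("The quick brown fox jumped over the lazy dog.");
        pipeline.annotate(doc);
        CoreMap sentence = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        SemanticGraph sg = sentence.get(
                SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);

        // Keep only the first four tokens of the sentence as the word list.
        ObjectArrayList<IndexedWord> words = new ObjectArrayList<>();
        for (IndexedWord w : sg.vertexListSorted()) {
            if (w.index() <= 4) {
                words.add(w);
            }
        }

        // Build the subgraph spanning just those words.
        SemanticGraph sub = CoreNLPUtils.getSubgraphFromWords(sg, words);
        System.out.println(sub.toCompactString());
    }
}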
Example #6
Source File: CoreNLPUtils.java From minie with GNU General Public License v3.0
private static SemanticGraph getSubgraph(ObjectArrayList<TypedDependency> tds, SemanticGraph sg, IndexedWord parent,
                                         SemanticGraphEdge e, int maxPathLength, ObjectArrayList<IndexedWord> words) {
    Set<IndexedWord> children = sg.getChildren(parent);

    for (IndexedWord child : children) {
        if (((sg.getShortestDirectedPathEdges(sg.getFirstRoot(), child)).size() <= maxPathLength)
                && words.contains(child)) {
            e = sg.getEdge(parent, child);
            tds.add(new TypedDependency(e.getRelation(), parent, child));
            if (sg.hasChildren(child))
                getSubgraph(tds, sg, child, e, maxPathLength, words);
        } // else break;
    }

    TreeGraphNode rootTGN = new TreeGraphNode(new CoreLabel(parent));
    EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(tds, rootTGN);
    return SemanticGraphFactory.generateUncollapsedDependencies(gs);
}
Example #7
Source File: Phrase.java From minie with GNU General Public License v3.0
/**
 * Parametric constructor with a list of words for the phrase and the semantic graph of the phrase. The root
 * and the list of typed dependencies are empty.
 * @param wList: list of words for the phrase
 * @param sg: semantic graph for the phrase
 */
public Phrase(ObjectArrayList<IndexedWord> wList, SemanticGraph sg) {
    this.wordList = wList;
    this.phraseGraph = sg;
    this.root = new IndexedWord();
    this.tds = new ObjectArrayList<TypedDependency>();
}
Example #8
Source File: Extract.java From phrases with Apache License 2.0
private static List<Pattern> ExtractPrimaryPatterns(Collection<TypedDependency> tdl) {
    List<Pattern> primary = new ArrayList<Pattern>();

    for (TypedDependency td : tdl) {
        Pattern pattern = TryExtractPattern(td);
        if (pattern != null) {
            primary.add(pattern);
        }
    }
    return primary;
}
Example #9
Source File: ParseTree.java From NLIDB with Apache License 2.0
/**
 * Construct a parse tree using the Stanford NLP parser. Only one sentence.
 * Here we are omitting the information of dependency labels (tags).
 * @param text input text.
 */
public ParseTree(String text, NLParser parser) {
    // pre-processing the input text
    DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(text));
    List<HasWord> sentence = null;
    for (List<HasWord> sentenceHasWord : tokenizer) {
        sentence = sentenceHasWord;
        break;
    }
    // part-of-speech tagging
    List<TaggedWord> tagged = parser.tagger.tagSentence(sentence);
    // dependency syntax parsing
    GrammaticalStructure gs = parser.parser.predict(tagged);

    // Reading the parsed sentence into ParseTree
    int N = sentence.size() + 1;
    Node[] nodes = new Node[N];
    root = new Node(0, "ROOT", "ROOT");
    nodes[0] = root;
    for (int i = 0; i < N - 1; i++) {
        nodes[i + 1] = new Node(i + 1, sentence.get(i).word(), tagged.get(i).tag());
    }
    for (TypedDependency typedDep : gs.allTypedDependencies()) {
        int from = typedDep.gov().index();
        int to = typedDep.dep().index();
        // String label = typedDep.reln().getShortName(); // omitting the label
        nodes[to].parent = nodes[from];
        nodes[from].children.add(nodes[to]);
    }
}
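The NLParser wrapper above is project-specific; it appears to expose a POS tagger and a neural dependency parser. A roughly equivalent standalone sketch using CoreNLP's MaxentTagger and DependencyParser directly is shown below; the tagger model path varies by CoreNLP version and is an assumption, as are the class name and sample sentence.

import java.io.StringReader;
import java.util.List;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.parser.nndep.DependencyParser;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.TypedDependency;

public class DependencyParseDemo {
    public static void main(String[] args) {
        // Model paths are assumptions; adjust to the models shipped with your CoreNLP version.
        MaxentTagger tagger = new MaxentTagger(
                "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger");
        DependencyParser depParser = DependencyParser.loadFromModelFile(DependencyParser.DEFAULT_MODEL);

        DocumentPreprocessor tokenizer =
                new DocumentPreprocessor(new StringReader("The cow jumped over the moon."));
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged = tagger.tagSentence(sentence);
            GrammaticalStructure gs = depParser.predict(tagged);
            for (TypedDependency td : gs.allTypedDependencies()) {
                // gov()/dep() return IndexedWord; index() is 1-based, 0 is the artificial ROOT.
                System.out.println(td.reln() + " " + td.gov().index() + " -> " + td.dep().index());
            }
        }
    }
}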
Example #10
Source File: Phrase.java From minie with GNU General Public License v3.0
/**
 * Given a sentence semantic graph, set the typed dependencies list of the phrase. For this to work, the list of
 * words (this.wordList) must already be known; otherwise, the tds list will be empty. Each typed dependency in
 * the list must contain both the parent and the child in the word list.
 * @param sg: sentence semantic graph (the phrase must be derived from this graph, i.e. all the nodes and edges of
 * the phrase must be found in this graph; otherwise, the TDs list will be empty)
 */
public void setTdsFromSentenceSemGraph(SemanticGraph sg) {
    // If the semantic graph of the sentence or the list of words is empty, return
    if (sg.isEmpty() || this.wordList.isEmpty()) {
        tds = new ObjectArrayList<TypedDependency>();
        return;
    }
    for (TypedDependency td : sg.typedDependencies()) {
        if (this.wordList.contains(td.dep()) && this.wordList.contains(td.gov()))
            this.tds.add(td);
    }
}
Example #11
Source File: StanfordLexicalDemo.java From Natural-Language-Processing-with-Java-Second-Edition with MIT License
public static void main(String args[]) {
    String parseModel = getResourcePath() + "englishPCFG.ser.gz";
    LexicalizedParser lexicalizedParser = LexicalizedParser.loadModel(parseModel);

    String[] sentenceArray = {"The", "cow", "jumped", "over", "the", "moon", "."};
    List<CoreLabel> words = SentenceUtils.toCoreLabelList(sentenceArray);
    Tree parseTree = lexicalizedParser.apply(words);
    parseTree.pennPrint();

    TreePrint treePrint = new TreePrint("typedDependenciesCollapsed");
    treePrint.printTree(parseTree);

    String sentence = "The cow jumped over the moon.";
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tokenizer = tokenizerFactory.getTokenizer(new StringReader(sentence));
    List<CoreLabel> wordList = tokenizer.tokenize();
    parseTree = lexicalizedParser.apply(wordList);

    TreebankLanguagePack tlp = lexicalizedParser.treebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parseTree);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    for (TypedDependency dependency : tdl) {
        System.out.println("Governor Word: [" + dependency.gov()
                + "] Relation: [" + dependency.reln().getLongName()
                + "] Dependent Word: [" + dependency.dep() + "]");
    }
}
Example #12
Source File: Phrase.java From minie with GNU General Public License v3.0
/**
 * Parametric constructor with a list of words for the phrase. The root, phrase graph and the list of typed
 * dependencies are empty.
 * @param wList: list of words for the phrase
 */
public Phrase(ObjectArrayList<IndexedWord> wList) {
    this.wordList = wList;
    this.phraseGraph = new SemanticGraph();
    this.root = new IndexedWord();
    this.tds = new ObjectArrayList<TypedDependency>();
}
Example #13
Source File: Phrase.java From minie with GNU General Public License v3.0
/** Constructors **/
public Phrase() {
    this.wordList = new ObjectArrayList<IndexedWord>();
    this.phraseGraph = new SemanticGraph();
    this.root = new IndexedWord();
    this.tds = new ObjectArrayList<TypedDependency>();
}
Example #14
Source File: CoreNLPUtils.java From minie with GNU General Public License v3.0
private static ObjectArrayList<TypedDependency> getSubgraphTypedDependencies(SemanticGraph sg, IndexedWord parent,
                                                                             ObjectArrayList<TypedDependency> tds) {
    Set<IndexedWord> children = sg.getChildren(parent);

    for (IndexedWord child : children) {
        GrammaticalRelation gRel = sg.getEdge(parent, child).getRelation();
        tds.add(new TypedDependency(gRel, parent, child));
        if (sg.hasChildren(child))
            getSubgraphTypedDependencies(sg, child, tds);
    }

    return tds;
}
Example #15
Source File: President.java From Natural-Language-Processing-with-Java-Second-Edition with MIT License
public void processWhoQuestion(List<TypedDependency> tdl) {
    List<President> list = createPresidentList();
    for (TypedDependency dependency : tdl) {
        if ("president".equalsIgnoreCase(dependency.gov().originalText())
                && "adjectival modifier".equals(dependency.reln().getLongName())) {
            String positionText = dependency.dep().originalText();
            int position = getOrder(positionText) - 1;
            System.out.println("The president is " + list.get(position).getName());
        }
    }
}
Example #16
Source File: Chapter7.java From Natural-Language-Processing-with-Java-Second-Edition with MIT License
private static void processWhoQuestion(List<TypedDependency> tdl) {
    System.out.println("Processing Who Question");
    List<President> list = createPresidentList();
    for (TypedDependency dependency : tdl) {
        if ("president".equalsIgnoreCase(dependency.gov().originalText())
                && "adjectival modifier".equals(dependency.reln().getLongName())) {
            String positionText = dependency.dep().originalText();
            int position = getOrder(positionText) - 1;
            System.out.println("The president is " + list.get(position).getName());
        }
    }
}
Example #17
Source File: CoreNLP.java From Criteria2Query with Apache License 2.0
/**
 * Word dependency.
 * Author: chi
 * Date: 2017-3-22
 */
public Collection<TypedDependency> outputDependency(Tree t) {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    // tlp.setGenerateOriginalDependencies(true); // Stanford Dependencies
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(t);
    Collection<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    int countforitem = 0;
    int source = 0;
    int target = 0;
    return tdl;
}
Example #18
Source File: CoreNLPUtils.java From minie with GNU General Public License v3.0
/**
 * Given a semantic graph of a whole sentence (sg) and a "local root" node, get the subgraph from 'sg' which has
 * 'localRoot' as a root.
 * @param sg: semantic graph of the whole sentence
 * @param localRoot: the root of the subgraph
 * @return semantic graph object which is the subgraph from 'sg'
 */
public static SemanticGraph getSubgraph(SemanticGraph sg, IndexedWord localRoot) {
    ObjectArrayList<TypedDependency> subGraphDependencies =
            getSubgraphTypedDependencies(sg, localRoot, new ObjectArrayList<TypedDependency>());
    TreeGraphNode rootTGN = new TreeGraphNode(new CoreLabel(localRoot));
    EnglishGrammaticalStructure gs = new EnglishGrammaticalStructure(subGraphDependencies, rootTGN);
    return SemanticGraphFactory.generateUncollapsedDependencies(gs);
}
Example #19
Source File: CoreNLPUtils.java From minie with GNU General Public License v3.0
/**
 * Given a semantic graph and a node which is part of the graph, return the constituent subgraph which has as root
 * constituentRoot.
 * @param sg: the semantic graph from which the constituent sub-graph should be derived
 * @param constituentRoot: the root node for the constituent
 * @return the subgraph with constituentRoot as a root
 */
public static SemanticGraph getSubgraph(SemanticGraph sg, IndexedWord constituentRoot,
                                        ObjectArrayList<IndexedWord> words) {
    int maxPathLength = -1;
    int pathLength;
    for (IndexedWord word : words) {
        pathLength = sg.getShortestDirectedPathEdges(sg.getFirstRoot(), word).size();
        if (pathLength > maxPathLength)
            maxPathLength = pathLength;
    }

    ObjectArrayList<TypedDependency> tds = new ObjectArrayList<TypedDependency>();
    return getSubgraph(tds, sg, constituentRoot, null, maxPathLength, words);
}
Example #20
Source File: Phrase.java From minie with GNU General Public License v3.0
public ObjectArrayList<TypedDependency> getTypedDependencies() {
    return this.tds;
}
Example #21
Source File: Phrase.java From minie with GNU General Public License v3.0
public void setTypedDependencies(ObjectArrayList<TypedDependency> tds) {
    this.tds = tds;
}
Example #22
Source File: LogicAnalysisTool.java From Criteria2Query with Apache License 2.0
public List<LinkedHashSet<Integer>> decompose(Sentence p) {
    Collection<TypedDependency> tdset = snlp.getDependency(p.getText());
    int entity1_median = 0;
    int entity2_median = 0;
    List<LinkedHashSet<Integer>> conj_or = new ArrayList<LinkedHashSet<Integer>>();

    for (TypedDependency item : tdset) {
        if (item.reln().toString().equals("conj:or")) {
            entity1_median = (item.dep().beginPosition() + item.dep().endPosition()) / 2;
            entity2_median = (item.gov().beginPosition() + item.gov().endPosition()) / 2;
            LinkedHashSet<Integer> conj_or_group_1 = searchGroup(conj_or, entity1_median);
            LinkedHashSet<Integer> conj_or_group_2 = searchGroup(conj_or, entity2_median);
            if (conj_or_group_1 == null && conj_or_group_2 == null) {
                LinkedHashSet<Integer> conj_or_group = new LinkedHashSet<Integer>();
                conj_or_group.add(entity1_median);
                conj_or_group.add(entity2_median);
                conj_or.add(conj_or_group);
            } else if (conj_or_group_1 != null && conj_or_group_2 == null) {
                conj_or.remove(conj_or_group_1);
                conj_or_group_1.add(entity2_median);
                conj_or.add(conj_or_group_1);
            } else if (conj_or_group_1 == null && conj_or_group_2 != null) {
                conj_or.remove(conj_or_group_2);
                conj_or_group_2.add(entity1_median);
                conj_or.add(conj_or_group_2);
            }
        }
    }
    // printoutGroups(conj_or);

    List<LinkedHashSet<Integer>> entity_group = new ArrayList<LinkedHashSet<Integer>>();
    for (int i = 0; i < conj_or.size(); i++) {
        LinkedHashSet<Integer> entities = new LinkedHashSet<Integer>();
        for (Integer b : conj_or.get(i)) {
            if (p.getTerms() != null) {
                for (Term t : p.getTerms()) {
                    if (b >= t.getStart_index() && b <= t.getEnd_index()) {
                        entities.add(t.getTermId());
                    }
                }
            }
        }
        entity_group.add(entities);
    }
    return entity_group;
}
Example #23
Source File: CoreNLPDependencyParser.java From Heracles with GNU General Public License v3.0
@Override
public void validatedProcess(Dataset dataset, String spanTypeOfSentenceUnit) {
    Properties prop1 = new Properties();
    prop1.setProperty("annotators", "depparse");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(prop1, false);

    for (Span span : dataset.getSpans(spanTypeOfSentenceUnit)) {
        HashMap<Integer, Word> wordIndex = new HashMap<>();
        Annotation a = CoreNLPHelper.reconstructStanfordAnnotations(span, wordIndex);
        // Main.debug(span.toString());
        pipeline.annotate(a);

        for (CoreMap sentence : a.get(SentencesAnnotation.class)) {
            // per sentence, get the dependencies
            SemanticGraph dependencies = sentence.get(EnhancedPlusPlusDependenciesAnnotation.class);

            for (TypedDependency td : dependencies.typedDependencies()) {
                // Main.debug(td.toString());
                String relationType = td.reln().getLongName();
                Word dep = wordIndex.get(td.dep().beginPosition());
                DataEntity gov = wordIndex.get(td.gov().beginPosition());
                if (gov == null) {
                    // this is the root, link to sentence
                    gov = span;
                }
                if (dep == null || gov == null) {
                    Framework.debug(td.toString());
                    Framework.debug(td.dep().beginPosition() + "\t" + td.gov().beginPosition());
                    Framework.debug(wordIndex.toString());
                }
                Relation rel = new Relation("deps", gov, dep);
                rel.putAnnotation("relationLongName", td.reln().getLongName());
                if (td.reln().getParent() != null)
                    rel.putAnnotation("relationParentShortName", td.reln().getParent().getShortName());
                rel.putAnnotation("relationShortName", td.reln().getShortName());
                // rel.putAnnotation("relationSpecific", td.reln().getSpecific());
                dep.getRelations().addRelationToParent(rel);
                gov.getRelations().addRelationToChild(rel);
            }
            // dependencies.prettyPrint();
        }
    }
}
Example #24
Source File: CoreNLP.java From Criteria2Query with Apache License 2.0
public Collection<TypedDependency> getDependency(String sentence) {
    Tree tree = parseSentence(sentence);
    Collection<TypedDependency> tdl = outputDependency(tree);
    return tdl;
}
Example #25
Source File: LogicAnalysisTool.java From Criteria2Query with Apache License 2.0
public List<LinkedHashSet<Integer>> decompose(String text, List<Term> terms) {
    Collection<TypedDependency> tdset = snlp.getDependency(text);
    int entity1_median = 0;
    int entity2_median = 0;
    List<LinkedHashSet<Integer>> conj_or = new ArrayList<LinkedHashSet<Integer>>();

    for (TypedDependency item : tdset) {
        if (item.reln().toString().equals("conj:or")) {
            entity1_median = (item.dep().beginPosition() + item.dep().endPosition()) / 2;
            entity2_median = (item.gov().beginPosition() + item.gov().endPosition()) / 2;
            LinkedHashSet<Integer> conj_or_group_1 = searchGroup(conj_or, entity1_median);
            LinkedHashSet<Integer> conj_or_group_2 = searchGroup(conj_or, entity2_median);
            if (conj_or_group_1 == null && conj_or_group_2 == null) {
                LinkedHashSet<Integer> conj_or_group = new LinkedHashSet<Integer>();
                conj_or_group.add(entity1_median);
                conj_or_group.add(entity2_median);
                conj_or.add(conj_or_group);
            } else if (conj_or_group_1 != null && conj_or_group_2 == null) {
                conj_or.remove(conj_or_group_1);
                conj_or_group_1.add(entity2_median);
                conj_or.add(conj_or_group_1);
            } else if (conj_or_group_1 == null && conj_or_group_2 != null) {
                conj_or.remove(conj_or_group_2);
                conj_or_group_2.add(entity1_median);
                conj_or.add(conj_or_group_2);
            }
        }
    }
    // printoutGroups(conj_or);

    List<LinkedHashSet<Integer>> entity_group = new ArrayList<LinkedHashSet<Integer>>();
    for (int i = 0; i < conj_or.size(); i++) {
        LinkedHashSet<Integer> entities = new LinkedHashSet<Integer>();
        for (Integer b : conj_or.get(i)) {
            for (Term t : terms) {
                if (b >= t.getStart_index() && b <= t.getEnd_index()) {
                    entities.add(t.getTermId());
                }
            }
        }
        entity_group.add(entities);
    }
    return entity_group;
}
Example #26
Source File: DependencyProjectorCoNLL.java From phrasal with GNU General Public License v3.0
public static HashMap<Integer, Integer> getDependenciesFromCoreMap(CoreMap annotation) {
    SemanticGraph semanticGraph = annotation.get(BasicDependenciesAnnotation.class);
    Collection<TypedDependency> dependencies = semanticGraph.typedDependencies();

    HashMap<Integer, Integer> reverseDependencies = new HashMap<Integer, Integer>();
    for (TypedDependency dep : dependencies) {
        int govIndex = dep.gov().index() - 1;
        int depIndex = dep.dep().index() - 1;
        reverseDependencies.put(depIndex, govIndex);
    }
    return reverseDependencies;
}
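One possible way to drive this helper is sketched below, assuming a standard tokenize/ssplit/pos/depparse pipeline produces the per-sentence CoreMap; the pipeline setup, class name, and sample sentence are assumptions, not part of the phrasal source, and the project-specific import for DependencyProjectorCoNLL is omitted.

import java.util.HashMap;
import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;
// import of DependencyProjectorCoNLL omitted (phrasal project package)

public class ReverseDependenciesDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit,pos,depparse");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

        Annotation doc = new Annotation("The cow jumped over the moon.");
        pipeline.annotate(doc);

        for (CoreMap sentence : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
            // Maps each 0-based dependent index to its 0-based governor index
            // (-1 when the governor is the artificial root).
            HashMap<Integer, Integer> heads =
                    DependencyProjectorCoNLL.getDependenciesFromCoreMap(sentence);
            System.out.println(heads);
        }
    }
}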
Example #27
Source File: Chapter7.java From Natural-Language-Processing-with-Java-Second-Edition with MIT License
private static void usingStanfordLexicalizedParser() {
    String parserModel = "C:/Current Books in Progress/NLP and Java/Models/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz";
    LexicalizedParser lexicalizedParser = LexicalizedParser.loadModel(parserModel);

    // This option shows parsing a list of correctly tokenized words
    System.out.println("---First option");
    String[] sentenceArray = {"The", "cow", "jumped", "over", "the", "moon", "."};
    List<CoreLabel> words = Sentence.toCoreLabelList(sentenceArray);
    Tree parseTree = lexicalizedParser.apply(words);
    parseTree.pennPrint();
    System.out.println();

    // This option shows loading and using an explicit tokenizer
    System.out.println("---Second option");
    String sentence = "The cow jumped over the moon.";
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tokenizer = tokenizerFactory.getTokenizer(new StringReader(sentence));
    List<CoreLabel> wordList = tokenizer.tokenize();
    parseTree = lexicalizedParser.apply(wordList);

    TreebankLanguagePack tlp = lexicalizedParser.treebankLanguagePack(); // PennTreebankLanguagePack for English
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parseTree);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
    System.out.println(tdl);
    for (TypedDependency dependency : tdl) {
        System.out.println("Governor Word: [" + dependency.gov()
                + "] Relation: [" + dependency.reln().getLongName()
                + "] Dependent Word: [" + dependency.dep() + "]");
    }
    System.out.println();

    // You can also use a TreePrint object to print trees and dependencies
    // System.out.println("---Using TreePrint");
    // TreePrint treePrint = new TreePrint("penn,typedDependenciesCollapsed");
    // treePrint.printTree(parseTree);
    // System.out.println("TreePrint Formats");
    // for (String format : TreePrint.outputTreeFormats) {
    //     System.out.println(format);
    // }
    // System.out.println();
}
Example #28
Source File: QuestionStructure.java From QuestionAnsweringSystem with Apache License 2.0
public Collection<TypedDependency> getTdls() {
    return tdls;
}
Example #29
Source File: QuestionStructure.java From QuestionAnsweringSystem with Apache License 2.0
public void setTdls(Collection<TypedDependency> tdls) {
    this.tdls = tdls;
}
Example #30
Source File: MainPartExtracter.java From QuestionAnsweringSystem with Apache License 2.0
/**
 * Get the main parts (subject, predicate, object) of a sentence.
 *
 * @param question the question
 * @param words    list of HasWord tokens
 * @return the question structure
 */
public QuestionStructure getMainPart(String question, List<edu.stanford.nlp.ling.Word> words) {
    QuestionStructure questionStructure = new QuestionStructure();
    questionStructure.setQuestion(question);

    Tree tree = LP.apply(words);
    LOG.info("Parse tree: ");
    tree.pennPrint();
    questionStructure.setTree(tree);

    GrammaticalStructure gs = GSF.newGrammaticalStructure(tree);
    if (gs == null) {
        return null;
    }
    // Get the dependency relations
    Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
    questionStructure.setTdls(tdls);

    Map<String, String> map = new HashMap<>();
    String top = null;
    String root = null;
    LOG.info("Sentence dependency relations:");
    // Dependency relations
    List<String> dependencies = new ArrayList<>();
    for (TypedDependency tdl : tdls) {
        String item = tdl.toString();
        dependencies.add(item);
        LOG.info("\t" + item);
        if (item.startsWith("top")) {
            top = item;
        }
        if (item.startsWith("root")) {
            root = item;
        }
        int start = item.indexOf("(");
        int end = item.lastIndexOf(")");
        item = item.substring(start + 1, end);
        String[] attr = item.split(",");
        String k = attr[0].trim();
        String v = attr[1].trim();
        String value = map.get(k);
        if (value == null) {
            map.put(k, v);
        } else {
            // key already has a value; append the new one
            value += ":";
            value += v;
            map.put(k, value);
        }
    }
    questionStructure.setDependencies(dependencies);

    String mainPartForTop = null;
    String mainPartForRoot = null;
    if (top != null) {
        mainPartForTop = topPattern(top, map);
    }
    if (root != null) {
        mainPartForRoot = rootPattern(root, map);
    }
    questionStructure.setMainPartForTop(mainPartForTop);
    questionStructure.setMainPartForRoot(mainPartForRoot);

    if (questionStructure.getMainPart() == null) {
        LOG.error("Failed to identify subject-predicate-object: " + question);
    } else {
        LOG.info("Subject-predicate-object: " + questionStructure.getMainPart());
    }
    return questionStructure;
}