edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations Java Examples
The following examples show how to use
edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.
You can go to the original project or source file by following the attribution line above each example.
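Before the project examples, here is a minimal, self-contained sketch of how these annotation keys are typically read after running a standard StanfordCoreNLP pipeline with the depparse annotator. It is not drawn from any of the projects below; the annotator list and example sentence are illustrative assumptions.

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;

import java.util.Properties;

public class DependencyDemo {
    public static void main(String[] args) {
        // Build a pipeline that produces dependency graphs (depparse needs tokenize, ssplit, pos).
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit,pos,lemma,depparse");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

        Annotation document = new Annotation("Stanford is a university in California.");
        pipeline.annotate(document);

        // Each sentence CoreMap carries its dependency graphs under SemanticGraphCoreAnnotations keys.
        for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
            SemanticGraph basic =
                    sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            SemanticGraph enhancedPlusPlus =
                    sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class);

            basic.prettyPrint();
            if (enhancedPlusPlus != null) {
                enhancedPlusPlus.prettyPrint();
            }
        }
    }
}

The BasicDependenciesAnnotation key is set by the depparse (or parse) annotator; the enhanced and enhanced++ graphs may be absent for models that do not produce them, so a null check is prudent.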
Example #1
Source File: StanfordRNNDParser.java From ambiverse-nlu with Apache License 2.0
@Override
public void process(JCas jCas) throws AnalysisEngineProcessException {
    mappingProvider.configure(jCas.getCas());
    DKPro2CoreNlp converter = new DKPro2CoreNlp();
    Annotation annotatios = converter.convert(jCas, new Annotation());
    List<CoreMap> sentences = annotatios.get(CoreAnnotations.SentencesAnnotation.class);

    for (CoreMap sentence : sentences) {
        GrammaticalStructure gs = parser.predict(sentence);
        SemanticGraph semanticGraph = SemanticGraphFactory.makeFromTree(gs,
                SemanticGraphFactory.Mode.CCPROCESSED, GrammaticalStructure.Extras.MAXIMAL, null);
        semanticGraph.prettyPrint();
        semanticGraph = semanticGraphUniversalEnglishToEnglish(semanticGraph);
        sentence.set(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class, semanticGraph);
        for (SemanticGraphEdge edge : semanticGraph.edgeListSorted()) {
            System.out.println(edge);
        }
    }
    convertDependencies(jCas, annotatios, true);
}
Example #2
Source File: IntelKBPAnnotator.java From InformationExtraction with GNU General Public License v3.0
/**
 * {@inheritDoc}
 */
@Override
public Set<Class<? extends CoreAnnotation>> requires() {
    Set<Class<? extends CoreAnnotation>> requirements = new HashSet<>(Arrays.asList(
            CoreAnnotations.TextAnnotation.class,
            CoreAnnotations.TokensAnnotation.class,
            CoreAnnotations.IndexAnnotation.class,
            CoreAnnotations.SentencesAnnotation.class,
            CoreAnnotations.SentenceIndexAnnotation.class,
            CoreAnnotations.PartOfSpeechAnnotation.class,
            CoreAnnotations.LemmaAnnotation.class,
            SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class,
            SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class,
            SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class,
            CoreAnnotations.OriginalTextAnnotation.class
    ));
    return Collections.unmodifiableSet(requirements);
}
Example #3
Source File: IntelKBPSemgrexExtractor.java From InformationExtraction with GNU General Public License v3.0
@Override
public Pair<String, Double> classify(KBPInput input) {
    for (RelationType rel : RelationType.values()) {
        if (rules.containsKey(rel) &&
                rel.entityType == input.subjectType &&
                rel.validNamedEntityLabels.contains(input.objectType)) {
            Collection<SemgrexPattern> rulesForRel = rules.get(rel);
            CoreMap sentence = input.sentence.asCoreMap(Sentence::nerTags, Sentence::dependencyGraph);
            boolean matches =
                    matches(sentence, rulesForRel, input,
                            sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)) ||
                    matches(sentence, rulesForRel, input,
                            sentence.get(SemanticGraphCoreAnnotations.AlternativeDependenciesAnnotation.class));
            if (matches) {
                //logger.log("MATCH for " + rel + ". " + sentence + " with rules for " + rel);
                return Pair.makePair(rel.canonicalName, 1.0);
            }
        }
    }
    return Pair.makePair(NO_RELATION, 1.0);
}
Example #4
Source File: KBPSemgrexExtractor.java From InformationExtraction with GNU General Public License v3.0
@Override
public Pair<String, Double> classify(KBPInput input) {
    for (RelationType rel : RelationType.values()) {
        if (rules.containsKey(rel) &&
                rel.entityType == input.subjectType &&
                rel.validNamedEntityLabels.contains(input.objectType)) {
            Collection<SemgrexPattern> rulesForRel = rules.get(rel);
            CoreMap sentence = input.sentence.asCoreMap(Sentence::nerTags, Sentence::dependencyGraph);
            boolean matches =
                    matches(sentence, rulesForRel, input,
                            sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)) ||
                    matches(sentence, rulesForRel, input,
                            sentence.get(SemanticGraphCoreAnnotations.AlternativeDependenciesAnnotation.class));
            if (matches) {
                //logger.log("MATCH for " + rel + ". " + sentence + " with rules for " + rel);
                return Pair.makePair(rel.canonicalName, 1.0);
            }
        }
    }
    return Pair.makePair(NO_RELATION, 1.0);
}
Example #5
Source File: Extract.java From phrases with Apache License 2.0
private HashSet<Pattern> ExtractSentencePatterns(CoreMap sentence) {
    SemanticGraph semanticGraph = sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
    List<Pattern> primary = ExtractPrimaryPatterns(semanticGraph.typedDependencies());
    List<Pattern> combined;
    combined = ExtractCombinedPatterns(primary, primary);
    combined.addAll(ExtractCombinedPatterns(combined, primary));
    combined.addAll(ExtractCombinedPatterns(combined, primary));
    return PruneCombinedPatterns(combined);
}
Example #6
Source File: ClausIEAnalysisEngine.java From ambiverse-nlu with Apache License 2.0
@Override
public void process(JCas jCas) throws AnalysisEngineProcessException {
    DKPro2CoreNlp converter = new DKPro2CoreNlp();
    Annotation annotatios = converter.convert(jCas, new Annotation());
    List<CoreMap> sentences = annotatios.get(CoreAnnotations.SentencesAnnotation.class);

    long startTime = System.currentTimeMillis();
    int factCount = 0;
    int exception = 0;
    for (CoreMap sentence : sentences) {
        try {
            SemanticGraph semanticGraph = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
            Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);

            ClausIE clausIE = new ClausIE(semanticGraph, tree, clausieOptions);
            clausIE.detectClauses();
            clausIE.generatePropositions();

            List<Proposition> propositions = new ArrayList<>(clausIE.getPropositions());
            if (removeRedundant) {
                removeRedundant(propositions);
            }

            for (Proposition p : propositions) {
                // System.out.println(p);
                // System.out.println(p.getDictRelation());
                OpenFact of = new OpenFact(jCas);

                Subject subject = addConstituentToJCas(jCas, Subject.class, p, 0);
                of.setBegin(subject.getBegin());
                of.setSubject(subject);

                Relation relation = addConstituentToJCas(jCas, Relation.class, p, 1);
                relation.setNormalizedForm(p.getDictRelation());
                of.setRelation(relation);

                ObjectF object = addConstituentToJCas(jCas, ObjectF.class, p, 2);
                of.setEnd(object.getEnd());
                of.setObject(object);

                of.setText(p.toString());
                of.addToIndexes();
                factCount++;
            }
        } catch (Exception e) {
            exception++;
            logger_.info("Exception at ClausIEAnalysisEngine: " + e.getMessage() + "\n" + e.getStackTrace().toString());
            if (exception > 15) {
                throw new AnalysisEngineProcessException(e);
            }
        }
    }

    double runTime = System.currentTimeMillis() - startTime;
    String docId = JCasUtil.selectSingle(jCas, DocumentMetaData.class).getDocumentId();
    logger_.info("Document '" + docId + "' done in " + runTime + "ms (" + factCount + " facts).");
}
Example #7
Source File: LogicAnalysisTool.java From Criteria2Query with Apache License 2.0
public List<LinkedHashSet<Integer>> ddep(String text, List<Term> terms) {
    Annotation annotation = new Annotation(text);
    pipeline.annotate(annotation);
    List<LinkedHashSet<Integer>> conj_or = new ArrayList<LinkedHashSet<Integer>>();
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        List<SemanticGraphEdge> sges = sentence
                .get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class).edgeListSorted();
        int entity1_median = 0;
        int entity2_median = 0;
        for (SemanticGraphEdge sge : sges) {
            //System.out.println(
            //        sge.getRelation().getSpecific() + "\t" + sge.getDependent() + "\t" + sge.getGovernor());
            if (sge.getRelation().getSpecific() != null && sge.getRelation().getSpecific().equals("or")) {
                entity1_median = (sge.getDependent().beginPosition() + sge.getDependent().endPosition()) / 2;
                entity2_median = (sge.getGovernor().beginPosition() + sge.getGovernor().endPosition()) / 2;
                LinkedHashSet<Integer> conj_or_group_1 = searchGroup(conj_or, entity1_median);
                LinkedHashSet<Integer> conj_or_group_2 = searchGroup(conj_or, entity2_median);
                if (conj_or_group_1 == null && conj_or_group_2 == null) {
                    LinkedHashSet<Integer> conj_or_group = new LinkedHashSet<Integer>();
                    conj_or_group.add(entity1_median);
                    conj_or_group.add(entity2_median);
                    conj_or.add(conj_or_group);
                } else if (conj_or_group_1 != null && conj_or_group_2 == null) {
                    conj_or.remove(conj_or_group_1);
                    conj_or_group_1.add(entity2_median);
                    conj_or.add(conj_or_group_1);
                } else if (conj_or_group_1 == null && conj_or_group_2 != null) {
                    conj_or.remove(conj_or_group_2);
                    conj_or_group_2.add(entity1_median);
                    conj_or.add(conj_or_group_2);
                }
            }
        }
    }
    List<LinkedHashSet<Integer>> entity_group = new ArrayList<LinkedHashSet<Integer>>();
    for (int i = 0; i < conj_or.size(); i++) {
        LinkedHashSet<Integer> entities = new LinkedHashSet<Integer>();
        for (Integer b : conj_or.get(i)) {
            for (Term t : terms) {
                if (b >= t.getStart_index() && b <= t.getEnd_index()) {
                    entities.add(t.getTermId());
                }
            }
        }
        entity_group.add(entities);
    }
    return entity_group;
}
Example #8
Source File: ParserAnnotatorUtils.java From Heracles with GNU General Public License v3.0
/** Put the tree in the CoreMap for the sentence, also add any
 *  dependency graphs to the sentence, and fill in missing tag annotations.
 *
 *  Thread safety note: nothing special is done to ensure the thread
 *  safety of the GrammaticalStructureFactory. However, both the
 *  EnglishGrammaticalStructureFactory and the
 *  ChineseGrammaticalStructureFactory are thread safe.
 */
public static void fillInParseAnnotations(boolean verbose, boolean buildGraphs,
                                          GrammaticalStructureFactory gsf, CoreMap sentence,
                                          List<Tree> trees, GrammaticalStructure.Extras extras) {
    boolean first = true;
    for (Tree tree : trees) {
        // make sure all tree nodes are CoreLabels
        // TODO: why isn't this always true? something fishy is going on
        Trees.convertToCoreLabels(tree);

        // index nodes, i.e., add start and end token positions to all nodes
        // this is needed by other annotators down stream, e.g., the NFLAnnotator
        tree.indexSpans(0);

        if (first) {
            sentence.set(TreeCoreAnnotations.TreeAnnotation.class, tree);
            if (verbose) {
                log.info("Tree is:");
                tree.pennPrint(System.err);
            }

            setMissingTags(sentence, tree);

            if (buildGraphs) {
                // generate the dependency graph
                // unfortunately, it is necessary to make the
                // GrammaticalStructure three times, as the dependency
                // conversion changes the given data structure
                SemanticGraph deps = SemanticGraphFactory.generateCollapsedDependencies(gsf.newGrammaticalStructure(tree), extras);
                SemanticGraph uncollapsedDeps = SemanticGraphFactory.generateUncollapsedDependencies(gsf.newGrammaticalStructure(tree), extras);
                SemanticGraph ccDeps = SemanticGraphFactory.generateCCProcessedDependencies(gsf.newGrammaticalStructure(tree), extras);
                SemanticGraph enhancedDeps = SemanticGraphFactory.generateEnhancedDependencies(gsf.newGrammaticalStructure(tree));
                SemanticGraph enhancedPlusPlusDeps = SemanticGraphFactory.generateEnhancedPlusPlusDependencies(gsf.newGrammaticalStructure(tree));

                if (verbose) {
                    log.info("SDs:");
                    log.info(deps.toString(SemanticGraph.OutputFormat.LIST));
                }

                sentence.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, deps);
                sentence.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, uncollapsedDeps);
                sentence.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
                sentence.set(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class, enhancedDeps);
                sentence.set(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class, enhancedPlusPlusDeps);
            }

            first = false;
        }
    }
    if (trees.size() > 1) {
        sentence.set(TreeCoreAnnotations.KBestTreesAnnotation.class, trees);
    }
}
Example #9
Source File: ReplaceSubordinateRule.java From tint with GNU General Public License v3.0
@Override
public String apply(Annotation annotation, Map<Integer, HashMultimap<Integer, Integer>> children) {
    InverseDigiMorph dm = new InverseDigiMorph();

    int conj = 0;

    List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
    CoreMap sentence = sentences.get(0);

    SemanticGraph semanticGraph = sentence
            .get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
    IndexedWord node = semanticGraph.getNodeByIndex(conj + 1);
    List<IndexedWord> history = getHistory(semanticGraph, node);
    if (history.size() == 1) {
        return null;
    }

    IndexedWord verb = history.get(1);
    CoreLabel token = sentence.get(CoreAnnotations.TokensAnnotation.class).get(verb.index() - 1);
    String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
    if (!pos.startsWith("V")) {
        return null;
    }

    // todo: check subject in parse tree
    // todo: check clitics

    String morpho = token.get(DigiMorphAnnotations.MorphoAnnotation.class);
    String[] parts = morpho.split("\\s+");
    TreeSet<String> persons = new TreeSet<>();
    String tempo = null;
    for (int i = 1; i < parts.length; i++) {
        String[] vParts = parts[i].split("\\+");
        if (!vParts[1].equals("v")) {
            continue;
        }
        String modo = vParts[2];
        if (!modo.equals("cong")) {
            continue;
        }
        tempo = vParts[3];
        persons.add(vParts[5] + "+" + vParts[6]);
    }

    IndexedWord next = null;
    if (persons.size() != 1) {
        for (int i = 2; i < history.size(); i++) {
            if (history.get(i).get(CoreAnnotations.PartOfSpeechAnnotation.class).startsWith("V")) {
                next = history.get(i);
                break;
            }
        }
        persons = getPersons(semanticGraph, next, sentence);
    }

    StringBuffer stringBuffer = new StringBuffer();
    stringBuffer.append(token.lemma());
    stringBuffer.append("+v+indic+").append(tempo);
    stringBuffer.append("+nil+");

    // Add person
    stringBuffer.append(persons.last());

    String find = stringBuffer.toString();
    System.out.println(find);
    String inverseMorphology = dm.getInverseMorphology(find);
    System.out.println(inverseMorphology);

    System.out.println(morpho);
    System.out.println(tempo);
    System.out.println(persons);

    // System.out.println(annotation.get(UDPipeAnnotations.UDPipeOriginalAnnotation.class));
    // System.out.println(sentence.get(CoreAnnotations.TokensAnnotation.class).get(2)
    //         .get(UDPipeAnnotations.FeaturesAnnotation.class));
    // System.out.println(token
    //         .get(UDPipeAnnotations.FeaturesAnnotation.class));
    //
    // System.out.println(children.get(0).get(verb.index()));
    // System.out.println(children);
    // System.out.println(verb.get(UDPipeAnnotations.FeaturesAnnotation.class));
    // try {
    //     System.out.println(JSONOutputter.jsonPrint(annotation));
    // } catch (IOException e) {
    //     e.printStackTrace();
    // }
    // System.out.println(getHistory(semanticGraph, node));
    // System.out.println(semanticGraph.getOutEdgesSorted(node));
    // System.out.println(semanticGraph.getIncomingEdgesSorted(node));
    // System.out.println(node);

    return null;
}
Example #10
Source File: Simplifier.java From tint with GNU General Public License v3.0
public static void main(String[] args) {
    String sentenceText;
    sentenceText = "Per gli interventi di seguito descritti, la cui autorizzazione può risultare di competenza dei Comuni o delle CTC in relazione alla tipologia ed alla localizzazione dell'intervento, si indicano i seguenti elaborati, precisando che essi sono orientativi e che comunque devono mostrare chiaramente dove si interviene e come si interviene.";
    sentenceText = "Il mondo, precisando che si tratta della Terra, è molto bello.";
    sentenceText = "In particolare, andranno rilevati e descritti tutti gli elementi di criticità paesaggistica, insiti nel progetto, e andranno messi in relazione a quanto è stato operato, per eliminare o mitigare tali criticità (impatti), garantendo così un migliore inserimento paesaggistico dell'intervento.";
    sentenceText = "In funzione della tipologia dell'opera, oggetto di richiesta di autorizzazione, sono previste due forme diverse di relazione paesaggistica, denominate rispettivamente:";
    sentenceText = "Sebbene non sappia l'inglese, si è fatto capire dai turisti.";
    // sentenceText = "Io cancello il gesso dalla lavagna.";

    try {
        TintPipeline pipeline = new TintPipeline();
        pipeline.loadDefaultProperties();
        pipeline.setProperty("annotators", "ita_toksent, udpipe, ita_morpho, ita_lemma, ita_comp_morpho");
        pipeline.setProperty("customAnnotatorClass.udpipe", "eu.fbk.dh.fcw.udpipe.api.UDPipeAnnotator");
        pipeline.setProperty("customAnnotatorClass.ita_comp_morpho", "eu.fbk.dh.tint.digimorph.annotator.DigiCompMorphAnnotator");
        pipeline.setProperty("udpipe.server", "gardner");
        pipeline.setProperty("udpipe.port", "50020");
        pipeline.setProperty("udpipe.keepOriginal", "1");
        pipeline.load();

        Annotation annotation = pipeline.runRaw(sentenceText);
        System.out.println(JSONOutputter.jsonPrint(annotation));

        Map<Integer, HashMultimap<Integer, Integer>> children = new HashMap<>();

        List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
        for (int sentIndex = 0; sentIndex < sentences.size(); sentIndex++) {
            CoreMap sentence = sentences.get(sentIndex);
            children.put(sentIndex, HashMultimap.create());
            SemanticGraph semanticGraph = sentence
                    .get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            Collection<IndexedWord> rootNodes = semanticGraph.getRoots();
            if (rootNodes.isEmpty()) {
                continue;
            }
            for (IndexedWord root : rootNodes) {
                Set<Integer> stack = new HashSet<>();
                Set<IndexedWord> used = new HashSet<>();
                addChildren(children.get(sentIndex), stack, root, semanticGraph, used);
            }
        }

        SimplificationRule rule;
        String output;

        rule = new ReplaceSubordinateRule();
        output = rule.apply(annotation, children);
        System.out.println(output);

        // rule = new DenominatiSplittingRule();
        // output = rule.apply(annotation, children);
        //
        // System.out.println(output);
        //
        // rule = new GarantendoSplittingRule();
        // output = rule.apply(annotation, children);
        //
        // System.out.println(output);
        //
        // rule = new GarantendoSplittingRule();
        // output = rule.apply(annotation, children);
        //
        // System.out.println(output);
        //
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example #11
Source File: CoreNLP.java From Shour with MIT License
public static SemanticGraph getGraph(String sentence) {
    return getOneSentence(sentence).get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
}
Example #12
Source File: CoreNLP.java From Shour with MIT License
public static SemanticGraph getDependency(String sentence) {
    return getOneSentence(sentence).get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
}
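Once a SemanticGraph has been obtained through any of the SemanticGraphCoreAnnotations keys (for instance via the two helpers above), it can be traversed like an ordinary graph. The sketch below is illustrative rather than taken from the Shour project; the class name GraphWalker and the printed format are assumptions.

import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphEdge;

public class GraphWalker {

    // Print the root word, then every governor -> dependent edge with its relation name.
    public static void printEdges(SemanticGraph graph) {
        IndexedWord root = graph.getFirstRoot();
        System.out.println("root: " + root.word());
        for (SemanticGraphEdge edge : graph.edgeIterable()) {
            System.out.println(edge.getRelation() + "("
                    + edge.getGovernor().word() + "-" + edge.getGovernor().index() + ", "
                    + edge.getDependent().word() + "-" + edge.getDependent().index() + ")");
        }
    }
}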