org.apache.jena.update.UpdateRequest Java Examples
The following examples show how to use
org.apache.jena.update.UpdateRequest.
Each example is taken from an open-source project; the source file, project, and license are listed above the code.
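For orientation, here is a minimal, self-contained sketch of the basic UpdateRequest workflow: parse or build an update, append further operations, and execute it against a dataset. This is only an illustrative sketch; the example data, graph name, and class name are assumptions, not taken from any of the projects below.

import org.apache.jena.query.Dataset;
import org.apache.jena.query.DatasetFactory;
import org.apache.jena.riot.Lang;
import org.apache.jena.riot.RDFDataMgr;
import org.apache.jena.update.UpdateAction;
import org.apache.jena.update.UpdateFactory;
import org.apache.jena.update.UpdateRequest;

public class UpdateRequestBasics {
    public static void main(String[] args) {
        // An empty, in-memory dataset to run the update against.
        Dataset dataset = DatasetFactory.create();

        // Parse a SPARQL Update string into an UpdateRequest (it may hold several operations).
        UpdateRequest request = UpdateFactory.create(
                "PREFIX : <http://example/> INSERT DATA { :s :p 123 }");

        // Further operations can be appended to the same request.
        request.add("CREATE GRAPH <http://example/g2>");

        // Execute all operations, in order, against the local dataset.
        UpdateAction.execute(request, dataset);

        // Print the resulting dataset as N-Quads.
        RDFDataMgr.write(System.out, dataset, Lang.NQUADS);
    }
}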
Example #1
Source File: TriplestoreResourceService.java From trellis with Apache License 2.0
/**
 * This code is equivalent to the SPARQL query below.
 *
 * <p><pre><code>
 * WITH trellis:PreferServerManaged
 * DELETE { IDENTIFIER dc:modified ?time }
 * INSERT { IDENTIFIER dc:modified TIME }
 * WHERE { IDENTIFIER dc:modified ?time } .
 * </code></pre></p>
 */
private UpdateRequest buildUpdateModificationRequest(final IRI identifier, final Literal time) {
    final UpdateRequest req = new UpdateRequest();
    final Var modified = Var.alloc(MODIFIED);
    final UpdateDeleteInsert modify = new UpdateDeleteInsert();
    modify.setWithIRI(toJena(PreferServerManaged));
    modify.getDeleteAcc().addTriple(triple(toJena(identifier), toJena(DC.modified), modified));
    modify.getInsertAcc().addTriple(triple(toJena(identifier), toJena(DC.modified), toJena(time)));
    final ElementGroup eg = new ElementGroup();
    final ElementPathBlock epb = new ElementPathBlock();
    epb.addTriple(triple(toJena(identifier), toJena(DC.modified), modified));
    eg.addElement(epb);
    modify.setElement(eg);
    req.add(modify);
    return req;
}
Example #2
Source File: TriplestoreResourceServiceTest.java From trellis with Apache License 2.0
@Test
void testRDFConnectionError() {
    final TriplestoreResourceService svc = new TriplestoreResourceService(mockRdfConnection);
    svc.initialize();
    doThrow(new RuntimeException("Expected exception")).when(mockRdfConnection).update(any(UpdateRequest.class));
    doThrow(new RuntimeException("Expected exception")).when(mockRdfConnection)
        .loadDataset(any(org.apache.jena.query.Dataset.class));

    assertThrows(ExecutionException.class, () ->
        svc.create(builder(resource).interactionModel(LDP.RDFSource).container(root).build(),
            rdf.createDataset()).toCompletableFuture().get(),
        "No (create) exception with dropped backend connection!");
    assertThrows(ExecutionException.class, () ->
        svc.add(resource, rdf.createDataset()).toCompletableFuture().get(),
        "No (add) exception with dropped backend connection!");
    assertThrows(ExecutionException.class, () ->
        svc.delete(builder(resource).interactionModel(LDP.RDFSource).container(root).build())
            .toCompletableFuture().get(),
        "No (delete) exception with dropped backend connection!");
    assertThrows(ExecutionException.class, () ->
        svc.touch(resource).toCompletableFuture().get(),
        "No (touch) exception with dropped backend connection!");
}
Example #3
Source File: DeltaEx03_FusekiLogChanges.java From rdf-delta with Apache License 2.0
public static void main2(String ...args) {
    int PORT = 2020;
    DatasetGraph dsgBase = DatasetGraphFactory.createTxnMem();
    RDFChanges changeLog = RDFPatchOps.textWriter(System.out);
    DatasetGraph dsg = RDFPatchOps.changes(dsgBase, changeLog);

    // Create a server with the changes-enabled dataset.
    // Plain server. No other registration necessary.
    FusekiServer server = FusekiServer.create()
        .port(PORT)
        .add("/ds", dsg)
        .build();
    server.start();

    RDFConnection conn = RDFConnectionFactory.connect("http://localhost:"+PORT+"/ds");
    UpdateRequest update = UpdateFactory.create("PREFIX : <http://example/> INSERT DATA { :s :p 123 }");
    // Note - no prefix in changes. The SPARQL Update prefix is not a change to the dataset prefixes.
    conn.update(update);
    server.stop();
    // // Server in the background so explicitly exit.
    // System.exit(0);
}
Example #4
Source File: UpdateExecuteOperations.java From xcurator with Apache License 2.0
public static void ex3(Dataset dataset) {
    // Build up the request then execute it.
    // This is the preferred way for complex sequences of operations.
    UpdateRequest request = UpdateFactory.create() ;
    request.add("DROP ALL")
           .add("CREATE GRAPH <http://example/g2>") ;
    // Different style.
    // Equivalent to request.add("...")
    UpdateFactory.parse(request, "LOAD <file:etc/update-data.ttl> INTO GRAPH <http://example/g2>") ;

    // And perform the operations.
    UpdateAction.execute(request, dataset) ;

    System.out.println("# Debug format");
    SSE.write(dataset) ;
    System.out.println();
    System.out.println("# N-Quads: S P O G") ;
    RDFDataMgr.write(System.out, dataset, Lang.NQUADS) ;
}
Example #5
Source File: SparqlService.java From SDA with BSD 2-Clause "Simplified" License
private void runModifySparql(String sparql, String[] idxVals) throws Exception {
    String updateService = Utils.getSdaProperty("com.pineone.icbms.sda.knowledgebase.sparql.endpoint")
            + "/update";
    String madeQl = makeSparql(sparql, idxVals);
    UpdateRequest ur = UpdateFactory.create(madeQl);
    UpdateProcessor up = UpdateExecutionFactory.createRemote(ur, updateService);
    up.execute();
}
Example #6
Source File: ARQFactory.java From shacl with Apache License 2.0
public UpdateRequest createUpdateRequest(String parsableString) {
    UpdateRequest result = string2Update.get(parsableString);
    if(result == null) {
        result = UpdateFactoryFilter.get().create(parsableString);
        if(useCaches) {
            string2Update.put(parsableString, result);
        }
    }
    return result;
}
Example #7
Source File: UPDATEWorker.java From IGUANA with GNU Affero General Public License v3.0
@Override
public void executeQuery(String query, String queryID) {
    UpdateRequest update = UpdateFactory.create(query);

    // Set update timeout
    RequestConfig requestConfig = RequestConfig.custom().setConnectionRequestTimeout(this.timeOut.intValue())
            .setConnectTimeout(this.timeOut.intValue()).setSocketTimeout(this.timeOut.intValue()).build();
    CloseableHttpClient client = HttpClientBuilder.create().setDefaultRequestConfig(requestConfig).build();

    // create Update Processor and use timeout config
    UpdateProcessor exec = UpdateExecutionFactory.createRemote(update, service, client);
    setCredentials(exec);
    Instant start = Instant.now();

    try {
        // Execute Update
        exec.execute();
        double duration = durationInMilliseconds(start, Instant.now());
        LOGGER.debug("Worker[{{}} : {{}}]: Update with ID {{}} took {{}}.", this.workerType,
                this.workerID, queryID, duration);
        // Return time
        super.addResults(new QueryExecutionStats(queryID, COMMON.QUERY_SUCCESS, duration));
        return;
    } catch (Exception e) {
        LOGGER.warn("Worker[{{}} : {{}}]: Could not execute the following update\n{{}}\n due to",
                this.workerType, this.workerID, query, e);
    }
    // Exception was thrown, return error
    // return -1L;
    super.addResults(new QueryExecutionStats(queryID, COMMON.QUERY_UNKNOWN_EXCEPTION,
            durationInMilliseconds(start, Instant.now())));
}
Example #8
Source File: TriplestoreResourceService.java From trellis with Apache License 2.0
/**
 * This is equivalent to the SPARQL below.
 *
 * <p><pre><code>
 * DELETE WHERE { GRAPH IDENTIFIER { ?s ?p ?o } };
 * DELETE WHERE { GRAPH IDENTIFIER?ext=acl { ?s ?p ?o } };
 * DELETE WHERE { GRAPH trellis:PreferServerManaged {
 *   IDENTIFIER a ldp:NonRDFSource .
 *   IDENTIFIER dc:hasPart ?s .
 *   ?s ?p ?o .
 * } };
 * DELETE WHERE { GRAPH trellis:PreferServerManaged { IDENTIFIER ?p ?o } };
 * INSERT DATA {
 *   GRAPH IDENTIFIER { ... }
 *   GRAPH IDENTIFIER?ext=acl { ... }
 *   GRAPH trellis:PreferServerManaged { ... }
 *   GRAPH IDENTIFIER?ext=audit { ... }
 * }
 * </code></pre></p>
 */
private UpdateRequest buildUpdateRequest(final IRI identifier, final Literal time, final Dataset dataset,
        final OperationType operation) {
    // Set the time
    dataset.add(PreferServerManaged, identifier, DC.modified, time);

    final UpdateRequest req = new UpdateRequest();
    req.add(new UpdateDeleteWhere(new QuadAcc(singletonList(new Quad(toJena(identifier),
                        SUBJECT, PREDICATE, OBJECT)))));
    extensions.forEach((ext, graph) ->
        req.add(new UpdateDeleteWhere(new QuadAcc(singletonList(new Quad(
                            getExtIRI(identifier, ext), SUBJECT, PREDICATE, OBJECT))))));
    req.add(new UpdateDeleteWhere(new QuadAcc(asList(
                        new Quad(toJena(PreferServerManaged), toJena(identifier), toJena(type),
                            toJena(LDP.NonRDFSource)),
                        new Quad(toJena(PreferServerManaged), toJena(identifier), toJena(DC.hasPart), SUBJECT),
                        new Quad(toJena(PreferServerManaged), SUBJECT, PREDICATE, OBJECT)))));
    req.add(new UpdateDeleteWhere(new QuadAcc(singletonList(new Quad(toJena(PreferServerManaged),
                        toJena(identifier), PREDICATE, OBJECT)))));

    final QuadDataAcc sink = new QuadDataAcc(synchronizedList(new ArrayList<>()));
    if (operation == OperationType.DELETE) {
        dataset.stream().filter(q -> q.getGraphName().filter(PreferServerManaged::equals).isPresent())
            .map(JenaCommonsRDF::toJena).forEach(sink::addQuad);
    } else {
        dataset.stream().filter(q -> q.getGraphName().filter(PreferServerManaged::equals).isPresent())
            .map(JenaCommonsRDF::toJena).forEach(sink::addQuad);
        dataset.getGraph(PreferUserManaged).ifPresent(g -> g.stream()
                .map(t -> new Quad(toJena(identifier), toJena(t))).forEach(sink::addQuad));
        dataset.getGraph(PreferAudit).ifPresent(g -> g.stream()
                .map(t -> new Quad(getExtIRI(identifier, "audit"), toJena(t))).forEach(sink::addQuad));
        extensions.forEach((ext, graph) -> dataset.getGraph(graph).ifPresent(g -> g.stream()
                .map(t -> new Quad(getExtIRI(identifier, ext), toJena(t))).forEach(sink::addQuad)));
    }
    req.add(new UpdateDataInsert(sink));
    return req;
}
Example #9
Source File: TriplestoreResourceService.java From trellis with Apache License 2.0
/**
 * This code is equivalent to the SPARQL queries below.
 *
 * <pre><code>
 * SELECT ?object WHERE {
 *   GRAPH trellis:PreferServerManaged { IDENTIFIER rdf:type ?object }
 * }
 * </code></pre>
 *
 * <pre><code>
 * INSERT DATA {
 *   GRAPH trellis:PreferServerManaged {
 *     IDENTIFIER rdf:type ldp:Container ;
 *                dc:modified "NOW"^^xsd:dateTime }
 *   GRAPH IDENTIFIER?ext=audit {
 *     IDENTIFIER prov:wasGeneratedBy [
 *       rdf:type prov:Activity , as:Create ;
 *       prov:wasAssociatedWith trellis:AdministratorAgent ;
 *       prov:atTime "TIME"^^xsd:dateTime ] }
 *   GRAPH IDENTIFIER?ext=acl {
 *     IDENTIFIER acl:mode acl:Read , acl:Write , acl:Control ;
 *                acl:agentClass foaf:Agent ;
 *                acl:accessTo IDENTIFIER }
 * }
 * </code></pre>
 */
@PostConstruct
public void initialize() {
    final IRI root = rdf.createIRI(TRELLIS_DATA_PREFIX);

    final Query q = new Query();
    q.setQuerySelectType();
    q.addResultVar(OBJECT);

    final ElementPathBlock epb = new ElementPathBlock();
    epb.addTriple(triple(toJena(root), toJena(type), OBJECT));

    final ElementNamedGraph ng = new ElementNamedGraph(toJena(PreferServerManaged), epb);
    final ElementGroup elg = new ElementGroup();
    elg.addElement(ng);
    q.setQueryPattern(elg);

    final Stream.Builder<RDFTerm> builder = builder();
    rdfConnection.querySelect(q, qs -> builder.accept(getObject(qs)));
    if (!builder.build().findFirst().isPresent()) {
        final Literal time = rdf.createLiteral(now().toString(), XSD.dateTime);
        final IRI auth = rdf.createIRI(TRELLIS_DATA_PREFIX + "#auth");
        final UpdateRequest update = new UpdateRequest();

        final QuadDataAcc sink = new QuadDataAcc();
        sink.addQuad(new Quad(toJena(PreferServerManaged), triple(toJena(root), toJena(type),
                        toJena(LDP.BasicContainer))));
        sink.addQuad(new Quad(toJena(PreferServerManaged), triple(toJena(root), toJena(DC.modified),
                        toJena(time))));

        sink.addQuad(new Quad(getExtIRI(root, ACL_EXT), triple(toJena(auth), toJena(ACL.mode),
                        toJena(ACL.Read))));
        sink.addQuad(new Quad(getExtIRI(root, ACL_EXT), triple(toJena(auth), toJena(ACL.mode),
                        toJena(ACL.Write))));
        sink.addQuad(new Quad(getExtIRI(root, ACL_EXT), triple(toJena(auth), toJena(ACL.mode),
                        toJena(ACL.Control))));
        sink.addQuad(new Quad(getExtIRI(root, ACL_EXT), triple(toJena(auth), toJena(ACL.agentClass),
                        toJena(FOAF.Agent))));
        sink.addQuad(new Quad(getExtIRI(root, ACL_EXT), triple(toJena(auth), toJena(ACL.accessTo),
                        toJena(root))));

        update.add(new UpdateDataInsert(sink));
        rdfConnection.update(update);
    }
    LOGGER.info("Initialized Trellis Triplestore Resource Service");
}
Example #10
Source File: SparqlFusekiQueryImpl.java From SDA with BSD 2-Clause "Simplified" License
/**
 * Run an update query (SPARQL only).
 * @param sparql
 * @param idxVals
 * @param dest
 * @throws Exception
 * @return void
 */
public void runModifySparql(String sparql, String[] idxVals, String dest) throws Exception {
    String madeQl = makeFinal(sparql, idxVals);

    if(dest.equals("ALL") || dest.equals("DW")) {
        String updateService = Utils.getSdaProperty("com.pineone.icbms.sda.knowledgebase.dw.sparql.endpoint")
                + "/update";
        //String madeQl = makeFinal(sparql, idxVals);
        UpdateRequest ur = UpdateFactory.create(madeQl);
        UpdateProcessor up;

        log.debug("runModifySparql() on DataWarehouse server start.................................. ");
        try {
            log.debug("try (first).................................. ");
            up = UpdateExecutionFactory.createRemote(ur, updateService);
            up.execute();
        } catch (Exception e) {
            int waitTime = 15*1000;
            log.debug("Exception message in runModifySparql() =====> "+e.getMessage());
            try {
                // Wait for a while, then run the update again
                log.debug("sleeping.(first).................................. in "+waitTime);
                Thread.sleep(waitTime);
                log.debug("try (second).................................. ");
                up = UpdateExecutionFactory.createRemote(ur, updateService);
                up.execute();
            } catch (Exception ee) {
                log.debug("Exception 1====>"+ee.getMessage());
                waitTime = 30*1000;
                if(ee.getMessage().contains("Service Unavailable")
                        || ee.getMessage().contains("java.net.ConnectException") ) {
                    try {
                        // restart fuseki
                        Utils.restartFuseki();
                        // Wait for a while.
                        log.debug("sleeping (final)................................. in "+waitTime);
                        Thread.sleep(waitTime);
                        // Try one last time.
                        log.debug("try (final).................................. ");
                        up = UpdateExecutionFactory.createRemote(ur, updateService);
                        up.execute();
                    } catch (Exception eee) {
                        log.debug("Exception 2====>"+eee.getMessage());
                        throw eee;
                    }
                }
                throw ee;
            } // second try
        } // first try
        log.debug("runModifySparql() on DataWarehouse server end.................................. ");
    }

    if(dest.equals("ALL") || dest.equals("DM")) {
        // Run the same (delete or insert) SPARQL against the DM server as well
        // (registers latest values, inference results, subscription values, etc.)
        log.debug("runModifySparql() on DataMart server start.................................. ");
        String updateService2 = Utils.getSdaProperty("com.pineone.icbms.sda.knowledgebase.dm.sparql.endpoint")
                + "/update";
        UpdateRequest ur2 = UpdateFactory.create(madeQl);
        UpdateProcessor up2 = UpdateExecutionFactory.createRemote(ur2, updateService2);
        up2.execute();
        log.debug("runModifySparql() on DataMart server end.................................. ");
    }
}
Example #11
Source File: UpdateFactoryFilter.java From shacl with Apache License 2.0
public UpdateRequest create(String str) {
    analyzeRequest(str);
    return UpdateFactory.create(str, Syntax.syntaxARQ);
}
Example #12
Source File: ExTDB_Txn2.java From xcurator with Apache License 2.0
public static void execUpdate(String sparqlUpdateString, GraphStore graphStore) {
    UpdateRequest request = UpdateFactory.create(sparqlUpdateString) ;
    UpdateProcessor proc = UpdateExecutionFactory.create(request, graphStore) ;
    proc.execute() ;
}
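GraphStore belongs to an older Jena API; newer Jena releases take a Dataset (or DatasetGraph) directly. The following is a minimal sketch of the same helper written against a Dataset, assuming a recent Jena 3.x version where UpdateExecutionFactory.create(UpdateRequest, Dataset) is available; it is not part of the xcurator project.

import org.apache.jena.query.Dataset;
import org.apache.jena.update.UpdateExecutionFactory;
import org.apache.jena.update.UpdateFactory;
import org.apache.jena.update.UpdateProcessor;
import org.apache.jena.update.UpdateRequest;

public static void execUpdate(String sparqlUpdateString, Dataset dataset) {
    // Parse the update string, then execute it against the dataset.
    UpdateRequest request = UpdateFactory.create(sparqlUpdateString);
    UpdateProcessor proc = UpdateExecutionFactory.create(request, dataset);
    proc.execute();
}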
Example #13
Source File: QueriedResource.java From Processor with Apache License 2.0
/**
 * Returns the SPARQL update that is used to remove the RDF description of this resource.
 *
 * @return update request
 */
public UpdateRequest getUpdate();
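For context, a hypothetical helper (not part of the Processor API) showing how a caller could run the UpdateRequest returned by getUpdate() against a local Jena dataset; the method name and parameters are assumptions for illustration only.

import org.apache.jena.query.Dataset;
import org.apache.jena.update.UpdateAction;
import org.apache.jena.update.UpdateRequest;

// Hypothetical usage sketch: apply the resource's update to a local dataset.
static void applyUpdate(QueriedResource resource, Dataset dataset) {
    UpdateRequest update = resource.getUpdate();
    UpdateAction.execute(update, dataset);
}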
Example #14
Source File: ResourceBase.java From Processor with Apache License 2.0
/**
 * Returns update used to remove RDF description of this resource.
 * Query solution bindings are applied by default.
 *
 * @return update object with applied solution bindings
 * @see #getQuerySolutionMap()
 */
@Override
public UpdateRequest getUpdate() {
    return update;
}