org.openrdf.model.impl.LinkedHashModel Java Examples
The following examples show how to use
org.openrdf.model.impl.LinkedHashModel.
Each example lists its source file, originating project, and license.
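Before the project-specific examples, here is a minimal sketch of typical LinkedHashModel usage. It relies only on the standard OpenRDF Sesame 2.x API (ValueFactoryImpl, the RDF/RDFS vocabularies, and the Model interface); the example.org namespace and resource names are made up for illustration.

import org.openrdf.model.Literal;
import org.openrdf.model.Model;
import org.openrdf.model.URI;
import org.openrdf.model.ValueFactory;
import org.openrdf.model.impl.LinkedHashModel;
import org.openrdf.model.impl.ValueFactoryImpl;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.RDFS;

public class LinkedHashModelBasics {

    public static void main(final String[] args) {

        final ValueFactory vf = ValueFactoryImpl.getInstance();

        // Made-up vocabulary, for illustration only.
        final String ns = "http://example.org/";
        final URI mike = vf.createURI(ns + "Mike");
        final URI person = vf.createURI(ns + "Person");
        final Literal label = vf.createLiteral("Mike");

        // An in-memory, insertion-ordered Model (a Set<Statement>).
        final Model model = new LinkedHashModel();
        model.add(mike, RDF.TYPE, person);
        model.add(mike, RDFS.LABEL, label);

        // Duplicate statements are ignored, as in any Set.
        model.add(mike, RDF.TYPE, person);
        System.out.println(model.size()); // 2

        // filter(...) returns a view restricted to a pattern; null acts as a wildcard.
        System.out.println(model.filter(mike, RDFS.LABEL, null).objects()); // ["Mike"]

        // contains(...) checks for a matching statement.
        System.out.println(model.contains(mike, RDF.TYPE, person)); // true
    }
}

Because LinkedHashModel preserves insertion order and de-duplicates statements like any Set, the test code in the examples below can build an "expected" graph and compare it statement-for-statement against query results.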
Example #1
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
/**
 * Sets up a simple data set on the server.
 *
 * @throws Exception
 */
protected void setupDataOnServer() throws Exception {

    final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
    final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
    final URI person = new URIImpl(BD.NAMESPACE + "Person");
    final URI likes = new URIImpl(BD.NAMESPACE + "likes");
    final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
    final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
    final Literal label1 = new LiteralImpl("Mike");
    final Literal label2 = new LiteralImpl("Bryan");
    {
        final Graph g = new LinkedHashModel();
        g.add(mike, RDF.TYPE, person);
        g.add(mike, likes, rdf);
        g.add(mike, RDFS.LABEL, label1);
        g.add(bryan, RDF.TYPE, person);
        g.add(bryan, likes, rdfs);
        g.add(bryan, RDFS.LABEL, label2);
        m_repo.add(new AddOp(g));
    }
}
Example #2
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
protected void doConstructTest(final String method, final RDFFormat format)
        throws Exception {

    setupDataOnServer();

    final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
    final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
    final URI person = new URIImpl(BD.NAMESPACE + "Person");

    // The expected results.
    final Graph expected = new LinkedHashModel();
    {
//      expected.add(new StatementImpl(mike, likes, rdf));
        expected.add(new StatementImpl(mike, RDF.TYPE, person));
        expected.add(new StatementImpl(bryan, RDF.TYPE, person));
//      expected.add(new StatementImpl(mike, RDFS.LABEL, label1));
    }

    // Run the query and verify the results.
    {
        final String queryStr =
            "prefix bd: <"+BD.NAMESPACE+"> " +//
            "prefix rdf: <"+RDF.NAMESPACE+"> " +//
            "prefix rdfs: <"+RDFS.NAMESPACE+"> " +//
            "CONSTRUCT { ?x rdf:type bd:Person }" +//
            "WHERE { " +//
            " ?x rdf:type bd:Person . " +//
//          " ?x bd:likes bd:RDF " +//
            "}";

        final IPreparedGraphQuery query = m_repo.prepareGraphQuery(queryStr);

//      final Graph actual = asGraph(query.evaluate());

        assertSameGraph(expected, query);
    }
}
Example #3
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
/**
 * Sets up a simple data set on the server.
 * @throws Exception
 */
protected void setupQuadsDataOnServer() throws Exception {

    final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
    final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
    final URI person = new URIImpl(BD.NAMESPACE + "Person");
    final URI likes = new URIImpl(BD.NAMESPACE + "likes");
    final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
    final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
    final URI c1 = new URIImpl(BD.NAMESPACE + "c1");
    final URI c2 = new URIImpl(BD.NAMESPACE + "c2");
    final URI c3 = new URIImpl(BD.NAMESPACE + "c3");
    final Literal label1 = new LiteralImpl("Mike");
    final Literal label2 = new LiteralImpl("Bryan");
    {
        final Graph g = new LinkedHashModel();
        g.add(mike, RDF.TYPE, person, c1, c2, c3);
        g.add(mike, likes, rdf, c1, c2, c3);
        g.add(mike, RDFS.LABEL, label1, c1, c2, c3);
        g.add(bryan, RDF.TYPE, person, c1, c2, c3);
        g.add(bryan, likes, rdfs, c1, c2, c3);
        g.add(bryan, RDFS.LABEL, label2, c1, c2, c3);
        m_repo.add(new AddOp(g));
    }
}
Example #4
Source File: TripleStoreBlazegraph.java From powsybl-core with Mozilla Public License 2.0
private void write(DataSource ds, RepositoryConnection conn, Resource context)
        throws RepositoryException {
    LOG.info("Writing context {}", context);
    RepositoryResult<Statement> statements = conn.getStatements(null, null, null, true, context);
    Model model = new LinkedHashModel();
    QueryResults.addAll(statements, model);
    copyNamespacesToModel(conn, model);
    String outname = context.toString();
    write(model, outputStream(ds, outname));
}
Example #5
Source File: TestBigdataSailRemoteRepository.java From database with GNU General Public License v2.0
/**
 * Test of insert and retrieval of a large literal.
 */
public void test_INSERT_veryLargeLiteral() throws Exception {

    final Graph g = new LinkedHashModel();

    final URI s = new URIImpl("http://www.bigdata.com/");
    final URI p = RDFS.LABEL;
    final Literal o = getVeryLargeLiteral();
    final Statement stmt = new StatementImpl(s, p, o);
    g.add(stmt);

    // Load the resource into the KB.
    assertEquals(
            1L,
            doInsertByBody("POST", RDFFormat.RDFXML, g, null/* defaultContext */));

    // Read back the data into a graph.
    final Graph g2;
    {
        final String queryStr = "DESCRIBE <" + s.stringValue() + ">";
        final GraphQuery query = cxn.prepareGraphQuery(QueryLanguage.SPARQL,
                queryStr);
        g2 = asGraph(query.evaluate());
    }

    assertEquals(1, g2.size());

    assertTrue(g2.match(s, p, o).hasNext());
}
Example #6
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
/**
 * Preferred version executes the {@link IPreparedGraphQuery} and ensures
 * that the {@link GraphQueryResult} is closed.
 *
 * @param preparedQuery
 *            The prepared query.
 *
 * @return The resulting graph.
 *
 * @throws Exception
 */
static protected Graph asGraph(final IPreparedGraphQuery preparedQuery)
        throws Exception {

    final GraphQueryResult result = preparedQuery.evaluate();

    try {

        final Graph g = new LinkedHashModel();

        while (result.hasNext()) {
            g.add(result.next());
        }

        return g;

    } finally {

        result.close();

    }
}
Example #7
Source File: LinkedDataServletTest.java From cumulusrdf with Apache License 2.0
@Test
public void post() throws IOException, CumulusStoreException, RDFHandlerException, ServletException {
    for (int i = 0; i < TRIPLES_NT.size(); i++) {
        for (String mime_type : MimeTypes.RDF_SERIALIZATIONS) {

            /*
             * clear data ...
             */
            TRIPLE_STORE.clear();
            assertTrue("store should be empty",
                    !TRIPLE_STORE.query(new Value[] { null, null, null }).hasNext());

            /*
             * prepare data in desired RDF serialization
             */
            Model model = new LinkedHashModel(parseNX(TRIPLES_NT.get(i)));

            final ByteArrayOutputStream out = new ByteArrayOutputStream();
            Rio.write(model, out, RDFFormat.forMIMEType(mime_type));

            /*
             * prepare mock ...
             */
            when(_request.getHeader(Headers.CONTENT_TYPE)).thenReturn(mime_type);
            when(_request.getInputStream()).thenReturn(new ServletInputStream() {

                final InputStream _inputStream = new ByteArrayInputStream(out.toByteArray());

                @Override
                public int read() throws IOException {
                    return _inputStream.read();
                }
            });

            /*
             * HTTP POST
             */
            _ld_servlet.doPost(_request, _response);

            /*
             * verify the HTTP POST ...
             */
            verify(_response, atLeastOnce()).setStatus(HttpServletResponse.SC_CREATED);

            for (Statement stmt : model) {
                assertTrue("statement '" + stmt + "' has not been been added correctly for serialization '" + mime_type + "'",
                        TRIPLE_STORE.query(Util.toValueArray(stmt)).hasNext());
            }
        }
    }
}
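Example #7 round-trips a LinkedHashModel through Rio: the model is serialized with Rio.write, POSTed to the servlet, and then verified against the store. The following is a minimal, self-contained sketch of that parse/serialize round trip, assuming a Sesame version that provides Rio.write (as used above) and using Rio.createParser in place of the test's parseNX helper; the sample triple and base URI are made up for illustration.

import java.io.StringReader;
import java.io.StringWriter;

import org.openrdf.model.Model;
import org.openrdf.model.impl.LinkedHashModel;
import org.openrdf.rio.RDFFormat;
import org.openrdf.rio.RDFParser;
import org.openrdf.rio.Rio;
import org.openrdf.rio.helpers.StatementCollector;

public class RioRoundTrip {

    public static void main(final String[] args) throws Exception {

        // Made-up sample data in N-Triples.
        final String ntriples =
                "<http://example.org/Mike> <http://www.w3.org/2000/01/rdf-schema#label> \"Mike\" .\n";

        // Parse into a LinkedHashModel via a StatementCollector.
        final Model model = new LinkedHashModel();
        final RDFParser parser = Rio.createParser(RDFFormat.NTRIPLES);
        parser.setRDFHandler(new StatementCollector(model));
        parser.parse(new StringReader(ntriples), "http://example.org/");

        // Re-serialize the same model in another RDF format.
        final StringWriter out = new StringWriter();
        Rio.write(model, out, RDFFormat.TURTLE);
        System.out.println(out);
    }
}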
Example #8
Source File: QueryServlet.java From database with GNU General Public License v2.0
/**
 * Generate a SPARQL 1.1 Service Description for the addressed triple store
 * or quad store.
 *
 * @see https://sourceforge.net/apps/trac/bigdata/ticket/500
 */
private void doServiceDescription(final HttpServletRequest req,
        final HttpServletResponse resp) throws IOException {

    /**
     * Protect the entire operation with a transaction.
     *
     * @see <a href="http://trac.blazegraph.com/ticket/867"> NSS concurrency
     *      problem with list namespaces and create namespace </a>
     */
    final long tx = getBigdataRDFContext().newTx(getTimestamp(req));

    try {

        final AbstractTripleStore tripleStore = getBigdataRDFContext()
                .getTripleStore(getNamespace(req), tx);

        if (tripleStore == null) {
            /*
             * There is no such triple/quad store instance.
             */
            buildAndCommitNamespaceNotFoundResponse(req, resp);
            return;
        }

        // The serviceURIs for this graph.
        final String[] serviceURI = BigdataServlet.getServiceURIs(
                getServletContext(), req);

        /*
         * TODO Resolve the SD class name and ctor via a configuration
         * property for extensible descriptions.
         */
        final Graph g = new LinkedHashModel();
        {

            final SD sd = new SD(g, tripleStore, serviceURI);

            final SparqlEndpointConfig config = getBigdataRDFContext()
                    .getConfig();

            sd.describeService(true/* describeStatistics */,
                    config.describeEachNamedGraph);

        }

        sendGraph(req, resp, g);

    } catch (Throwable t) {

        launderThrowable(t, resp, "");

    } finally {

        getBigdataRDFContext().abortTx(tx);

    }
}
Example #9
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
/**
 * Inserts some data into the KB and then issues a DESCRIBE query against
 * the REST API and verifies the expected results.
 *
 * @param format
 *            The format is used to specify the Accept header.
 *
 * @throws Exception
 */
protected void doDescribeTest(final String method, final RDFFormat format)
        throws Exception {

    final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
    final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
    final URI person = new URIImpl(BD.NAMESPACE + "Person");
    final URI likes = new URIImpl(BD.NAMESPACE + "likes");
    final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
    final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
    final Literal label1 = new LiteralImpl("Mike");
    final Literal label2 = new LiteralImpl("Bryan");
    {
        final Graph g = new LinkedHashModel();
        g.add(mike, RDF.TYPE, person);
        g.add(mike, likes, rdf);
        g.add(mike, RDFS.LABEL, label1);
        g.add(bryan, RDF.TYPE, person);
        g.add(bryan, likes, rdfs);
        g.add(bryan, RDFS.LABEL, label2);
        m_repo.add(new AddOp(g));
    }

    // The expected results.
    final Graph expected = new LinkedHashModel();
    {
        expected.add(new StatementImpl(mike, likes, rdf));
        expected.add(new StatementImpl(mike, RDF.TYPE, person));
        expected.add(new StatementImpl(mike, RDFS.LABEL, label1));
    }

    // Run the query and verify the results.
    {
        final String queryStr =
            "prefix bd: <"+BD.NAMESPACE+"> " +//
            "prefix rdf: <"+RDF.NAMESPACE+"> " +//
            "prefix rdfs: <"+RDFS.NAMESPACE+"> " +//
            "DESCRIBE ?x " +//
            "WHERE { " +//
            " ?x rdf:type bd:Person . " +//
            " ?x bd:likes bd:RDF " +//
            "}";

        assertSameGraph(expected, m_repo.prepareGraphQuery(queryStr));
    }
}
Example #10
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
protected Graph genNTRIPLES2(final int ntriples)
        throws RDFHandlerException {

    final Graph g = new LinkedHashModel();

    final ValueFactory f = new ValueFactoryImpl();

    final URI s = f.createURI("http://www.bigdata.org/b");

    final URI rdfType = f
            .createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type");

    for (int i = 0; i < ntriples; i++) {

        final URI o = f.createURI("http://www.bigdata.org/c#" + i);

        g.add(s, rdfType, o);

    }

    return g;
}
Example #11
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
/**
 * @deprecated by {@link #asGraph(IPreparedGraphQuery)} which can ensure that
 *             the {@link GraphQueryResult} is closed.
 */
static protected Graph asGraph(final GraphQueryResult result)
        throws Exception {

    try {

        final Graph g = new LinkedHashModel();

        while (result.hasNext()) {
            g.add(result.next());
        }

        return g;

    } finally {

        result.close();

    }
}
Example #12
Source File: BigdataSailRemoteRepositoryConnection.java From database with GNU General Public License v2.0
@Override
public <E extends Exception> void remove(
        final Iteration<? extends Statement, E> stmts, final Resource... c)
        throws RepositoryException, E {

    final Graph g = new LinkedHashModel();

    while (stmts.hasNext()) {
        g.add(stmts.next());
    }

    remove(g, c);
}
Example #13
Source File: BigdataSailRemoteRepositoryConnection.java From database with GNU General Public License v2.0
@Override
public <E extends Exception> void add(
        final Iteration<? extends Statement, E> stmts, final Resource... c)
        throws RepositoryException, E {

    final Graph g = new LinkedHashModel();

    while (stmts.hasNext()) {
        g.add(stmts.next());
    }

    add(g, c);
}
Example #14
Source File: SparqlEvaluator.java From anno4j with Apache License 2.0
public Model asModel() throws OpenRDFException {
    GraphQuery qry = prepareGraphQuery();
    Model model = new LinkedHashModel();
    qry.evaluate(new StatementCollector(model));
    return model;
}
Example #15
Source File: OntologyLoader.java From anno4j with Apache License 2.0
public OntologyLoader() {
    this(new LinkedHashModel());
}
Example #16
Source File: RDFDataSource.java From anno4j with Apache License 2.0
public Model match(Value subj, URI pred, Value obj, URI graph) {
    if (subj == null || subj instanceof Resource)
        return new LinkedHashModel(model.filter((Resource) subj, pred, obj, graph));
    return new LinkedHashModel();
}
Example #17
Source File: RDFDataSource.java From anno4j with Apache License 2.0
public Model match(Value subj, URI pred, Value obj) {
    if (subj == null || subj instanceof Resource)
        return new LinkedHashModel(model.filter((Resource) subj, pred, obj));
    return new LinkedHashModel();
}
Example #18
Source File: BigdataSailRemoteRepositoryConnection.java From database with GNU General Public License v2.0
/**
 * <strong>single statement updates not recommended for performance
 * reasons</strong>. Remember, batch is beautiful.
 * <p>
 * {@inheritDoc}
 */
@Override
public void remove(final Statement stmt, final Resource... c)
        throws RepositoryException {

//  log.warn("single statement updates not recommended");

    final Graph g = new LinkedHashModel();

    g.add(stmt);

    remove(g, c);
}
Example #19
Source File: BigdataSailRemoteRepositoryConnection.java From database with GNU General Public License v2.0
/**
 * <strong>single statement updates not recommended for performance
 * reasons</strong>. Remember, batch is beautiful.
 * <p>
 * {@inheritDoc}
 */
@Override
public void add(final Statement stmt, final Resource... c)
        throws RepositoryException {

//  log.warn("single statement updates not recommended");

    final Graph g = new LinkedHashModel();

    g.add(stmt);

    add(g, c);
}
Example #20
Source File: RemoteRepositoryBase.java From database with GNU General Public License v2.0
/**
 * Utility method to turn a {@link GraphQueryResult} into a {@link Graph}.
 *
 * @param result
 *            The {@link GraphQueryResult}.
 *
 * @return The {@link Graph}.
 *
 * @throws Exception
 */
static public Graph asGraph(final GraphQueryResult result) throws Exception {

    final Graph g = new LinkedHashModel();

    while (result.hasNext()) {
        g.add(result.next());
    }

    return g;
}
Example #21
Source File: AbstractTestNanoSparqlClient.java From database with GNU General Public License v2.0
protected static Graph readGraphFromFile(final File file)
        throws RDFParseException, RDFHandlerException, IOException {

    final RDFFormat format = RDFFormat.forFileName(file.getName());

    final RDFParserFactory rdfParserFactory = RDFParserRegistry
            .getInstance().get(format);

    if (rdfParserFactory == null) {
        throw new RuntimeException("Parser not found: file=" + file
                + ", format=" + format);
    }

    final RDFParser rdfParser = rdfParserFactory.getParser();

    rdfParser.setValueFactory(new ValueFactoryImpl());

    rdfParser.setVerifyData(true);

    rdfParser.setStopAtFirstError(true);

    rdfParser.setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE);

    final StatementCollector rdfHandler = new StatementCollector();

    rdfParser.setRDFHandler(rdfHandler);

    /*
     * Run the parser, which will cause statements to be
     * inserted.
     */
    final FileReader r = new FileReader(file);

    try {

        rdfParser.parse(r, file.toURI().toString()/* baseURL */);

    } finally {

        r.close();

    }

    final Graph g = new LinkedHashModel();

    g.addAll(rdfHandler.getStatements());

    return g;
}
Example #22
Source File: WorkbenchServlet.java From database with GNU General Public License v2.0
/**
 * Convert RDF data from one format to another.
 */
private void doConvert(final HttpServletRequest req,
        final HttpServletResponse resp) throws IOException {

    final String baseURI = req.getRequestURL().toString();

    // The content type of the request.
    final String contentType = req.getContentType();

    if (log.isInfoEnabled())
        log.info("Request body: " + contentType);

    /**
     * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620">
     * UpdateServlet fails to parse MIMEType when doing conneg. </a>
     */
    final RDFFormat requestBodyFormat = RDFFormat.forMIMEType(new MiniMime(
            contentType).getMimeType());

    if (requestBodyFormat == null) {

        buildAndCommitResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN,
                "Content-Type not recognized as RDF: " + contentType);

        return;

    }

    final RDFParserFactory rdfParserFactory = RDFParserRegistry
            .getInstance().get(requestBodyFormat);

    if (rdfParserFactory == null) {

        buildAndCommitResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN,
                "Parser factory not found: Content-Type=" + contentType
                        + ", format=" + requestBodyFormat);

        return;

    }

//  final String s= IOUtil.readString(req.getInputStream());
//  System.err.println(s);

    final Graph g = new LinkedHashModel();

    try {

        /*
         * There is a request body, so let's try and parse it.
         */
        final RDFParser rdfParser = rdfParserFactory.getParser();

        rdfParser.setValueFactory(new ValueFactoryImpl());

        rdfParser.setVerifyData(true);

        rdfParser.setStopAtFirstError(true);

        rdfParser.setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE);

        rdfParser.setRDFHandler(new StatementCollector(g));

        /*
         * Run the parser, which will cause statements to be
         * inserted.
         */
        rdfParser.parse(req.getInputStream(), baseURI);

        /*
         * Send back the graph using CONNEG to decide the MIME Type of the
         * response.
         */
        sendGraph(req, resp, g);

    } catch (Throwable t) {

        BigdataRDFServlet.launderThrowable(t, resp, null);

    }
}