From: <tho...@us...> - 2011-06-24 19:21:57
Revision: 4795
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4795&view=rev
Author:   thompsonbry
Date:     2011-06-24 19:21:49 +0000 (Fri, 24 Jun 2011)

Log Message:
-----------
Bug fix: the mutation servlet code paths did not perform an explicit conn.rollback() on the error path.

Added support for ACID PUT using a DESCRIBE or CONSTRUCT query in combination with a request body containing RDF data. This closes [1].

[1] https://sourceforge.net/apps/trac/bigdata/ticket/332 (ACID PUT API for NSS)

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java
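To illustrate the new API before the diffs: a minimal, self-contained sketch of an
ACID PUT request against the NanoSparqlServer. The endpoint URL, class name, and
sample data are illustrative assumptions; the request shape (PUT with a "query"
parameter selecting the statements to delete, plus an RDF request body containing
the statements to insert) follows the tests added below.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    public class AcidPutExample {

        public static void main(String[] args) throws Exception {

            // Assumed NSS endpoint; adjust for your deployment.
            final String serviceURL = "http://localhost:8080/sparql";

            // CONSTRUCT query materializing the statements to be deleted.
            final String deleteQuery = "prefix bd: <http://www.bigdata.com/rdf#> "
                    + "CONSTRUCT { ?x bd:likes bd:RDFS } "
                    + "WHERE { ?x bd:likes bd:RDFS }";

            // N-Triples request body with the statements to be inserted.
            final byte[] data = ("<http://www.bigdata.com/rdf#Bryan> "
                    + "<http://www.bigdata.com/rdf#likes> "
                    + "<http://www.bigdata.com/rdf#RDF> .\n").getBytes("UTF-8");

            final URL url = new URL(serviceURL + "?query="
                    + URLEncoder.encode(deleteQuery, "UTF-8"));
            final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("PUT");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "text/plain"); // N-Triples
            conn.setRequestProperty("Content-Length", Integer.toString(data.length));
            final OutputStream os = conn.getOutputStream();
            try {
                os.write(data);
                os.flush();
            } finally {
                os.close();
            }

            // On success the response reports the mutation count; on any error
            // the server now rolls back, so the delete+insert is all-or-nothing.
            System.out.println("HTTP " + conn.getResponseCode());

        }

    }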
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -776,8 +776,18 @@
      * "DELETE WITH QUERY" where this method is used in a context which writes
      * onto an internal pipe rather than onto the {@link HttpServletResponse}.
      * 
+     * @param namespace
+     *            The namespace associated with the {@link AbstractTripleStore}
+     *            view.
+     * @param timestamp
+     *            The timestamp associated with the {@link AbstractTripleStore}
+     *            view.
      * @param queryStr
      *            The query.
+     * @param acceptOverride
+     *            Override the Accept header (optional). This is used by UPDATE
+     *            and DELETE so they can control the {@link RDFFormat} of the
+     *            materialized query results.
      * @param req
      *            The request.
      * @param os
@@ -791,6 +801,7 @@
             final String namespace,//
             final long timestamp,//
             final String queryStr,//
+            final String acceptOverride,//
             final HttpServletRequest req,//
             final OutputStream os) throws MalformedQueryException {
 
@@ -807,13 +818,15 @@
          * query exactly once in order to minimize the resources associated with
          * the query parser.
          */
-        final ParsedQuery parsedQuery = m_queryParser.parseQuery(queryStr, baseURI);
+        final ParsedQuery parsedQuery = m_queryParser.parseQuery(queryStr,
+                baseURI);
 
-        if(log.isDebugEnabled())
+        if (log.isDebugEnabled())
             log.debug(parsedQuery.toString());
-        
-        final QueryType queryType = ((IBigdataParsedQuery) parsedQuery).getQueryType();
+        final QueryType queryType = ((IBigdataParsedQuery) parsedQuery)
+                .getQueryType();
+        
         /*
          * When true, provide an "explanation" for the query (query plan, query
          * evaluation statistics) rather than the results of the query.
@@ -833,8 +846,8 @@
          * has some stuff related to generating Accept headers in their
          * RDFFormat which could bear some more looking into in this regard.)
          */
-        final String acceptStr = explain ? "text/html" : req
-                .getHeader("Accept");
+        final String acceptStr = explain ? "text/html"
+                : acceptOverride != null ? acceptOverride : req.getHeader("Accept");
 
         switch (queryType) {
 
         case ASK: {
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -98,8 +98,12 @@
         final InputStream is = newPipedInputStream(os);
         try {
 
+            // Use this format for the query results.
+            final RDFFormat format = RDFFormat.NTRIPLES;
+
             final AbstractQueryTask queryTask = getBigdataRDFContext()
                     .getQueryTask(namespace, ITx.READ_COMMITTED, queryStr,
+                            format.getDefaultMIMEType(),
                             req, os);
 
             switch (queryTask.queryType) {
@@ -120,21 +124,6 @@
                 conn = getBigdataRDFContext().getUnisolatedConnection(
                         namespace);
 
-                /*
-                 * TODO The RDF for the *query* will be generated using the
-                 * MIME type negotiated based on the Accept header (if any)
-                 * in the DELETE request. That means that we need to look at
-                 * the Accept header here and chose the right RDFFormat for
-                 * the parser. (The alternative is to have an alternative
-                 * way to run the query task where we specify the MIME Type
-                 * of the result directly. That might be better all around.)
-                 */
-
-                final String contentType = req.getContentType();
-
-                final RDFFormat format = RDFFormat.forMIMEType(contentType,
-                        RDFFormat.RDFXML);
-
                 final RDFParserFactory factory = RDFParserRegistry
                         .getInstance().get(format);
 
@@ -171,7 +160,14 @@
                 final long elapsed = System.currentTimeMillis() - begin;
 
                 reportModifiedCount(resp, nmodified.get(), elapsed);
+
+            } catch(Throwable t) {
+
+                if(conn != null)
+                    conn.rollback();
+
+                throw new RuntimeException(t);
+
             } finally {
 
                 if (conn != null)
@@ -200,15 +196,15 @@
 
         final String contentType = req.getContentType();
 
-        final String queryStr = req.getRequestURI();
+        final String queryStr = req.getParameter("query");
 
-        if (contentType != null) {
+        if (queryStr != null) {
 
-            doDeleteWithBody(req, resp);
+            doDeleteWithQuery(req, resp);
 
-        } else if (queryStr != null) {
+        } else if (contentType != null) {
 
-            doDeleteWithQuery(req, resp);
+            doDeleteWithBody(req, resp);
 
         } else {
 
@@ -304,6 +300,13 @@
 
             reportModifiedCount(resp, nmodified.get(), elapsed);
 
+        } catch(Throwable t) {
+
+            if (conn != null)
+                conn.rollback();
+
+            throw new RuntimeException(t);
+
         } finally {
 
             if (conn != null)
 
@@ -323,7 +326,7 @@
     /**
      * Helper class removes statements from the sail as they are visited by a parser.
      */
-    private static class RemoveStatementHandler extends RDFHandlerBase {
+    static class RemoveStatementHandler extends RDFHandlerBase {
 
         private final BigdataSailConnection conn;
         private final AtomicLong nmodified;
@@ -337,7 +340,8 @@
 
         }
 
-        public void handleStatement(Statement stmt) throws RDFHandlerException {
+        public void handleStatement(final Statement stmt)
+                throws RDFHandlerException {
 
             try {
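A companion sketch for the dispatch change above: with this revision a "query"
parameter now routes DELETE to doDeleteWithQuery(), where the CONSTRUCT query
materializes the statements to remove. The endpoint URL and class name are
illustrative assumptions; the query shown would delete every matching statement,
so treat it as a sketch only.

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    public class DeleteWithQueryExample {

        public static void main(String[] args) throws Exception {

            // CONSTRUCT query selecting the statements to delete.
            final String query = "CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o }";

            // Assumed NSS endpoint; adjust for your deployment.
            final URL url = new URL("http://localhost:8080/sparql?query="
                    + URLEncoder.encode(query, "UTF-8"));

            final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("DELETE");

            // A 200 response carries the mutation count; on any error the
            // servlet now does conn.rollback() before reporting the failure.
            System.out.println("HTTP " + conn.getResponseCode());

        }

    }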
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -186,6 +186,13 @@
 
             return;
 
+        } catch(Throwable t) {
+
+            if(conn != null)
+                conn.rollback();
+
+            throw new RuntimeException(t);
+
         } finally {
 
             if (conn != null)
 
@@ -350,6 +357,13 @@
 
             reportModifiedCount(resp, nmodified.get(), elapsed);
 
+        } catch(Throwable t) {
+
+            if(conn != null)
+                conn.rollback();
+
+            throw new RuntimeException(t);
+
         } finally {
 
             if (conn != null)
 
@@ -369,7 +383,7 @@
     /**
      * Helper class adds statements to the sail as they are visited by a parser.
      */
-    private static class AddStatementHandler extends RDFHandlerBase {
+    static class AddStatementHandler extends RDFHandlerBase {
 
         private final BigdataSailConnection conn;
         private final AtomicLong nmodified;
@@ -380,7 +394,8 @@
             this.nmodified = nmodified;
 
         }
 
-        public void handleStatement(Statement stmt) throws RDFHandlerException {
+        public void handleStatement(final Statement stmt)
+                throws RDFHandlerException {
 
             try {

Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -130,7 +130,7 @@
              * query.
              */
             queryTask = context.getQueryTask(namespace, timestamp,
-                    queryStr, req, os);
+                    queryStr, null/*acceptOverride*/, req, os);
 
         } catch (MalformedQueryException ex) {
             /*
              * Send back a BAD REQUEST (400) along with the text of the
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -1,17 +1,30 @@
 package com.bigdata.rdf.sail.webapp;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PipedOutputStream;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.atomic.AtomicLong;
+
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.log4j.Logger;
+import org.openrdf.rio.RDFFormat;
+import org.openrdf.rio.RDFParser;
+import org.openrdf.rio.RDFParserFactory;
+import org.openrdf.rio.RDFParserRegistry;
 
+import com.bigdata.journal.ITx;
+import com.bigdata.rdf.sail.BigdataSailRepositoryConnection;
+import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask;
+import com.bigdata.rdf.sail.webapp.DeleteServlet.RemoveStatementHandler;
+import com.bigdata.rdf.sail.webapp.InsertServlet.AddStatementHandler;
+
 /**
  * Handler for UPDATE operations (PUT).
  * 
  * @author martyncutcher
- * 
- *         FIXME The UPDATE API is not finished yet. It will provide
- *         DELETE+INSERT semantics.
  */
 public class UpdateServlet extends BigdataRDFServlet {
 
@@ -28,8 +41,222 @@
     }
 
     @Override
-    protected void doPut(HttpServletRequest req, HttpServletResponse resp) {
-        throw new UnsupportedOperationException();
+    protected void doPut(HttpServletRequest req, HttpServletResponse resp)
+            throws IOException {
+
+        final String queryStr = req.getParameter("query");
+
+        final String contentType = req.getContentType();
+
+        if(contentType == null) {
+
+            resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+
+        }
+
+        if(queryStr == null) {
+
+            resp.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+
+        }
+
+        doUpdateWithQuery(req, resp);
+
     }
 
+    /**
+     * Delete all statements materialized by a DESCRIBE or CONSTRUCT query and
+     * then insert all statements in the request body.
+     * <p>
+     * Note: To avoid materializing the statements, this runs the query against
+     * the last commit time and uses a pipe to connect the query directly to the
+     * process deleting the statements. This is done while it is holding the
+     * unisolated connection which prevents concurrent modifications. Therefore
+     * the entire SELECT + DELETE operation is ACID.
+     */
+    private void doUpdateWithQuery(final HttpServletRequest req,
+            final HttpServletResponse resp) throws IOException {
+
+        final long begin = System.currentTimeMillis();
+
+        final String baseURI = req.getRequestURL().toString();
+
+        final String namespace = getNamespace(req);
+
+        final String queryStr = req.getParameter("query");
+
+        if (queryStr == null)
+            throw new UnsupportedOperationException();
+
+        final String contentType = req.getContentType();
+
+        if (log.isInfoEnabled())
+            log.info("Request body: " + contentType);
+
+        final RDFFormat requestBodyFormat = RDFFormat.forMIMEType(contentType);
+
+        if (requestBodyFormat == null) {
+
+            buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN,
+                    "Content-Type not recognized as RDF: " + contentType);
+
+            return;
+
+        }
+
+        final RDFParserFactory rdfParserFactory = RDFParserRegistry
+                .getInstance().get(requestBodyFormat);
+
+        if (rdfParserFactory == null) {
+
+            buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN,
+                    "Parser factory not found: Content-Type="
+                            + contentType + ", format=" + requestBodyFormat);
+
+            return;
+
+        }
+
+        if (log.isInfoEnabled())
+            log.info("update with query: " + queryStr);
+
+        try {
+
+            /*
+             * Note: pipe is drained by this thread to consume the query
+             * results, which are the statements to be deleted.
+             */
+            final PipedOutputStream os = new PipedOutputStream();
+            final InputStream is = newPipedInputStream(os);
+            try {
+
+                // Use this format for the query results.
+                final RDFFormat deleteQueryFormat = RDFFormat.NTRIPLES;
+
+                final AbstractQueryTask queryTask = getBigdataRDFContext()
+                        .getQueryTask(namespace, ITx.READ_COMMITTED, queryStr,
+                                deleteQueryFormat.getDefaultMIMEType(),
+                                req, os);
+
+                switch (queryTask.queryType) {
+                case DESCRIBE:
+                case CONSTRUCT:
+                    break;
+                default:
+                    buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN,
+                            "Must be DESCRIBE or CONSTRUCT query.");
+                    return;
+                }
+
+                final AtomicLong nmodified = new AtomicLong(0L);
+
+                BigdataSailRepositoryConnection conn = null;
+                try {
+
+                    conn = getBigdataRDFContext().getUnisolatedConnection(
+                            namespace);
+
+                    // Run DELETE
+                    {
+
+                        final RDFParserFactory factory = RDFParserRegistry
+                                .getInstance().get(deleteQueryFormat);
+
+                        final RDFParser rdfParser = factory.getParser();
+
+                        rdfParser.setValueFactory(conn.getTripleStore()
+                                .getValueFactory());
+
+                        rdfParser.setVerifyData(false);
+
+                        rdfParser.setStopAtFirstError(true);
+
+                        rdfParser
+                                .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE);
+
+                        rdfParser.setRDFHandler(new RemoveStatementHandler(conn
+                                .getSailConnection(), nmodified));
+
+                        // Wrap as Future.
+                        final FutureTask<Void> ft = new FutureTask<Void>(
+                                queryTask);
+                        
+                        // Submit query for evaluation.
+                        getBigdataRDFContext().queryService.execute(ft);
+
+                        // Run parser : visited statements will be deleted.
+                        rdfParser.parse(is, baseURI);
+
+                        // Await the Future (of the Query)
+                        ft.get();
+
+                    }
+
+                    // Run INSERT
+                    {
+
+                        /*
+                         * There is a request body, so let's try and parse it.
+                         */
+
+                        final RDFParser rdfParser = rdfParserFactory
+                                .getParser();
+
+                        rdfParser.setValueFactory(conn.getTripleStore()
+                                .getValueFactory());
+
+                        rdfParser.setVerifyData(true);
+
+                        rdfParser.setStopAtFirstError(true);
+
+                        rdfParser
+                                .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE);
+
+                        rdfParser.setRDFHandler(new AddStatementHandler(conn
+                                .getSailConnection(), nmodified));
+
+                        /*
+                         * Run the parser, which will cause statements to be
+                         * inserted.
+                         */
+                        rdfParser.parse(req.getInputStream(), baseURI);
+
+                    }
+
+                    // Commit the mutation.
+                    conn.commit();
+
+                    final long elapsed = System.currentTimeMillis() - begin;
+
+                    reportModifiedCount(resp, nmodified.get(), elapsed);
+
+                } catch(Throwable t) {
+
+                    if(conn != null)
+                        conn.rollback();
+
+                    throw new RuntimeException(t);
+
+                } finally {
+
+                    if (conn != null)
+                        conn.close();
+
+                }
+
+            } catch (Throwable t) {
+
+                throw BigdataRDFServlet.launderThrowable(t, resp, queryStr);
+
+            }
+
+        } catch (Exception ex) {
+
+            // Will be rendered as an INTERNAL_ERROR.
+            throw new RuntimeException(ex);
+
+        }
+
+    }
+
 }
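The javadoc above describes the key trick: the CONSTRUCT query streams its
results through a pipe into a parser whose handler deletes each visited
statement, all while holding the unisolated connection. A minimal, self-contained
sketch of that producer/consumer pipe pattern follows; the class name and sample
data are illustrative, with plain text standing in for N-Triples and println
standing in for RemoveStatementHandler.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.io.PipedInputStream;
    import java.io.PipedOutputStream;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.FutureTask;

    public class PipeSketch {

        public static void main(String[] args) throws Exception {

            final PipedOutputStream pos = new PipedOutputStream();
            final PipedInputStream pis = new PipedInputStream(pos);
            final ExecutorService exec = Executors.newSingleThreadExecutor();

            // Producer thread: stands in for the CONSTRUCT query task that
            // serializes its solutions onto the pipe.
            final FutureTask<Void> queryTask = new FutureTask<Void>(
                    new Callable<Void>() {
                        public Void call() throws Exception {
                            try {
                                pos.write("<a:s> <a:p> <a:o> .\n".getBytes("UTF-8"));
                            } finally {
                                pos.close(); // EOF tells the consumer it is done.
                            }
                            return null;
                        }
                    });
            exec.execute(queryTask);

            // Consumer (this thread): stands in for the RDF parser whose
            // handler deletes each visited statement.
            final BufferedReader reader = new BufferedReader(
                    new InputStreamReader(pis, "UTF-8"));
            String line;
            while ((line = reader.readLine()) != null)
                System.out.println("delete: " + line);

            queryTask.get(); // Await the query and propagate any error.
            exec.shutdown();

        }

    }

Because nothing is materialized and the consumer runs inside the unisolated
connection, an error anywhere simply rolls the connection back, which is what
makes the combined delete+insert ACID.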
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java	2011-06-24 17:43:27 UTC (rev 4794)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java	2011-06-24 19:21:49 UTC (rev 4795)
@@ -36,6 +36,7 @@
 import org.openrdf.model.vocabulary.RDF;
 import org.openrdf.model.vocabulary.RDFS;
 import org.openrdf.query.BindingSet;
+import org.openrdf.query.MalformedQueryException;
 import org.openrdf.query.TupleQueryResultHandlerBase;
 import org.openrdf.query.resultio.BooleanQueryResultFormat;
 import org.openrdf.query.resultio.BooleanQueryResultParser;
@@ -45,6 +46,7 @@
 import org.openrdf.query.resultio.TupleQueryResultParser;
 import org.openrdf.query.resultio.TupleQueryResultParserFactory;
 import org.openrdf.query.resultio.TupleQueryResultParserRegistry;
+import org.openrdf.repository.RepositoryException;
 import org.openrdf.rio.RDFFormat;
 import org.openrdf.rio.RDFHandlerException;
 import org.openrdf.rio.RDFParser;
@@ -54,6 +56,7 @@
 import org.openrdf.rio.RDFWriterFactory;
 import org.openrdf.rio.RDFWriterRegistry;
 import org.openrdf.rio.helpers.StatementCollector;
+import org.openrdf.sail.SailException;
 import org.xml.sax.Attributes;
 import org.xml.sax.ext.DefaultHandler2;
 
@@ -65,6 +68,7 @@
 import com.bigdata.rdf.sail.BigdataSail;
 import com.bigdata.rdf.sail.BigdataSailRepository;
 import com.bigdata.rdf.sail.BigdataSailRepositoryConnection;
+import com.bigdata.rdf.sail.sparql.BigdataSPARQLParser;
 import com.bigdata.rdf.store.AbstractTripleStore;
 import com.bigdata.rdf.store.BD;
 import com.bigdata.rdf.store.LocalTripleStore;
@@ -270,6 +274,16 @@
                 RDFFormat.RDFXML.getDefaultMIMEType() + ";q=1"//
         ;
 
+        /**
+         * The Content-Type (iff there will be a request body).
+         */
+        public String contentType = null;
+
+        /**
+         * The data to send as the request body (optional).
+         */
+        public byte[] data = null;
+
         /** The connection timeout (ms) -or- ZERO (0) for an infinite timeout.
         */
        public int timeout = 0;
@@ -350,22 +364,45 @@
 //                    opts.defaultGraphUri, "UTF-8")));
 
         }
 
+        if (log.isDebugEnabled()) {
+            log.debug("*** Request ***");
+            log.debug(opts.serviceURL);
+            log.debug(opts.queryStr);
+        }
+
         HttpURLConnection conn = null;
         try {
 
-            conn = doConnect(urlString.toString(), opts.method);
-
-            conn.setReadTimeout(opts.timeout);
-
+//            conn = doConnect(urlString.toString(), opts.method);
+            final URL url = new URL(urlString.toString());
+            conn = (HttpURLConnection) url.openConnection();
+            conn.setRequestMethod(opts.method);
+            conn.setDoOutput(true);
+            conn.setDoInput(true);
+            conn.setUseCaches(false);
+            conn.setReadTimeout(opts.timeout);
             conn.setRequestProperty("Accept", opts.acceptHeader);
+
+            if (opts.contentType != null) {
+
+                if (opts.data == null)
+                    throw new AssertionError();
+
+                conn.setRequestProperty("Content-Type", opts.contentType);
+
+                conn.setRequestProperty("Content-Length", Integer
+                        .toString(opts.data.length));
 
-            // write out the request headers
-            if (log.isDebugEnabled()) {
-                log.debug("*** Request ***");
-                log.debug(opts.serviceURL);
-                log.debug(opts.queryStr);
-            }
-            
+                final OutputStream os = conn.getOutputStream();
+                try {
+                    os.write(opts.data);
+                    os.flush();
+                } finally {
+                    os.close();
+                }
+
+            }
+
             // connect.
             conn.connect();
@@ -1479,8 +1516,13 @@
         doConstructTest("POST",RDFFormat.TRIX);
         
     }
 
-    private void doConstructTest(final String method, final RDFFormat format)
-            throws Exception {
+    /**
+     * Sets up a simple data set on the server.
+     * 
+     * @throws SailException
+     * @throws RepositoryException
+     */
+    private void setupDataOnServer() throws SailException, RepositoryException {
         
         final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
         final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
@@ -1492,11 +1534,11 @@
         final Literal label2 = new LiteralImpl("Bryan");
 
         final BigdataSail sail = getSail();
-        sail.initialize();
-        final BigdataSailRepository repo = new BigdataSailRepository(sail);
-        
         try {
 
+            sail.initialize();
+            final BigdataSailRepository repo = new BigdataSailRepository(sail);
+
             final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo
                     .getConnection();
             try {
@@ -1523,6 +1565,53 @@
         } finally {
             sail.shutDown();
         }
+    }
+    
+    private void doConstructTest(final String method, final RDFFormat format)
+            throws Exception {
+        
+        setupDataOnServer();
+        final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
+        final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
+        final URI person = new URIImpl(BD.NAMESPACE + "Person");
+//        final URI likes = new URIImpl(BD.NAMESPACE + "likes");
+//        final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
+//        final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
+//        final Literal label1 = new LiteralImpl("Mike");
+//        final Literal label2 = new LiteralImpl("Bryan");
+//
+//        final BigdataSail sail = getSail();
+//        sail.initialize();
+//        final BigdataSailRepository repo = new BigdataSailRepository(sail);
+//
+//        try {
+//
+//            final BigdataSailRepositoryConnection cxn = (BigdataSailRepositoryConnection) repo
+//                    .getConnection();
+//            try {
+//
+//                cxn.setAutoCommit(false);
+//
+//                cxn.add(mike, RDF.TYPE, person);
+//                cxn.add(mike, likes, rdf);
+//                cxn.add(mike, RDFS.LABEL, label1);
+//                cxn.add(bryan, RDF.TYPE, person);
+//                cxn.add(bryan, likes, rdfs);
+//                cxn.add(bryan, RDFS.LABEL, label2);
+//
+//                /*
+//                 * Note: The either flush() or commit() is required to flush the
+//                 * statement buffers to the database before executing any
+//                 * operations that go around the sail.
+//                 */
+//                cxn.commit();
+//            } finally {
+//                cxn.close();
+//            }
+//            
+//        } finally {
+//            sail.shutDown();
+//        }
 
         // The expected results.
         final Graph expected = new GraphImpl();
@@ -1583,4 +1672,367 @@
 
     }
 
+    /**
+     * Unit test for ACID UPDATE using PUT. This test is for the operation where
+     * a SPARQL selects the data to be deleted and the request body contains the
+     * statements to be inserted.
+     */
+    public void test_PUT_UPDATE_WITH_QUERY() throws Exception {
+
+        setupDataOnServer();
+
+        final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
+        final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
+//        final URI person = new URIImpl(BD.NAMESPACE + "Person");
+        final URI likes = new URIImpl(BD.NAMESPACE + "likes");
+        final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
+        final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
+
+        // The format used to PUT the data.
+        final RDFFormat format = RDFFormat.NTRIPLES;
+
+        /*
+         * This is the query that we will use to delete some triples from the
+         * database.
+         */
+        final String deleteQueryStr =//
+            "prefix bd: <"+BD.NAMESPACE+"> " +//
+            "prefix rdf: <"+RDF.NAMESPACE+"> " +//
+            "prefix rdfs: <"+RDFS.NAMESPACE+"> " +//
+            "CONSTRUCT { ?x bd:likes bd:RDFS }" +//
+            "WHERE { " +//
+//            "  ?x rdf:type bd:Person . " +//
+            "  ?x bd:likes bd:RDFS " +//
+            "}";
+
+        /*
+         * First, run the query that we will use the delete the triples. This
+         * is a cross check on the expected behavior of the query.
+         */
+        {
+
+            // The expected results.
+            final Graph expected = new GraphImpl();
+            {
+//                expected.add(new StatementImpl(mike, RDF.TYPE, person));
+                expected.add(new StatementImpl(bryan, likes, rdfs));
+            }
+
+            final QueryOptions opts = new QueryOptions();
+            opts.serviceURL = m_serviceURL;
+            opts.queryStr = deleteQueryStr;
+            opts.method = "GET";
+            opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                    .getDefaultMIMEType();
+
+            assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                    requestPath)));
+
+        }
+
+        /*
+         * Setup the document containing the statement to be inserted by the
+         * UPDATE operation.
+         */
+        final byte[] data;
+        {
+            final Graph g = new GraphImpl();
+
+            // The new data.
+            g.add(new StatementImpl(bryan, likes, rdf));
+
+            final RDFWriterFactory writerFactory = RDFWriterRegistry
+                    .getInstance().get(format);
+            if (writerFactory == null)
+                fail("RDFWriterFactory not found: format=" + format);
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            final RDFWriter writer = writerFactory.getWriter(baos);
+            writer.startRDF();
+            for (Statement stmt : g) {
+                writer.handleStatement(stmt);
+            }
+            writer.endRDF();
+            data = baos.toByteArray();
+        }
+
+        /*
+         * Now, run the UPDATE operation.
+         */
+        {
+
+            final QueryOptions opts = new QueryOptions();
+            opts.serviceURL = m_serviceURL;
+            opts.queryStr = deleteQueryStr;
+            opts.method = "PUT";
+            //opts.acceptHeader = ...;
+            opts.contentType = RDFFormat.NTRIPLES.getDefaultMIMEType();
+            opts.data = data;
+            final MutationResult ret = getMutationResult(doSparqlQuery(opts,
+                    requestPath));
+            assertEquals(2, ret.mutationCount);// FIXME 1 removed, but also 1 added.
+
+        }
+
+        /*
+         * Now verify the post-condition state.
+         */
+        {
+
+            /*
+             * This query verifies that we removed the right triple (nobody is
+             * left who likes 'rdfs').
+             */
+            {
+
+                // The expected results.
+                final Graph expected = new GraphImpl();
+
+                final QueryOptions opts = new QueryOptions();
+                opts.serviceURL = m_serviceURL;
+                opts.queryStr = deleteQueryStr;
+                opts.method = "GET";
+                opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                        .getDefaultMIMEType();
+
+                assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                        requestPath)));
+            }
+
+            /* This query verifies that we added the right triple (two people
+             * now like 'rdf').
+             */
+            {
+
+                final String queryStr2 = //
+                    "prefix bd: <" + BD.NAMESPACE + "> " + //
+                    "prefix rdf: <" + RDF.NAMESPACE + "> " + //
+                    "prefix rdfs: <" + RDFS.NAMESPACE + "> " + //
+                    "CONSTRUCT { ?x bd:likes bd:RDF }" + //
+                    "WHERE { " + //
+//                    "  ?x rdf:type bd:Person . " + //
+                    "  ?x bd:likes bd:RDF " + //
+                    "}";
+
+                // The expected results.
+                final Graph expected = new GraphImpl();
+
+                expected.add(new StatementImpl(mike, likes, rdf));
+                expected.add(new StatementImpl(bryan, likes, rdf));
+
+                final QueryOptions opts = new QueryOptions();
+                opts.serviceURL = m_serviceURL;
+                opts.queryStr = queryStr2;
+                opts.method = "GET";
+                opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                        .getDefaultMIMEType();
+
+                assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                        requestPath)));
+
+            }
+
+        }
+
+    }
+
+    /**
+     * Unit test verifies that you can have a CONSTRUCT SPARQL with an empty
+     * WHERE clause.
+     * 
+     * @throws MalformedQueryException
+     */
+    public void test_CONSTRUCT_TEMPLATE_ONLY() throws MalformedQueryException {
+
+        final String deleteQueryStr =//
+            "prefix bd: <"+BD.NAMESPACE+"> " +//
+            "CONSTRUCT { bd:Bryan bd:likes bd:RDFS }" +//
+            "{}";
+
+        new BigdataSPARQLParser().parseQuery(deleteQueryStr,
+                "http://www.bigdata.com");
+
+    }
+    /**
+     * Unit test where the "query" used to delete triples from the database
+     * consists solely of a CONSTRUCT "template" without a WHERE clause (the
+     * WHERE clause is basically optional as all elements of it are optional).
+     * 
+     * @throws Exception
+     */
+    public void test_PUT_UPDATE_WITH_CONSTRUCT_TEMPLATE_ONLY() throws Exception {
+
+        setupDataOnServer();
+
+        final URI mike = new URIImpl(BD.NAMESPACE + "Mike");
+        final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan");
+//        final URI person = new URIImpl(BD.NAMESPACE + "Person");
+        final URI likes = new URIImpl(BD.NAMESPACE + "likes");
+        final URI rdf = new URIImpl(BD.NAMESPACE + "RDF");
+        final URI rdfs = new URIImpl(BD.NAMESPACE + "RDFS");
+
+        // The format used to PUT the data.
+        final RDFFormat format = RDFFormat.NTRIPLES;
+
+        /*
+         * This is the query that we will use to delete some triples from the
+         * database.
+         */
+        final String deleteQueryStr =//
+            "prefix bd: <"+BD.NAMESPACE+"> " +//
+            "CONSTRUCT { bd:Bryan bd:likes bd:RDFS }" +//
+            "{ }";
+
+        new BigdataSPARQLParser().parseQuery(deleteQueryStr,
+                "http://www.bigdata.com");
+
+        /*
+         * First, run the query that we will use the delete the triples. This
+         * is a cross check on the expected behavior of the query.
+         */
+        {
+
+            // The expected results.
+            final Graph expected = new GraphImpl();
+            {
+//                expected.add(new StatementImpl(mike, RDF.TYPE, person));
+                expected.add(new StatementImpl(bryan, likes, rdfs));
+            }
+
+            final QueryOptions opts = new QueryOptions();
+            opts.serviceURL = m_serviceURL;
+            opts.queryStr = deleteQueryStr;
+            opts.method = "GET";
+            opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                    .getDefaultMIMEType();
+
+            assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                    requestPath)));
+
+        }
+
+        /*
+         * Setup the document containing the statement to be inserted by the
+         * UPDATE operation.
+         */
+        final byte[] data;
+        {
+            final Graph g = new GraphImpl();
+
+            // The new data.
+            g.add(new StatementImpl(bryan, likes, rdf));
+
+            final RDFWriterFactory writerFactory = RDFWriterRegistry
+                    .getInstance().get(format);
+            if (writerFactory == null)
+                fail("RDFWriterFactory not found: format=" + format);
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            final RDFWriter writer = writerFactory.getWriter(baos);
+            writer.startRDF();
+            for (Statement stmt : g) {
+                writer.handleStatement(stmt);
+            }
+            writer.endRDF();
+            data = baos.toByteArray();
+        }
+
+        /*
+         * Now, run the UPDATE operation.
+         */
+        {
+
+            final QueryOptions opts = new QueryOptions();
+            opts.serviceURL = m_serviceURL;
+            opts.queryStr = deleteQueryStr;
+            opts.method = "PUT";
+            //opts.acceptHeader = ...;
+            opts.contentType = RDFFormat.NTRIPLES.getDefaultMIMEType();
+            opts.data = data;
+            final MutationResult ret = getMutationResult(doSparqlQuery(opts,
+                    requestPath));
+            assertEquals(2, ret.mutationCount);// FIXME 1 removed, but also 1 added.
+
+        }
+
+        /*
+         * Now verify the post-condition state.
+         */
+        {
+
+            /*
+             * This query verifies that we removed the right triple (nobody is
+             * left who likes 'rdfs').
+             */
+            {
+
+                final String queryStr2 = //
+                    "prefix bd: <" + BD.NAMESPACE + "> " + //
+                    "prefix rdf: <" + RDF.NAMESPACE + "> " + //
+                    "prefix rdfs: <" + RDFS.NAMESPACE + "> " + //
+                    "CONSTRUCT { ?x bd:likes bd:RDFS }" + //
+                    "WHERE { " + //
+//                    "  ?x rdf:type bd:Person . " + //
+                    "  ?x bd:likes bd:RDFS " + // NB: Checks the kb!
+                    "}";
+
+                // The expected results.
+                final Graph expected = new GraphImpl();
+
+                final QueryOptions opts = new QueryOptions();
+                opts.serviceURL = m_serviceURL;
+                opts.queryStr = queryStr2;
+                opts.method = "GET";
+                opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                        .getDefaultMIMEType();
+
+                assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                        requestPath)));
+            }
+
+            /* This query verifies that we added the right triple (two people
+             * now like 'rdf').
+             */
+            {
+
+                final String queryStr2 = //
+                    "prefix bd: <" + BD.NAMESPACE + "> " + //
+                    "prefix rdf: <" + RDF.NAMESPACE + "> " + //
+                    "prefix rdfs: <" + RDFS.NAMESPACE + "> " + //
+                    "CONSTRUCT { ?x bd:likes bd:RDF }" + //
+                    "WHERE { " + //
+//                    "  ?x rdf:type bd:Person . " + //
+                    "  ?x bd:likes bd:RDF " + //
+                    "}";
+
+                // The expected results.
+                final Graph expected = new GraphImpl();
+
+                expected.add(new StatementImpl(mike, likes, rdf));
+                expected.add(new StatementImpl(bryan, likes, rdf));
+
+                final QueryOptions opts = new QueryOptions();
+                opts.serviceURL = m_serviceURL;
+                opts.queryStr = queryStr2;
+                opts.method = "GET";
+                opts.acceptHeader = TupleQueryResultFormat.SPARQL
+                        .getDefaultMIMEType();
+
+                assertSameGraph(expected, buildGraph(doSparqlQuery(opts,
+                        requestPath)));
+
+            }
+
+        }
+
+    }
+
+//    /**
+//     * Unit test for ACID UPDATE using PUT. This test is for the operation where
+//     * the request body is a multi-part MIME document conveying both the
+//     * statements to be removed and the statement to be inserted.
+//     */
+//    public void test_PUT_UPDATE_WITH_MULTI_PART_MIME() {
+//        fail("write test");
+//    }
+
 }