From: <tho...@us...> - 2011-04-15 00:33:36
Revision: 4400 http://bigdata.svn.sourceforge.net/bigdata/?rev=4400&view=rev Author: thompsonbry Date: 2011-04-15 00:33:29 +0000 (Fri, 15 Apr 2011) Log Message: ----------- Added CONNEG support to the REST API and expanded the test suite to provide some coverage for this. Javadoc on TimestampUtility. Javadoc on NQuadsParser (we need to implement an NQuadsWriter before we can test interchange via the REST API). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TimestampUtility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/NQuadsParser.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TimestampUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TimestampUtility.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TimestampUtility.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -57,13 +57,20 @@ return "readOnly(" + timestamp + ")"; } - + /** * True iff the timestamp is a possible commit time (GT ZERO). + * <p> + * Note: Both read-only transactions and commit times are positive. The + * transaction identifier for a read-only transaction is chosen from among + * those distinct timestamps available between the effective commit time + * requested for the read-only transaction and the next commit time on the + * database. * * @param timestamp * The timestamp. - * @return + * + * @return <code>true</code> for a possible commit time or a read-only tx. */ static public boolean isCommitTime(final long timestamp) { @@ -71,6 +78,16 @@ } + /** + * True iff the timestamp is a possible commit time (GT ZERO) -OR- a + * {@link ITx#READ_COMMITTED} request. + * + * @param timestamp + * The timestamp. + * + * @return <code>true</code> for a possible commit time, a read-only tx, or + * a {@link ITx#READ_COMMITTED} request. + */ static public boolean isReadOnly(final long timestamp) { // return timestamp < ITx.READ_COMMITTED; @@ -78,6 +95,16 @@ } + /** + * Return <code>true</code> iff the timestamp is a possible read-write + * transaction identifier (LT ZERO). + * + * @param timestamp + * The timestamp. + * + * @return <code>true</code> iff the timestamp is a possible read-write + * transaction identifier. + */ static public boolean isReadWriteTx(final long timestamp) { // return timestamp > 0;
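The timestamp conventions documented above partition the long value space: commit times and read-only tx identifiers are positive, read-write tx identifiers are negative, with a couple of reserved sentinels. A self-contained illustration follows; the sentinel values below are assumptions chosen for the example (the authoritative constants live on ITx in the code base), and whether READ_COMMITTED is excluded from the read-write range is likewise assumed here.

public class TimestampSemanticsDemo {

    // Assumed sentinels for this illustration only; see ITx for the real values.
    static final long UNISOLATED = 0L;      // read-write view of the live (unisolated) indices.
    static final long READ_COMMITTED = -1L; // read against the most recent commit point.

    /** A possible commit time or read-only tx: any positive timestamp. */
    static boolean isCommitTime(final long timestamp) {
        return timestamp > 0;
    }

    /** A positive timestamp -OR- a READ_COMMITTED request. */
    static boolean isReadOnly(final long timestamp) {
        return timestamp > 0 || timestamp == READ_COMMITTED;
    }

    /** Read-write tx identifiers are negative (sentinel excluded by assumption). */
    static boolean isReadWriteTx(final long timestamp) {
        return timestamp < 0 && timestamp != READ_COMMITTED;
    }
}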
Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/NQuadsParser.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/NQuadsParser.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/NQuadsParser.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -48,6 +48,7 @@ import org.openrdf.rio.RDFParser; import org.openrdf.rio.RDFParserFactory; import org.openrdf.rio.RDFParserRegistry; +import org.openrdf.rio.RDFWriter; import org.openrdf.rio.helpers.RDFParserBase; import org.semanticweb.yars.nx.Node; import org.semanticweb.yars.nx.parser.NxParser; @@ -63,6 +64,8 @@ * @version $Id$ * * FIXME Write some unit tests for this integration. + * + * FIXME Add {@link RDFWriter} for NQUADS. */ public class NQuadsParser extends RDFParserBase implements RDFParser {
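Until that FIXME is addressed, NQUADS can be parsed but not serialized, which is why interchange via the REST API cannot be round-trip tested yet. A minimal sketch of such a writer against the Sesame RDFWriter interface might look like the following. This is illustrative and untested: the MIME type, charset, file extension, and the simplified literal escaping are all assumptions, not the registered format.

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.Charset;

import org.openrdf.model.BNode;
import org.openrdf.model.Literal;
import org.openrdf.model.Statement;
import org.openrdf.model.URI;
import org.openrdf.model.Value;
import org.openrdf.rio.RDFFormat;
import org.openrdf.rio.RDFHandlerException;
import org.openrdf.rio.RDFWriter;

public class NQuadsWriter implements RDFWriter {

    // Assumed format declaration for the sketch; the parser class would
    // register the authoritative RDFFormat instance.
    static final RDFFormat NQUADS = new RDFFormat("N-Quads", "text/x-nquads",
            Charset.forName("US-ASCII"), "nq",
            false/* supportsNamespaces */, true/* supportsContexts */);

    private final Writer w;

    public NQuadsWriter(final OutputStream os) {
        this.w = new OutputStreamWriter(os, Charset.forName("US-ASCII"));
    }

    public RDFFormat getRDFFormat() { return NQUADS; }

    public void startRDF() throws RDFHandlerException { /* NOP */ }

    public void endRDF() throws RDFHandlerException {
        try { w.flush(); } catch (IOException e) { throw new RDFHandlerException(e); }
    }

    public void handleNamespace(String prefix, String uri) { /* N-Quads has no namespaces. */ }

    public void handleComment(String comment) throws RDFHandlerException {
        try { w.write("# " + comment + "\n"); } catch (IOException e) { throw new RDFHandlerException(e); }
    }

    public void handleStatement(final Statement st) throws RDFHandlerException {
        try {
            // One "<s> <p> <o> [<c>] ." line per statement; the context is
            // written only when present.
            w.write(toNQ(st.getSubject()));
            w.write(" ");
            w.write(toNQ(st.getPredicate()));
            w.write(" ");
            w.write(toNQ(st.getObject()));
            if (st.getContext() != null) {
                w.write(" ");
                w.write(toNQ(st.getContext()));
            }
            w.write(" .\n");
        } catch (IOException e) {
            throw new RDFHandlerException(e);
        }
    }

    private String toNQ(final Value v) {
        if (v instanceof URI)
            return "<" + v.stringValue() + ">";
        if (v instanceof BNode)
            return "_:" + ((BNode) v).getID();
        final Literal lit = (Literal) v;
        // Simplified escaping; the full N-Quads grammar also covers \r, \t,
        // and non-ASCII code points.
        final String label = lit.getLabel().replace("\\", "\\\\")
                .replace("\"", "\\\"").replace("\n", "\\n");
        if (lit.getDatatype() != null)
            return "\"" + label + "\"^^<" + lit.getDatatype().stringValue() + ">";
        if (lit.getLanguage() != null)
            return "\"" + label + "\"@" + lit.getLanguage();
        return "\"" + label + "\"";
    }
}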
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -26,8 +26,14 @@ import org.openrdf.query.parser.ParsedQuery; import org.openrdf.query.parser.QueryParser; import org.openrdf.query.parser.sparql.SPARQLParserFactory; +import org.openrdf.query.resultio.TupleQueryResultFormat; +import org.openrdf.query.resultio.TupleQueryResultWriter; +import org.openrdf.query.resultio.TupleQueryResultWriterRegistry; import org.openrdf.query.resultio.sparqlxml.SPARQLResultsXMLWriter; import org.openrdf.repository.RepositoryException; +import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RDFWriterRegistry; import org.openrdf.rio.rdfxml.RDFXMLWriter; import org.openrdf.sail.SailException; @@ -232,7 +238,7 @@ * The timestamp of the view for that namespace against which the query * will be run. */ - private final long timestamp; + public final long timestamp; /** The SPARQL query string. */ protected final String queryStr; @@ -247,6 +253,12 @@ */ protected final String mimeType; + /** + * The {@link RDFFormat} for the response (required only for queries + * which produce RDF data, as opposed to RDF result sets). + */ + protected final RDFFormat format; + /** The request. */ private final HttpServletRequest req; @@ -270,7 +282,7 @@ * The queryId used by the {@link QueryEngine}. */ protected final UUID queryId2; - + /** * * @param namespace * @param timestamp * the query will be run. * @param queryStr * The SPARQL query string. - * @param req The request. - * @param resp The response. + * @param mimeType + * The MIME type to be used for the response. + * @param format + * The {@link RDFFormat} for the response (required only for + * queries which produce RDF data, as opposed to RDF result + * sets). + * @param req + * The request. + * @param resp + * The response. */ protected AbstractQueryTask(// final String namespace,// final long timestamp,// final String queryStr,// final QueryType queryType,// final String mimeType,// + final RDFFormat format,// final HttpServletRequest req,// final OutputStream os// ) { @@ -298,6 +319,7 @@ this.queryStr = queryStr; this.queryType = queryType; this.mimeType = mimeType; + this.format = format; this.req = req; this.os = os; this.queryId = Long.valueOf(m_queryIdFactory.incrementAndGet()); @@ -328,15 +350,15 @@ if(log.isTraceEnabled()) log.trace("Query running..."); // try { - doQuery(cxn, os); + doQuery(cxn, os); // } catch(Throwable t) { // /* // * Log the query and the exception together. // */ // log.error(t.getLocalizedMessage() + ":\n" + queryStr, t); // } - if(log.isTraceEnabled()) - log.trace("Query done - flushing results."); + if(log.isTraceEnabled()) + log.trace("Query done - flushing results."); os.flush(); os.close(); if(log.isTraceEnabled()) @@ -357,6 +379,8 @@ if (cxn != null) { try { cxn.close(); + if(log.isTraceEnabled()) + log.trace("Connection closed."); } catch (Throwable t) { log.error(t, t); } @@ -373,11 +397,12 @@ public TupleQueryTask(final String namespace, final long timestamp, final String queryStr, final QueryType queryType, - final String mimeType, final HttpServletRequest req, + final String mimeType, final RDFFormat format, + final HttpServletRequest req, final OutputStream os) { - super(namespace, timestamp, queryStr, queryType, mimeType, req, - os); + super(namespace, timestamp, queryStr, queryType, mimeType, format, + req, os); } @@ -386,20 +411,25 @@ final BigdataSailTupleQuery query = cxn.prepareTupleQuery( QueryLanguage.SPARQL, queryStr, baseURI); - // TODO What was this alternative logic about? -// if (true) { -// StringWriter strw = new StringWriter(); -// -// query.evaluate(new SPARQLResultsXMLWriter(new XMLWriter(strw))); -// -// OutputStreamWriter outstr = new OutputStreamWriter(os); -// String res = strw.toString(); -// outstr.write(res); -// outstr.flush(); -// outstr.close(); -// } else { - query.evaluate(new SPARQLResultsXMLWriter(new XMLWriter(os))); -// } + + /* + * FIXME Raise this into the query CONNEG logic parallel to how + * we handle queries which result in RDF data rather than SPARQL + * result sets. + */ + final TupleQueryResultFormat format = TupleQueryResultWriterRegistry + .getInstance().getFileFormatForMIMEType(mimeType); + + final TupleQueryResultWriter w = format == null ? new SPARQLResultsXMLWriter( + new XMLWriter(os)) + : TupleQueryResultWriterRegistry.getInstance().get(format) + .getWriter(os); + +// final RDFWriter w = format == null ? new RDFXMLWriter(os) +// : RDFWriterRegistry.getInstance().get(format).getWriter(os); + + query.evaluate(w); + } } @@ -411,11 +441,12 @@ public GraphQueryTask(final String namespace, final long timestamp, final String queryStr, final QueryType queryType, - final String mimeType, final HttpServletRequest req, + final String mimeType, final RDFFormat format, + final HttpServletRequest req, final OutputStream os) { - super(namespace, timestamp, queryStr, queryType, mimeType, req, - os); + super(namespace, timestamp, queryStr, queryType, mimeType, format, + req, os); } @@ -426,8 +457,33 @@ final BigdataSailGraphQuery query = cxn.prepareGraphQuery( QueryLanguage.SPARQL, queryStr, baseURI); - query.evaluate(new RDFXMLWriter(os)); + /* + * FIXME An error thrown here (such as if format is null and we do + * not check it) will cause the response to hang, at least for the + * test suite.
Look into this further and make the error handling + * bullet proof! + * + * This may be related to queryId2. That should be imposed on the + * IRunningQuery via a query hint such that the QueryEngine assigns + * that UUID to the query. We can then correlate the queryId to the + * IRunningQuery, which is important for some of the status pages. + * This will also let us INTERRUPT the IRunningQuery if there is an + * error during evaluation, which might be necessary. For example, + * if the client dies while the query is running. Look at the old + * NSS code and see what it was doing and whether this logic was + * lost or simply never implemented. + * + * However, I do not see how that would explain the failure of the + * ft.get() method to return. + */ +// if(true) +// throw new RuntimeException(); + final RDFWriter w = format == null ? new RDFXMLWriter(os) + : RDFWriterRegistry.getInstance().get(format).getWriter(os); + + query.evaluate(w); + } } @@ -474,28 +530,65 @@ final QueryType queryType = QueryType.fromQuery(queryStr); - final String mimeType; - switch (queryType) { - case ASK: - /* - * FIXME handle ASK. - */ - break; - case DESCRIBE: - case CONSTRUCT: - // FIXME Conneg for the mime type for construct/describe! - mimeType = BigdataRDFServlet.MIME_RDF_XML; + /* + * CONNEG for the mime type. + * + * TODO This is a hack which will obey an Accept header IF the header + * contains a single well-formed MIME Type. Complex accept headers will + * not be matched and quality parameters (q=...) are ignored. (Sesame + * has some stuff related to generating Accept headers in their + * RDFFormat which could bear some more looking into in this regard.) + */ + final String acceptStr = req.getHeader("Accept"); + + RDFFormat format = acceptStr == null ? null : RDFFormat + .forMIMEType(acceptStr); + + final String mimeType; + switch (queryType) { + case ASK: { + /* + * FIXME handle ASK. + */ + break; + } + case DESCRIBE: + case CONSTRUCT: { + + if (format != null) { + + mimeType = format.getDefaultMIMEType(); + + } else { + + mimeType = BigdataRDFServlet.MIME_RDF_XML; + + } + return new GraphQueryTask(namespace, timestamp, queryStr, - queryType, mimeType, req, os); - case SELECT: - mimeType = BigdataRDFServlet.MIME_SPARQL_RESULTS_XML; + queryType, mimeType, format, req, os); + } + case SELECT: { + + if (format != null) { + + mimeType = format.getDefaultMIMEType(); + + } else { + + mimeType = BigdataRDFServlet.MIME_SPARQL_RESULTS_XML; + + } + return new TupleQueryTask(namespace, timestamp, queryStr, - queryType, mimeType, req, os); + queryType, mimeType, format, req, os); + } + } // switch(queryType) - throw new RuntimeException("Unknown query type: " + queryType); + throw new RuntimeException("Unknown query type: " + queryType); - } + } /** * Metadata about running queries. @@ -833,8 +926,8 @@ } /** - * Return a list of the namespaces for the registered - * {@link AbstractTripleStore}s. + * Return a list of the namespaces for the {@link AbstractTripleStore}s + * registered against the bigdata instance. */ /*package*/ List<String> getNamespaces() {
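The TODO in the CONNEG block above notes that complex Accept headers and quality parameters are ignored. For comparison, a negotiation routine that does honor q=... values might look like the following sketch (illustrative only, not part of this commit; the class and method names are hypothetical):

import org.openrdf.rio.RDFFormat;

public class AcceptHeaderUtil {

    /**
     * Returns the highest-quality RDFFormat named in the Accept header, or
     * null if none of the listed MIME types maps to a known RDF format.
     */
    public static RDFFormat negotiate(final String acceptStr) {
        if (acceptStr == null)
            return null;
        RDFFormat best = null;
        double bestQ = -1d;
        for (String entry : acceptStr.split(",")) {
            final String[] parts = entry.trim().split(";");
            final String mimeType = parts[0].trim();
            double q = 1d; // default quality per RFC 2616.
            for (int i = 1; i < parts.length; i++) {
                final String p = parts[i].trim();
                if (p.startsWith("q=")) {
                    try {
                        q = Double.parseDouble(p.substring(2));
                    } catch (NumberFormatException ex) {
                        // Ignore malformed quality values.
                    }
                }
            }
            final RDFFormat format = RDFFormat.forMIMEType(mimeType);
            if (format != null && q > bestQ) {
                best = format;
                bestQ = q;
            }
        }
        return best;
    }
}

Usage would then replace the single-MIME-type lookup, e.g. final RDFFormat format = AcceptHeaderUtil.negotiate(req.getHeader("Accept"));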
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -59,10 +59,19 @@ /** * A SPARQL result set in XML. + * + * @see http://www.w3.org/TR/rdf-sparql-XMLres/ */ static protected final transient String MIME_SPARQL_RESULTS_XML = "application/sparql-results+xml"; - + /** + * A SPARQL result set in JSON. + * + * @see http://www.w3.org/TR/rdf-sparql-json-res/ + */ + static protected final transient String MIME_SPARQL_RESULTS_JSON = "application/sparql-results+json"; + + /** * RDF/XML. */ static protected final transient String MIME_RDF_XML = "application/rdf+xml"; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -18,7 +18,6 @@ import org.openrdf.rio.RDFParserFactory; import org.openrdf.rio.RDFParserRegistry; import org.openrdf.rio.helpers.RDFHandlerBase; -import org.openrdf.rio.rdfxml.RDFXMLParser; import org.openrdf.sail.SailException; import com.bigdata.journal.ITx; @@ -121,9 +120,29 @@ conn = getBigdataRDFContext().getUnisolatedConnection( namespace); - final RDFXMLParser rdfParser = new RDFXMLParser(conn - .getTripleStore().getValueFactory()); + /* + * FIXME The RDF for the *query* will be generated using the + * MIME type negotiated based on the Accept header (if any) + * in the DELETE request. That means that we need to look at + * the Accept header here and choose the right RDFFormat for + * the parser. (The alternative is to have a way to run the + * query task where we specify the MIME Type of the result + * directly. That might be better all around.) + */ + final String contentType = req.getContentType(); + + final RDFFormat format = RDFFormat.forMIMEType(contentType, + RDFFormat.RDFXML); + + final RDFParserFactory factory = RDFParserRegistry + .getInstance().get(format); + + final RDFParser rdfParser = factory.getParser(); + + rdfParser.setValueFactory(conn.getTripleStore() + .getValueFactory()); + + rdfParser.setVerifyData(false); rdfParser.setStopAtFirstError(true); Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -8,6 +8,7 @@ import org.apache.log4j.Logger; +import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; /** @@ -67,7 +68,7 @@ return; } - + /* * Setup task to execute the query. The task is executed on a thread * pool.
This bounds the possible concurrency of query execution (as @@ -86,17 +87,96 @@ final FutureTask<Void> ft = new FutureTask<Void>(queryTask); if (log.isTraceEnabled()) - log.trace("Will run query: " + queryStr); + log.trace("Will run query: " + queryStr); /* - * Note: This is run on an ExecutorService with a configured thread - * pool size so we can avoid running too many queries concurrently. + * Setup the response headers. */ - // Setup the response. - // TODO Move charset choice into conneg logic. - buildResponse(resp, HTTP_OK, queryTask.mimeType + "; charset='" + charset + "'"); - + resp.setStatus(HTTP_OK); + + // Figure out the filename extension for the response. + + final String ext; + final String charset; + + if(queryTask.format != null) { + + /* + * If some RDFFormat was negotiated, then construct the filename + * for the attachment using the default extension for that + * format and the queryId. + */ + + ext = queryTask.format.getDefaultFileExtension(); + + charset = queryTask.format.getCharset().name(); + + } else { + + if(queryTask.mimeType.equals(MIME_SPARQL_RESULTS_XML)) { + + // See http://www.w3.org/TR/rdf-sparql-XMLres/ + + ext = "srx"; // Sparql Result Set. + + } else if(queryTask.mimeType.equals(MIME_SPARQL_RESULTS_JSON)) { + + // See http://www.w3.org/TR/rdf-sparql-json-res/ + + ext = "srj"; + + } else { + + ext = "xxx"; + + } + + charset = QueryServlet.charset; + + } + + resp.setContentType(queryTask.mimeType); + + resp.setCharacterEncoding(charset); + + resp.setHeader("Content-disposition", "attachment; filename=query" + + queryTask.queryId + "." + ext); + + if(TimestampUtility.isCommitTime(queryTask.timestamp)) { + + /* + * A read against a commit time or a read-only tx. Such results + * SHOULD be cached because the data from which the response was + * constructed have snapshot isolation. (Note: It is possible + * that the commit point against which the query reads will be + * aged out of the database and that the query would therefore + * fail if it were retried. This can happen with the RWStore or + * in scale-out.) + * + * Note: READ_COMMITTED requests SHOULD NOT be cached. Such + * requests will read against the then current committed state + * of the database each time they are processed. + * + * Note: UNISOLATED queries SHOULD NOT be cached. Such + * operations will read on (and write on) the then current state + * of the unisolated indices on the database each time they are + * processed. The results of such operations could be different + * with each request. + * + * Note: Full read-write transaction requests SHOULD NOT be + * cached unless they are queries and the transaction scope is + * limited to the request (rather than running across multiple + * requests). + */ + + resp.addHeader("Cache-Control", "public"); + + // to disable caching. + // r.addHeader("Cache-Control", "no-cache"); + + } + + // Begin executing the query (asynchronous) getBigdataRDFContext().queryService.execute(ft);
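Putting those pieces together, a negotiated CONSTRUCT request and its response would look roughly like the following exchange. The concrete values are illustrative only: the filename combines the literal prefix "query", the queryId, and the negotiated default extension, and the charset is whatever the negotiated format declares.

GET /sparql?query=CONSTRUCT%20... HTTP/1.1
Accept: text/rdf+n3

HTTP/1.1 200 OK
Content-Type: text/rdf+n3; charset=UTF-8
Content-disposition: attachment; filename=query12.n3
Cache-Control: public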
Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -34,13 +34,6 @@ // static private final transient Logger log = Logger // .getLogger(StatusServlet.class); -// @Override -// public void init() throws ServletException { -// -// super.init(); -// -// } - /** * <p> * A status page. Options include: Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java 2011-04-14 14:04:58 UTC (rev 4399) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServer.java 2011-04-15 00:33:29 UTC (rev 4400) @@ -1,13 +1,12 @@ package com.bigdata.rdf.sail.webapp; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; -import java.io.OutputStreamWriter; import java.io.Reader; import java.io.StringWriter; -import java.io.Writer; import java.net.HttpURLConnection; import java.net.URL; import java.net.URLEncoder; @@ -24,6 +23,7 @@ import org.openrdf.model.Literal; import org.openrdf.model.Statement; import org.openrdf.model.URI; +import org.openrdf.model.ValueFactory; import org.openrdf.model.impl.GraphImpl; import org.openrdf.model.impl.LiteralImpl; import org.openrdf.model.impl.StatementImpl; @@ -36,9 +36,14 @@ import org.openrdf.query.resultio.TupleQueryResultParser; import org.openrdf.query.resultio.sparqlxml.SPARQLResultsXMLParserFactory; import org.openrdf.rio.RDFFormat; +import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParser; +import org.openrdf.rio.RDFParserFactory; +import org.openrdf.rio.RDFParserRegistry; +import org.openrdf.rio.RDFWriter; +import org.openrdf.rio.RDFWriterFactory; +import org.openrdf.rio.RDFWriterRegistry; import org.openrdf.rio.helpers.StatementCollector; -import org.openrdf.rio.rdfxml.RDFXMLParser; import com.bigdata.journal.BufferMode; import com.bigdata.journal.ITx; @@ -58,9 +63,24 @@ * Test suite for {@link RESTServlet} (SPARQL end point and REST API for RDF * data). * + * @todo Test default-graph-uri(s) and named-graph-uri(s). + * + * @todo Verify conneg for various MIME types for different kinds of queries. + * E.g., conneg for json result sets for SELECT, conneg for n3 response + * for CONSTRUCT, etc. The logic for handling Accept headers does not pay + * attention to q=... parameters, so only a single mime type should be + * specified in the Accept header. + * + * @todo NQUADS RDFWriter needs to be written. Then we can test NQUADS + * interchange. + * + * @todo Add tests for SIDS mode interchange of RDF XML. + * * @todo The methods which return a mutation count should verify the returned * XML document. + * + * @todo Test suite for reading from a historical commit point.
+ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id: TestNanoSparqlServer.java 4398 2011-04-14 13:55:29Z thompsonbry * $ @@ -71,10 +91,10 @@ private Server m_fixture; private String m_serviceURL; - final static String REST = ""; + final private static String requestPath = ""; protected void setUp() throws Exception { - + final Properties properties = getProperties(); final String namespace = getName(); @@ -219,27 +239,29 @@ /** The URL of the SPARQL endpoint. */ public String serviceURL = null; - // public String username = null; - // public String password = null; /** The HTTP method (GET, POST, etc). */ public String method = "GET"; /** The SPARQL query. */ public String queryStr = null; - /** The default graph URI (optional). */ - public String defaultGraphUri = null; + /** TODO DG and NG protocol params: The default graph URI (optional). */ + public String defaultGraphUri = null; + /** The accept header. */ + public String acceptHeader = // + BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON + ";q=1" + // + "," + // + RDFFormat.RDFXML.getDefaultMIMEType() + ";q=1"// + ; - /** The connection timeout (ms) -or- ZERO (0) for an infinate timeout. */ - // public int timeout = DEFAULT_TIMEOUT; + /** The connection timeout (ms) -or- ZERO (0) for an infinite timeout. */ public int timeout = 0; - // public boolean showQuery = false; } - protected HttpURLConnection doConnect(final String urlString, + private HttpURLConnection doConnect(final String urlString, final String method) throws Exception { - + final URL url = new URL(urlString); - + final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(method); @@ -247,6 +269,7 @@ conn.setUseCaches(false); return conn; + } /** @@ -256,8 +279,6 @@ * The query request. * * @return The connection. - * - * TODO Test default-graph-uri(s) and named-graph-uri(s). */ protected HttpURLConnection doSparqlQuery(final QueryOptions opts, final String servlet) throws Exception { @@ -274,21 +295,12 @@ HttpURLConnection conn = null; try { - conn = doConnect(urlString, opts.method); + + conn = doConnect(urlString, opts.method); conn.setReadTimeout(opts.timeout); - /* - * Set an appropriate Accept header for the query. - * - * @todo ASK queries have boolean data, JSON format is also - * available. - */ - conn.setRequestProperty("Accept",// - BigdataRDFServlet.MIME_SPARQL_RESULTS_XML + ";q=1" + // - "," + // - BigdataRDFServlet.MIME_RDF_XML + ";q=1"// - ); + conn.setRequestProperty("Accept", opts.acceptHeader); // write out the request headers if (log.isDebugEnabled()) { @@ -352,8 +364,26 @@ final String baseURI = ""; - final RDFXMLParser rdfParser = new RDFXMLParser(new ValueFactoryImpl()); + final String contentType = conn.getContentType(); + if (contentType == null) + fail("Not found: Content-Type"); + + final RDFFormat format = RDFFormat.forMIMEType(contentType); + + if (format == null) + fail("RDFFormat not found: Content-Type=" + contentType); + + final RDFParserFactory factory = RDFParserRegistry.getInstance().get(format); + + if (factory == null) + fail("RDFParserFactory not found: Content-Type=" + contentType + + ", format=" + format); + + final RDFParser rdfParser = factory.getParser(); + + rdfParser.setValueFactory(new ValueFactoryImpl()); + rdfParser.setVerifyData(true); rdfParser.setStopAtFirstError(true); @@ -492,8 +522,12 @@ opts.method = "GET"; // No solutions (assuming a told triple kb or quads kb w/o axioms). 
- assertEquals(0, countResults(doSparqlQuery(opts, REST))); + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + // Now with json. + opts.acceptHeader = BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON; + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + } /** @@ -509,21 +543,87 @@ opts.method = "POST"; // No solutions (assuming a told triple kb or quads kb w/o axioms). - assertEquals(0, countResults(doSparqlQuery(opts, REST))); + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + // Now with json. + opts.acceptHeader = BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON; + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + } - public void test_POST_UPDATE_withBody_NTRIPLES() throws Exception { + public void test_POST_UPDATE_withBody_RDFXML() throws Exception { - do_UPDATE_withBody_NTRIPLES("POST", 23, REST); - - } + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.RDFXML); + + } - public void test_PUT_UPDATE_withBody_NTRIPLES() throws Exception { - - do_UPDATE_withBody_NTRIPLES("PUT", 23, REST); - - } + public void test_POST_UPDATE_withBody_NTRIPLES() throws Exception { + + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.NTRIPLES); + + } + + public void test_POST_UPDATE_withBody_N3() throws Exception { + + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.N3); + + } + + public void test_POST_UPDATE_withBody_TURTLE() throws Exception { + + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.TURTLE); + + } + + // Note: quads interchange + public void test_POST_UPDATE_withBody_TRIG() throws Exception { + + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.TRIG); + + } + + // Note: quads interchange + public void test_POST_UPDATE_withBody_TRIX() throws Exception { + + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.TRIX); + + } + + public void test_PUT_UPDATE_withBody_RDFXML() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.RDFXML); + + } + + public void test_PUT_UPDATE_withBody_NTRIPLES() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.NTRIPLES); + + } + + public void test_PUT_UPDATE_withBody_N3() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.N3); + + } + + public void test_PUT_UPDATE_withBody_TURTLE() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.TURTLE); + + } + + public void test_PUT_UPDATE_withBody_TRIG() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.TRIG); + + } + + public void test_PUT_UPDATE_withBody_TRIX() throws Exception { + + do_UPDATE_withBody("PUT", 23, requestPath, RDFFormat.TRIX); + + } /** * Select everything in the kb using a POST. @@ -537,14 +637,14 @@ opts.queryStr = queryStr; opts.method = "POST"; - do_UPDATE_withBody_NTRIPLES("POST", 23, REST); + do_UPDATE_withBody("POST", 23, requestPath, RDFFormat.NTRIPLES); - assertEquals(23, countResults(doSparqlQuery(opts, REST))); + assertEquals(23, countResults(doSparqlQuery(opts, requestPath))); - do_DELETE_with_Query(REST, "construct {?s ?p ?o} where {?s ?p ?o}"); + do_DELETE_with_Query(requestPath, "construct {?s ?p ?o} where {?s ?p ?o}"); // No solutions (assuming a told triple kb or quads kb w/o axioms). 
- assertEquals(0, countResults(doSparqlQuery(opts, REST))); + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + // Now with json. + opts.acceptHeader = BigdataRDFServlet.MIME_SPARQL_RESULTS_JSON; + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + } public void test_DELETE_withPOST_RDFXML() throws Exception { + doDeleteWithPostTest(RDFFormat.RDFXML); + } + + public void test_DELETE_withPOST_NTRIPLES() throws Exception { + doDeleteWithPostTest(RDFFormat.NTRIPLES); + } + + public void test_DELETE_withPOST_N3() throws Exception { + doDeleteWithPostTest(RDFFormat.N3); + } + + public void test_DELETE_withPOST_TURTLE() throws Exception { + doDeleteWithPostTest(RDFFormat.TURTLE); + } + + public void test_DELETE_withPOST_TRIG() throws Exception { + doDeleteWithPostTest(RDFFormat.TRIG); + } + + public void test_DELETE_withPOST_TRIX() throws Exception { + doDeleteWithPostTest(RDFFormat.TRIX); + } + /** - * Select everything in the kb using a POST. + * Test helper PUTs some data, verifies that it is visible, DELETEs the data, + * and then verifies that it is gone. + * + * @param format + * The interchange format. */ - public void test_DELETE_withPOST() throws Exception { - + private void doDeleteWithPostTest(final RDFFormat format) throws Exception { + final String queryStr = "select * where {?s ?p ?o}"; final QueryOptions opts = new QueryOptions(); opts.serviceURL = m_serviceURL; opts.queryStr = queryStr; opts.method = "POST"; - do_UPDATE_withBody_NTRIPLES("POST", 23, REST); - - assertEquals(23, countResults(doSparqlQuery(opts, REST))); + do_UPDATE_withBody("POST", 23, requestPath, format); - do_DELETE_withBody_NTRIPLES("", 23); + assertEquals(23, countResults(doSparqlQuery(opts, requestPath))); + do_DELETE_withBody("", 23, format); + // No solutions (assuming a told triple kb or quads kb w/o axioms).
- assertEquals(0, countResults(doSparqlQuery(opts, REST))); + assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + + } - } - private void do_DELETE_with_Query(final String servlet, final String query) { HttpURLConnection conn = null; try { @@ -624,10 +752,12 @@ conn.disconnect(); throw new RuntimeException(t); } - } + } - private void do_DELETE_withBody_NTRIPLES(final String servlet, final int ntriples) { - HttpURLConnection conn = null; + private void do_DELETE_withBody(final String servlet, final int ntriples, + final RDFFormat format) { + + HttpURLConnection conn = null; try { final URL url = new URL(m_serviceURL + "/" + servlet+"?delete"); @@ -638,22 +768,19 @@ conn.setUseCaches(false); conn.setReadTimeout(0);// TODO timeout (ms) - final String defmimetype = RDFFormat.NTRIPLES.getDefaultMIMEType(); + conn + .setRequestProperty("Content-Type", format + .getDefaultMIMEType()); - conn.setRequestProperty("Content-Type", defmimetype); - - final String data = genNTRIPLES(ntriples); + final byte[] data = genNTRIPLES(ntriples, format); - conn.setRequestProperty("Content-Length", "" + Integer.toString(data.length())); + conn.setRequestProperty("Content-Length", "" + + Integer.toString(data.length)); final OutputStream os = conn.getOutputStream(); try { - final Writer w = new OutputStreamWriter(os); - w.write(data); - w.flush(); - w.close(); + os.write(data); os.flush(); - os.close(); } finally { os.close(); } @@ -697,24 +824,67 @@ // assertTrue(rc == 405); // NOT_ALLOWED // } // } - - String genNTRIPLES(final int ntriples) { - StringBuffer databuf = new StringBuffer(); - databuf.append("#@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n"); - databuf.append("#@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n"); - databuf.append("#@prefix owl: <http://www.w3.org/2002/07/owl#> .\n"); - databuf.append("#@prefix : <#> .\n"); - for (int i = 0; i < ntriples; i++) { - databuf.append("<http://www.bigdata.org/b> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.bigdata.org/c#" + i + ">.\n"); - } - - return databuf.toString(); + + /** + * Generates some statements and serializes them using the specified + * {@link RDFFormat}. + * + * @param ntriples + * The #of statements to generate. + * @param format + * The format. + * + * @return the serialized statements. + */ + final byte[] genNTRIPLES(final int ntriples, final RDFFormat format) + throws RDFHandlerException { + + final Graph g = new GraphImpl(); + + final ValueFactory f = new ValueFactoryImpl(); + + final URI s = f.createURI("http://www.bigdata.org/b"); + + final URI rdfType = f + .createURI("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"); + + for (int i = 0; i < ntriples; i++) { + + final URI o = f.createURI("http://www.bigdata.org/c#" + i); + + g.add(s, rdfType, o); + + } + + final RDFWriterFactory writerFactory = RDFWriterRegistry.getInstance() + .get(format); + + if (writerFactory == null) + fail("RDFWriterFactory not found: format=" + format); + + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + final RDFWriter writer = writerFactory.getWriter(baos); + + writer.startRDF(); + + for (Statement stmt : g) { + + writer.handleStatement(stmt); + + } + + writer.endRDF(); + + return baos.toByteArray(); + } + /** - * @todo Test of POST w/ BODY having data to be loaded. + * FIXME Test of POST w/ BODY having data to be loaded. 
*/ - public void do_UPDATE_withBody_NTRIPLES(final String method, - final int ntriples, final String servlet) throws Exception { + private void do_UPDATE_withBody(final String method, final int ntriples, + final String servlet, final RDFFormat format) throws Exception { HttpURLConnection conn = null; try { @@ -727,23 +897,18 @@ conn.setUseCaches(false); conn.setReadTimeout(0);// TODO timeout (ms) - final String defmimetype = RDFFormat.NTRIPLES.getDefaultMIMEType(); + conn.setRequestProperty("Content-Type", format + .getDefaultMIMEType()); - conn.setRequestProperty("Content-Type", defmimetype); + final byte[] data = genNTRIPLES(ntriples, format); - final String data = genNTRIPLES(ntriples); + conn.setRequestProperty("Content-Length", Integer.toString(data + .length)); - conn.setRequestProperty("Content-Length", "" - + Integer.toString(data.length())); - final OutputStream os = conn.getOutputStream(); try { - final Writer w = new OutputStreamWriter(os); - w.write(data); - w.flush(); - w.close(); + os.write(data); os.flush(); - os.close(); } finally { os.close(); } @@ -777,7 +942,7 @@ opts.queryStr = queryStr; opts.method = "GET"; - assertEquals(ntriples, countResults(doSparqlQuery(opts, REST))); + assertEquals(ntriples, countResults(doSparqlQuery(opts, requestPath))); } } @@ -806,12 +971,56 @@ } - - } - public void test_GET_DESCRIBE() throws Exception { + // TODO Also test POST DESCRIBE + public void test_GET_DESCRIBE_RDFXML() throws Exception { + doDescribeTest(RDFFormat.RDFXML); + + } + + public void test_GET_DESCRIBE_NTRIPLES() throws Exception { + + doDescribeTest(RDFFormat.NTRIPLES); + + } + + public void test_GET_DESCRIBE_N3() throws Exception { + + doDescribeTest(RDFFormat.N3); + + } + + public void test_GET_DESCRIBE_TURTLE() throws Exception { + + doDescribeTest(RDFFormat.TURTLE); + + } + + public void test_GET_DESCRIBE_TRIG() throws Exception { + + doDescribeTest(RDFFormat.TRIG); + + } + + public void test_GET_DESCRIBE_TRIX() throws Exception { + + doDescribeTest(RDFFormat.TRIX); + + } + + /** + * Inserts some data into the KB and then issues a DESCRIBE query against + * the REST API and verifies the expected results. + * + * @param format + * The format is used to specify the Accept header. + * + * @throws Exception + */ + private void doDescribeTest(final RDFFormat format) throws Exception { + final URI mike = new URIImpl(BD.NAMESPACE + "Mike"); final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan"); final URI person = new URIImpl(BD.NAMESPACE + "Person"); @@ -876,15 +1085,18 @@ " ?x rdf:type bd:Person . " +// " ?x bd:likes bd:RDF " +// "}"; + opts.acceptHeader = format.getDefaultMIMEType(); - final Graph actual = buildGraph(doSparqlQuery(opts, REST)); + final Graph actual = buildGraph(doSparqlQuery(opts, requestPath)); assertSameGraph(expected, actual); } } - + + // TODO Test for all RDFFormats. + // TODO Also test POST CONSTRUCT public void test_GET_CONSTRUCT() throws Exception { final URI mike = new URIImpl(BD.NAMESPACE + "Mike"); final URI bryan = new URIImpl(BD.NAMESPACE + "Bryan"); final URI person = new URIImpl(BD.NAMESPACE + "Person"); @@ -953,7 +1165,7 @@ // " ?x bd:likes bd:RDF " +// "}"; - final Graph actual = buildGraph(doSparqlQuery(opts, REST)); + final Graph actual = buildGraph(doSparqlQuery(opts, requestPath)); assertSameGraph(expected, actual);
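The assertSameGraph helper used by these tests is not part of this diff. A minimal version of such a check might look like the following sketch; it assumes Sesame's Graph extends Collection<Statement> and the JUnit assertions available to the test class, and it is not the actual implementation.

// Hypothetical sketch of the assertSameGraph helper (lives in the test class).
static void assertSameGraph(final Graph expected, final Graph actual) {
    // Containment in one direction plus equal size implies the same
    // statement set (assuming neither graph contains duplicates).
    for (Statement st : expected) {
        if (!actual.contains(st))
            fail("Not found: " + st);
    }
    assertEquals("size", expected.size(), actual.size());
}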