From: <tho...@us...> - 2014-06-17 21:14:40
Revision: 8501 http://sourceforge.net/p/bigdata/code/8501 Author: thompsonbry Date: 2014-06-17 21:14:25 +0000 (Tue, 17 Jun 2014) Log Message: ----------- Continued progress in support of #566 and #753. I am iterating over the REST API method implementations and restructuring them to support both #566 and, by extension, #753 as well. It is not necessary to change over to group commit in order to derive the benefit from this refactoring, but when we do change over it will be with a single boolean switch from the existing operational mode to the group commit operational mode. I have modified the code to execute the RestApiTask in the caller's thread for the non-group-commit code path. This avoids the potential introduction of another thread for heavy query workloads. I have cleaned up some of the launderThrowable() invocations to provide better information about the REST API request that failed. This is not yet systematic. SPARQL Query and SPARQL UPDATE now go through the RestApiTask pattern. Continuing to identify, document, and work through potential problems in the REST API that would conflict with group commit semantics. All tests are passing. Group commit is still disabled. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/BigdataStatics.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -125,7 +125,6 @@ * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > * Concurrent unisolated operations against multiple KBs </a> */ - public static final boolean NSS_GROUP_COMMIT = Boolean - .getBoolean("com.bigdata.nssGroupCommit"); + public static final boolean NSS_GROUP_COMMIT = Boolean.getBoolean("com.bigdata.nssGroupCommit"); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 
=================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -95,6 +95,7 @@ import com.bigdata.rdf.sail.ISPARQLUpdateListener; import com.bigdata.rdf.sail.SPARQLUpdateEvent; import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; +import com.bigdata.rdf.sail.webapp.RestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.StringUtil; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryHints; @@ -365,13 +366,10 @@ /** * Immediate shutdown interrupts any running queries. * - * FIXME Must abort any open transactions. This does not matter for the - * standalone database, but it will make a difference in scale-out. The - * transaction identifiers could be obtained from the {@link #queries} map. - * - * FIXME This must also abort any running updates. Those are currently - * running in thread handling the {@link HttpServletRequest}, however it - * probably makes sense to execute them on a bounded thread pool as well. + * FIXME GROUP COMMIT: Shutdown should abort open transactions (including + * queries and updates). This hould be addressed when we handle group commit + * since that provides us with a means to recognize and interrupt each + * running {@link RestApiTask}. */ void shutdownNow() { @@ -1135,83 +1133,125 @@ abstract protected void doQuery(BigdataSailRepositoryConnection cxn, OutputStream os) throws Exception; - @Override - final public Void call() throws Exception { - BigdataSailRepositoryConnection cxn = null; - boolean success = false; - try { - // Note: Will be UPDATE connection if UPDATE request!!! - cxn = getQueryConnection(namespace, timestamp); - if(log.isTraceEnabled()) - log.trace("Query running..."); - beginNanos = System.nanoTime(); - if (explain && !update) { - /* - * The data goes to a bit bucket and we send an - * "explanation" of the query evaluation back to the caller. - * - * Note: The trick is how to get hold of the IRunningQuery - * object. It is created deep within the Sail when we - * finally submit a query plan to the query engine. We have - * the queryId (on queryId2), so we can look up the - * IRunningQuery in [m_queries] while it is running, but - * once it is terminated the IRunningQuery will have been - * cleared from the internal map maintained by the - * QueryEngine, at which point we can not longer find it. - * - * Note: We can't do this for UPDATE since it would have a - * side-effect anyway. The way to "EXPLAIN" an UPDATE is to - * break it down into the component QUERY bits and execute - * those. - */ - doQuery(cxn, new NullOutputStream()); - success = true; - } else { - doQuery(cxn, os); - success = true; - os.flush(); - os.close(); - } - if (log.isTraceEnabled()) - log.trace("Query done."); - return null; - } finally { - endNanos = System.nanoTime(); - m_queries.remove(queryId); - if (queryId2 != null) m_queries2.remove(queryId2); -// if (os != null) { -// try { -// os.close(); -// } catch (Throwable t) { -// log.error(t, t); -// } -// } - if (cxn != null) { - if (!success && !cxn.isReadOnly()) { + /** + * Task for executing a SPARQL QUERY or SPARQL UPDATE. + * <p> + * See {@link AbstractQueryTask#update} to decide whether this task is a + * QUERY or an UPDATE. 
+ * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private class SparqlRestApiTask extends RestApiTask<Void> { + + public SparqlRestApiTask(final HttpServletRequest req, + final HttpServletResponse resp, final String namespace, + final long timestamp) { + + super(req, resp, namespace, timestamp); + + } + + @Override + public Void call() throws Exception { + BigdataSailRepositoryConnection cxn = null; + boolean success = false; + try { + // Note: Will be UPDATE connection if UPDATE request!!! + cxn = getQueryConnection();//namespace, timestamp); + if(log.isTraceEnabled()) + log.trace("Query running..."); + beginNanos = System.nanoTime(); + if (explain && !update) { /* - * Force rollback of the connection. + * The data goes to a bit bucket and we send an + * "explanation" of the query evaluation back to the caller. * - * Note: It is possible that the commit has already been - * processed, in which case this rollback() will be a - * NOP. This can happen when there is an IO error when - * communicating with the client, but the database has - * already gone through a commit. + * Note: The trick is how to get hold of the IRunningQuery + * object. It is created deep within the Sail when we + * finally submit a query plan to the query engine. We have + * the queryId (on queryId2), so we can look up the + * IRunningQuery in [m_queries] while it is running, but + * once it is terminated the IRunningQuery will have been + * cleared from the internal map maintained by the + * QueryEngine, at which point we can not longer find it. + * + * Note: We can't do this for UPDATE since it would have a + * side-effect anyway. The way to "EXPLAIN" an UPDATE is to + * break it down into the component QUERY bits and execute + * those. */ + doQuery(cxn, new NullOutputStream()); + success = true; + } else { + doQuery(cxn, os); + success = true; + os.flush(); + os.close(); + } + if (log.isTraceEnabled()) + log.trace("Query done."); + return null; + } finally { + endNanos = System.nanoTime(); + m_queries.remove(queryId); + if (queryId2 != null) m_queries2.remove(queryId2); +// if (os != null) { +// try { +// os.close(); +// } catch (Throwable t) { +// log.error(t, t); +// } +// } + if (cxn != null) { + if (!success && !cxn.isReadOnly()) { + /* + * Force rollback of the connection. + * + * Note: It is possible that the commit has already been + * processed, in which case this rollback() will be a + * NOP. This can happen when there is an IO error when + * communicating with the client, but the database has + * already gone through a commit. + */ + try { + // Force rollback of the connection. + cxn.rollback(); + } catch (Throwable t) { + log.error(t, t); + } + } try { - // Force rollback of the connection. - cxn.rollback(); + // Force close of the connection. + cxn.close(); } catch (Throwable t) { log.error(t, t); } } - try { - // Force close of the connection. - cxn.close(); - } catch (Throwable t) { - log.error(t, t); - } } } + + } + + @Override + final public Void call() throws Exception { + + final String queryOrUpdateStr = astContainer.getQueryString(); + + try { + + return BigdataServlet.submitApiTask(getIndexManager(), + new SparqlRestApiTask(req, resp, namespace, timestamp)) + .get(); + + } catch (Throwable t) { + + // FIXME GROUP_COMMIT: check calling stack for existing launderThrowable. 
+ throw BigdataRDFServlet.launderThrowable(t, resp, + queryOrUpdateStr); + + } + } // call() } // class AbstractQueryTask @@ -1234,6 +1274,7 @@ } + @Override protected void doQuery(final BigdataSailRepositoryConnection cxn, final OutputStream os) throws Exception { @@ -2111,64 +2152,64 @@ } - /** - * Return a connection transaction, which may be read-only or support - * update. When the timestamp is associated with a historical commit point, - * this will be a read-only connection. When it is associated with the - * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a - * mutable connection. - * - * @param namespace - * The namespace. - * @param timestamp - * The timestamp. - * - * @throws RepositoryException - */ - public BigdataSailRepositoryConnection getQueryConnection( - final String namespace, final long timestamp) - throws RepositoryException { +// /** +// * Return a connection transaction, which may be read-only or support +// * update. When the timestamp is associated with a historical commit point, +// * this will be a read-only connection. When it is associated with the +// * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a +// * mutable connection. +// * +// * @param namespace +// * The namespace. +// * @param timestamp +// * The timestamp. +// * +// * @throws RepositoryException +// */ +// public BigdataSailRepositoryConnection getQueryConnection( +// final String namespace, final long timestamp) +// throws RepositoryException { +// +// /* +// * Note: [timestamp] will be a read-only tx view of the triple store if +// * a READ_LOCK was specified when the NanoSparqlServer was started +// * (unless the query explicitly overrides the timestamp of the view on +// * which it will operate). +// */ +// final AbstractTripleStore tripleStore = getTripleStore(namespace, +// timestamp); +// +// if (tripleStore == null) { +// +// throw new DatasetNotFoundException("Not found: namespace=" +// + namespace + ", timestamp=" +// + TimestampUtility.toString(timestamp)); +// +// } +// +// // Wrap with SAIL. +// final BigdataSail sail = new BigdataSail(tripleStore); +// +// final BigdataSailRepository repo = new BigdataSailRepository(sail); +// +// repo.initialize(); +// +// if (TimestampUtility.isReadOnly(timestamp)) { +// +// return (BigdataSailRepositoryConnection) repo +// .getReadOnlyConnection(timestamp); +// +// } +// +// // Read-write connection. +// final BigdataSailRepositoryConnection conn = repo.getConnection(); +// +// conn.setAutoCommit(false); +// +// return conn; +// +// } - /* - * Note: [timestamp] will be a read-only tx view of the triple store if - * a READ_LOCK was specified when the NanoSparqlServer was started - * (unless the query explicitly overrides the timestamp of the view on - * which it will operate). - */ - final AbstractTripleStore tripleStore = getTripleStore(namespace, - timestamp); - - if (tripleStore == null) { - - throw new DatasetNotFoundException("Not found: namespace=" - + namespace + ", timestamp=" - + TimestampUtility.toString(timestamp)); - - } - - // Wrap with SAIL. - final BigdataSail sail = new BigdataSail(tripleStore); - - final BigdataSailRepository repo = new BigdataSailRepository(sail); - - repo.initialize(); - - if (TimestampUtility.isReadOnly(timestamp)) { - - return (BigdataSailRepositoryConnection) repo - .getReadOnlyConnection(timestamp); - - } - - // Read-write connection. 
- final BigdataSailRepositoryConnection conn = repo.getConnection(); - - conn.setAutoCommit(false); - - return conn; - - } - /** * Return a read-only view of the {@link AbstractTripleStore} for the given * namespace will read from the commit point associated with the given @@ -2182,12 +2223,17 @@ * @return The {@link AbstractTripleStore} -or- <code>null</code> if none is * found for that namespace and timestamp. * - * @todo enforce historical query by making sure timestamps conform (we do - * not want to allow read/write tx queries unless update semantics are - * introduced ala SPARQL 1.1). - * - * @todo Use a distributed read-only tx for queries (it would be nice if a - * tx used 2PL to specify which namespaces it could touch). + * FIXME GROUP_COMMIT: Review all callers. They are suspect. The + * code will sometimes resolve the KB as of the timestamp, but, + * given that the default is to read against the lastCommitTime, + * that does NOT prevent a concurrent destroy or create of a KB that + * invalidates such a pre-condition test. The main reason for such + * pre-condition tests is to provide nice HTTP status code responses + * when an identified namespace does (or does not) exist. The better + * way to handle this is by pushing the pre-condition test down into + * the {@link RestApiTask} and then throwning out an appropriate + * marked exception that gets correctly converted into an HTTP + * BAD_REQUEST message rather than sending back a stack trace. */ public AbstractTripleStore getTripleStore(final String namespace, final long timestamp) { @@ -2214,8 +2260,12 @@ * @throws SailException * * @throws RepositoryException + * + * FIXME GROUP COMMIT: This is deprecated by the support for + * {@link RestApiMutationTask}s */ - public BigdataSailRepositoryConnection getUnisolatedConnection( // FIXME REVIEW CALLERS + @Deprecated // deprecated by the + BigdataSailRepositoryConnection getUnisolatedConnection( final String namespace) throws SailException, RepositoryException { // resolve the default namespace. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -47,8 +47,6 @@ import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.impl.URIImpl; -import org.openrdf.repository.RepositoryException; -import org.openrdf.repository.RepositoryResult; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFWriter; @@ -59,7 +57,6 @@ import com.bigdata.rdf.properties.PropertiesWriter; import com.bigdata.rdf.properties.PropertiesWriterRegistry; import com.bigdata.rdf.rules.ConstraintViolationException; -import com.bigdata.rdf.sail.webapp.XMLBuilder.Node; import com.bigdata.util.InnerCause; /** @@ -398,41 +395,7 @@ buildResponse(resp, HTTP_OK, MIME_APPLICATION_XML, w.toString()); } - - /** - * Report the contexts back to the user agent. - * - * @param resp - * The response. - * @param it - * The iteration of contexts. - * @param elapsed - * The elapsed time (milliseconds). 
- * - * @throws IOException - */ - static protected void reportContexts(final HttpServletResponse resp, - final RepositoryResult<Resource> contexts, final long elapsed) - throws IOException, RepositoryException { - - final StringWriter w = new StringWriter(); - final XMLBuilder t = new XMLBuilder(w); - - final Node root = t.root("contexts"); - - while (contexts.hasNext()) { - - root.node("context").attr("uri", contexts.next()).close(); - - } - - root.close(); - - buildResponse(resp, HTTP_OK, MIME_APPLICATION_XML, w.toString()); - - } - /** * Send an RDF Graph as a response using content negotiation. * Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -34,6 +34,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import javax.servlet.ServletContext; import javax.servlet.http.HttpServlet; @@ -53,6 +54,7 @@ import com.bigdata.rdf.sail.webapp.client.IMimeTypes; import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.resources.IndexManager; import com.bigdata.service.IBigdataFederation; /** @@ -216,22 +218,49 @@ * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > * Concurrent unisolated operations against multiple KBs </a> */ - @SuppressWarnings({ "unchecked", "rawtypes" }) protected <T> Future<T> submitApiTask(final RestApiTask<T> task) throws DatasetNotFoundException { + final IIndexManager indexManager = getIndexManager(); + + return submitApiTask(indexManager, task); + + } + + /** + * Submit a task and return a {@link Future} for that task. The task will be + * run on the appropriate executor service depending on the nature of the + * backing database and the view required by the task. + * + * @param indexManager + * The {@link IndexManager}. + * @param task + * The task. + * + * @return The {@link Future} for that task. + * + * @throws DatasetNotFoundException + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA + * doLocalAbort() should interrupt NSS requests and AbstractTasks </a> + * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > + * Concurrent unisolated operations against multiple KBs </a> + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + static protected <T> Future<T> submitApiTask( + final IIndexManager indexManager, final RestApiTask<T> task) + throws DatasetNotFoundException { + final String namespace = task.getNamespace(); final long timestamp = task.getTimestamp(); - final IIndexManager indexManager = getIndexManager(); - if (!BigdataStatics.NSS_GROUP_COMMIT || indexManager instanceof IBigdataFederation || TimestampUtility.isReadOnly(timestamp) ) { /* - * Run on a normal executor service. + * Execute the REST API task. * * Note: For scale-out, the operation will be applied using * client-side global views of the indices. @@ -240,10 +269,31 @@ * a Journal). This is helpful since we can avoid some overhead * associated the AbstractTask lock declarations. */ - - return indexManager.getExecutorService().submit( + // Wrap Callable. 
+ final FutureTask<T> ft = new FutureTask<T>( new RestApiTaskForIndexManager(indexManager, task)); + if (true) { + + /* + * Caller runs (synchronous execution) + * + * Note: By having the caller run the task here we avoid + * consuming another thread. + */ + ft.run(); + + } else { + + /* + * Run on a normal executor service. + */ + indexManager.getExecutorService().submit(ft); + + } + + return ft; + } else { /** @@ -282,7 +332,7 @@ } } - + /** * Acquire the locks for the named indices associated with the specified KB. * Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -33,8 +33,8 @@ import com.bigdata.blueprints.BigdataGraphBulkLoad; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.webapp.RestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.MiniMime; -import com.bigdata.rdf.store.AbstractTripleStore; import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; /** @@ -69,16 +69,8 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - final long begin = System.currentTimeMillis(); - - final String namespace = getNamespace(req); - - final long timestamp = getTimestamp(req); - - final AbstractTripleStore tripleStore = getBigdataRDFContext() - .getTripleStore(namespace, timestamp); - - if (tripleStore == null) { + if (getBigdataRDFContext().getTripleStore(getNamespace(req), + getTimestamp(req)) == null) { /* * There is no such triple/quad store instance. */ @@ -104,12 +96,37 @@ try { + submitApiTask( + new BlueprintsPostTask(req, resp, getNamespace(req), + getTimestamp(req))).get(); + + } catch (Throwable t) { + + throw BigdataRDFServlet.launderThrowable(t, resp, ""); + + } + + } + + private static class BlueprintsPostTask extends RestApiMutationTask<Void> { + + public BlueprintsPostTask(HttpServletRequest req, + HttpServletResponse resp, String namespace, long timestamp) { + + super(req, resp, namespace, timestamp); + + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + BigdataSailRepositoryConnection conn = null; boolean success = false; try { - conn = getBigdataRDFContext() - .getUnisolatedConnection(namespace); + conn = getUnisolatedConnection(); final BigdataGraphBulkLoad graph = new BigdataGraphBulkLoad(conn); @@ -123,10 +140,11 @@ final long elapsed = System.currentTimeMillis() - begin; - reportModifiedCount(resp, nmodified, elapsed); - - return; + reportModifiedCount(nmodified, elapsed); + // Done. 
+ return null; + } finally { if (conn != null) { @@ -137,15 +155,11 @@ conn.close(); } - + } - } catch (Throwable t) { + } - throw BigdataRDFServlet.launderThrowable(t, resp, ""); - - } - } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.PipedOutputStream; +import java.util.Arrays; import java.util.concurrent.FutureTask; import java.util.concurrent.atomic.AtomicLong; @@ -270,8 +271,6 @@ final HttpServletResponse resp) throws IOException { final String baseURI = req.getRequestURL().toString(); - - final String namespace = getNamespace(req); final String contentType = req.getContentType(); @@ -281,68 +280,70 @@ if (log.isInfoEnabled()) log.info("Request body: " + contentType); - try { + /** + * There is a request body, so let's try and parse it. + * + * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> + * UpdateServlet fails to parse MIMEType when doing conneg. </a> + */ - /** - * There is a request body, so let's try and parse it. - * - * <a href="https://sourceforge.net/apps/trac/bigdata/ticket/620"> - * UpdateServlet fails to parse MIMEType when doing conneg. </a> - */ + final RDFFormat format = RDFFormat.forMIMEType(new MiniMime( + contentType).getMimeType()); - final RDFFormat format = RDFFormat.forMIMEType(new MiniMime( - contentType).getMimeType()); + if (format == null) { - if (format == null) { + buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, + "Content-Type not recognized as RDF: " + contentType); - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, - "Content-Type not recognized as RDF: " + contentType); + return; - return; + } - } + final RDFParserFactory rdfParserFactory = RDFParserRegistry + .getInstance().get(format); - final RDFParserFactory rdfParserFactory = RDFParserRegistry - .getInstance().get(format); + if (rdfParserFactory == null) { - if (rdfParserFactory == null) { + buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, + "Parser factory not found: Content-Type=" + contentType + + ", format=" + format); - buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, - "Parser factory not found: Content-Type=" + contentType - + ", format=" + format); + return; - return; + } + /* + * Allow the caller to specify the default contexts. + */ + final Resource[] defaultContext; + { + final String[] s = req.getParameterValues("context-uri"); + if (s != null && s.length > 0) { + try { + defaultContext = toURIs(s); + } catch (IllegalArgumentException ex) { + buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, + ex.getLocalizedMessage()); + return; + } + } else { + defaultContext = new Resource[0]; } + } - /* - * Allow the caller to specify the default contexts. 
- */ - final Resource[] defaultContext; - { - final String[] s = req.getParameterValues("context-uri"); - if (s != null && s.length > 0) { - try { - defaultContext = toURIs(s); - } catch (IllegalArgumentException ex) { - buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, - ex.getLocalizedMessage()); - return; - } - } else { - defaultContext = new Resource[0]; - } - } - + try { + submitApiTask( - new DeleteWithBodyTask(req, resp, namespace, + new DeleteWithBodyTask(req, resp, getNamespace(req), ITx.UNISOLATED, baseURI, defaultContext, rdfParserFactory)).get(); - + } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, ""); - + throw BigdataRDFServlet.launderThrowable(t, resp, + "DELETE-WITH-BODY: baseURI=" + baseURI + ", context-uri=" + + Arrays.toString(defaultContext)); + } } @@ -523,7 +524,7 @@ } if (log.isInfoEnabled()) - log.info("delete with access path: (s=" + s + ", p=" + p + ", o=" + log.info("DELETE-WITH-ACCESS-PATH: (s=" + s + ", p=" + p + ", o=" + o + ", c=" + c + ")"); try { @@ -534,8 +535,9 @@ } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, "s=" + s + ",p=" - + p + ",o=" + o + ",c=" + c); + throw BigdataRDFServlet.launderThrowable(t, resp, + "DELETE-WITH-ACCESS-PATH: (s=" + s + ",p=" + p + ",o=" + o + + ",c=" + c + ")"); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DescribeCacheServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -129,6 +129,10 @@ /** * GET returns the DESCRIBE of the resource. + * + * FIXME DESCRIBE: TX ISOLATION for request but ensure that cache is not + * negatively effected by that isolation (i.e., how does the cache index + * based on time tx view). */ @Override protected void doGet(final HttpServletRequest req, @@ -369,14 +373,10 @@ os.flush(); } catch (Throwable e) { -// try { - throw BigdataRDFServlet.launderThrowable(e, resp, - "DESCRIBE" - // queryStr // TODO Report as "DESCRIBE uri(s)". - ); -// } catch (Exception e1) { -// throw new RuntimeException(e); -// } + + throw BigdataRDFServlet.launderThrowable(e, resp, + "DESCRIBE: uris=" + internalURIs); + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -138,8 +138,6 @@ final String baseURI = req.getRequestURL().toString(); - final String namespace = getNamespace(req); - final String contentType = req.getContentType(); if (contentType == null) @@ -175,32 +173,13 @@ if (rdfParserFactory == null) { buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, - "Parser factory not found: Content-Type=" - + contentType + ", format=" + format); - - return; + "Parser factory not found: Content-Type=" + contentType + + ", format=" + format); + return; + } -// /* -// * Allow the caller to specify the default context. 
-// */ -// final Resource defaultContext; -// { -// final String s = req.getParameter("context-uri"); -// if (s != null) { -// try { -// defaultContext = new URIImpl(s); -// } catch (IllegalArgumentException ex) { -// buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, -// ex.getLocalizedMessage()); -// return; -// } -// } else { -// defaultContext = null; -// } -// } - /* * Allow the caller to specify the default contexts. */ @@ -223,13 +202,16 @@ try { submitApiTask( - new InsertWithBodyTask(req, resp, namespace, ITx.UNISOLATED, - baseURI, defaultContext, rdfParserFactory)).get(); + new InsertWithBodyTask(req, resp, getNamespace(req), + ITx.UNISOLATED, baseURI, defaultContext, + rdfParserFactory)).get(); } catch (Throwable t) { - throw BigdataRDFServlet.launderThrowable(t, resp, ""); - + throw BigdataRDFServlet.launderThrowable(t, resp, + "INSERT-WITH-BODY: baseURI=" + baseURI + ", context-uri=" + + Arrays.toString(defaultContext)); + } } @@ -385,25 +367,6 @@ } -// /* -// * Allow the caller to specify the default context. -// */ -// final Resource defaultContext; -// { -// final String s = req.getParameter("context-uri"); -// if (s != null) { -// try { -// defaultContext = new URIImpl(s); -// } catch (IllegalArgumentException ex) { -// buildResponse(resp, HTTP_INTERNALERROR, MIME_TEXT_PLAIN, -// ex.getLocalizedMessage()); -// return; -// } -// } else { -// defaultContext = null; -// } -// } - /* * Allow the caller to specify the default contexts. */ @@ -431,8 +394,9 @@ } catch (Throwable t) { - throw launderThrowable(t, resp, "urls=" + urls); - + throw launderThrowable(t, resp, "uri=" + urls + ", context-uri=" + + Arrays.toString(defaultContext)); + } } @@ -688,10 +652,10 @@ } if (c.length >= 2) { - // added to more than one context - nmodified.addAndGet(c.length); + // added to more than one context + nmodified.addAndGet(c.length); } else { - nmodified.incrementAndGet(); + nmodified.incrementAndGet(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -63,6 +63,10 @@ * NanoSparqlServer Admin API for Multi-tenant deployments</a> * * @author thompsonbry + * + * FIXME GROUP COMMIT: The CREATE and DESTROY operations require special + * attention. The other operations in this class also should use the new + * REST API pattern, but are not intrinsically sensitive. */ public class MultiTenancyServlet extends BigdataRDFServlet { @@ -540,13 +544,6 @@ final long timestamp = getTimestamp(req); -// if (timestamp == ITx.READ_COMMITTED) { -// -// // Use the last commit point. -// timestamp = getIndexManager().getLastCommitTime(); -// -// } - final long tx = getBigdataRDFContext().newTx(timestamp); try { @@ -582,13 +579,6 @@ final HttpServletResponse resp) throws IOException { final long timestamp = getTimestamp(req); - -// if (timestamp == ITx.READ_COMMITTED) { -// -// // Use the last commit point. 
-// timestamp = getIndexManager().getLastCommitTime(); -// -// } final boolean describeEachNamedGraph; { @@ -683,9 +673,7 @@ final VoID v = new VoID(g, tripleStore, serviceURI, aDataSet); - v.describeDataSet(false/* describeStatistics */, -// getBigdataRDFContext().getConfig().describeEachNamedGraph); - describeEachNamedGraph); + v.describeDataSet(false/* describeStatistics */, describeEachNamedGraph); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-17 20:51:10 UTC (rev 8500) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-06-17 21:14:25 UTC (rev 8501) @@ -69,6 +69,8 @@ import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.UpdateTask; +import com.bigdata.rdf.sail.webapp.RestApiTask.RestApiQueryTask; +import com.bigdata.rdf.sail.webapp.XMLBuilder.Node; import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryRoot; @@ -163,11 +165,11 @@ if (req.getParameter(ATTR_UPDATE) != null) { // SPARQL 1.1 UPDATE. - doUpdate(req, resp); + doSparqlUpdate(req, resp); } else if (RESTServlet.hasMimeType(req, MIME_SPARQL_UPDATE)) { // SPARQL 1.1 UPDATE, see trac 711 for bug report motivating this case - doUpdate(req, resp); + doSparqlUpdate(req, resp); } else if (req.getParameter(ATTR_UUID) != null) { @@ -187,7 +189,7 @@ } else { // SPARQL Query. - doQuery(req, resp); + doSparqlQuery(req, resp); } @@ -202,7 +204,7 @@ if (req.getParameter(ATTR_QUERY) != null) { - doQuery(req, resp); + doSparqlQuery(req, resp); } else if (req.getParameter(ATTR_UUID) != null) { @@ -318,7 +320,7 @@ * @param resp * @throws IOException */ - private void doUpdate(final HttpServletRequest req, + private void doSparqlUpdate(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { if (!isWritable(getServletContext(), req, resp)) { @@ -409,43 +411,9 @@ } /** - * FIXME GROUP COMMIT: We need to refactor the code that manages the - * running queries in BigdataRDFServlet so we can separate out the - * concurrency control of the views from the control over the #of - * running queries and/or update requests and the metadata that we - * manage to track and report on those requests. - */ -// private static class SparqlUpdateTask extends RestApiMutationTask<Void> { -// -// /** -// * -// * @param namespace -// * The namespace of the target KB instance. -// * @param timestamp -// * The timestamp used to obtain a mutable connection. -// * @param baseURI -// * The base URI for the operation. -// */ -// public SparqlUpdateTask(final HttpServletRequest req, -// final HttpServletResponse resp, -// final String namespace, final long timestamp -// ) { -// super(req, resp, namespace, timestamp); -// } -// -// @Override -// public Void call() throws Exception { -// -// -// -// } -// -// } - - /** * Run a SPARQL query. 
*/ - void doQuery(final HttpServletRequest req, final HttpServletResponse resp) + void doSparqlQuery(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { if (!isReadable(getServletContext(), req, resp)) { @@ -1065,10 +1033,6 @@ return; } - final long begin = System.currentTimeMillis(); - - final String namespace = getNamespace(req); - final Resource s; final URI p; final Value o; @@ -1089,70 +1053,94 @@ + o + ", c=" + c + ")"); try { + + submitApiTask( + new EstCardTask(req, resp, getNamespace(req), + getTimestamp(req), // + s, p, o, c)).get(); - try { + } catch (Throwable t) { - BigdataSailRepositoryConnection conn = null; - try { + launderThrowable(t, resp, "ESTCARD: access path: (s=" + s + ", p=" + + p + ", o=" + o + ", c=" + c + ")"); - final long timestamp = getTimestamp(req); + } + + } + + /** + * Helper task for the ESTCARD query. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private static class EstCardTask extends RestApiQueryTask<Void> { - conn = getBigdataRDFContext().getQueryConnection( - namespace, timestamp); + private final Resource s; + private final URI p; + private final Value o; + private final Resource[] c; + + public EstCardTask(final HttpServletRequest req, + final HttpServletResponse resp, final String namespace, + final long timestamp, final Resource s, final URI p, + final Value o, final Resource[] c) { - // Range count all statements matching that access path. - long rangeCount = 0; - if (c != null && c.length > 0) { - for (Resource r : c) { - rangeCount += conn.getSailConnection() - .getBigdataSail().getDatabase() - .getAccessPath(s, p, o, r) - .rangeCount(false/* exact */); - } - } else { - rangeCount += conn.getSailConnection() - .getBigdataSail().getDatabase() - .getAccessPath(s, p, o, (Resource) null) + super(req, resp, namespace, timestamp); + + this.s = s; + this.p = p; + this.o = o; + this.c = c; + + } + + @Override + public Void call() throws Exception { + + final long begin = System.currentTimeMillis(); + + BigdataSailRepositoryConnection conn = null; + try { + + conn = getQueryConnection(); + + // Range count all statements matching that access path. + long rangeCount = 0; + if (c != null && c.length > 0) { + for (Resource r : c) { + rangeCount += conn.getSailConnection().getBigdataSail() + .getDatabase().getAccessPath(s, p, o, r) .rangeCount(false/* exact */); } - - final long elapsed = System.currentTimeMillis() - begin; - - reportRangeCount(resp, rangeCount, elapsed); + } else { + rangeCount += conn.getSailConnection().getBigdataSail() + .getDatabase() + .getAccessPath(s, p, o, (Resource) null) + .rangeCount(false/* exact */); + } - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - - } finally { + final long elapsed = System.currentTimeMillis() - begin; - if (conn != null) - conn.close(); + reportRangeCount(resp, rangeCount, elapsed); - } + return null; - } catch (Throwable t) { + } finally { - throw BigdataRDFServlet.launderThrowable(t, resp, ""); + if (conn != null) { - } + conn.close(); - } catch (Exception ex) { + } - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + } } - } + } // ESTCARD task. /** * Report on the contexts in use in the quads database. - * @param req - * @param resp */ private void doContexts(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { @@ -1161,58 +1149,86 @@ // HA Quorum in use, but quorum is not met. 
return; } - - final long begin = System.currentTimeMillis(); - - final String namespace = getNamespace(req); try { + + submitApiTask( + new GetContextsTask(req, resp, getNamespace(req), + getTimestamp(req))).get(); + } catch (Throwable t) { + + launderThrowable(t, resp, "GET-CONTEXTS"); + + } + + } + + /** + * Task to report the contexts used by a QUADS mode KB instance. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private static class GetContextsTask extends RestApiQueryTask<Void> { + + public GetContextsTask(final HttpServletRequest req, + final HttpServletResponse resp, final String namespace, + final long timestamp) { + + super(req, resp, namespace, timestamp); + + } + + @Override + public Void call() throws Exception { + + BigdataSailRepositoryConnection conn = null; try { - BigdataSailRepositoryConnection conn = null; + conn = getQueryConnection(); + + final StringWriter w = new StringWriter(); + + final RepositoryResult<Resource> it = conn.getContextIDs(); + try { - final long timestamp = getTimestamp(req); + final XMLBuilder t = new XMLBuilder(w); - conn = getBigdataRDFContext().getQueryConnection( - namespace, timestamp); + final Node root = t.root("contexts"); - final RepositoryResult<Resource> it = conn.getContextIDs(); - - final long elapsed = System.currentTimeMillis() - begin; - - reportContexts(resp, it, elapsed); + while (it.hasNext()) { - } catch(Throwable t) { - - if(conn != null) - conn.rollback(); - - throw new RuntimeException(t); - + root.node("context").attr("uri", it.next()).close(); + + } + + root.close(); + } finally { - if (conn != null) - conn.close(); + it.close(); } - } catch (Throwable t) { + buildResponse(resp, HTTP_OK, MIME_APPLICATION_XML, w.toString()); + + return null; - throw BigdataRDFServlet.launderThrowable(t, resp, ""); + } finally { - } + if (conn != null) { - } catch (Exception ex) { + conn.close(); + + } - // Will be rendered as an INTERNAL_ERROR. - throw new RuntimeException(ex); + } } - + } - + /** * Private API reports the shards against which the access path would * read. @@ -1234,10 +1250,6 @@ return; } - final long begin = System.currentTimeMillis(); - - final String namespace = getNamespace(req); - final boolean doRangeCount = true; final Resource s; final URI p; @@ -1259,173 +1271,201 @@ + o + ", c=" + c + ")"); try { + + submitApiTask( + new ShardsTask(req, resp, getNamespace(req), + getTimestamp(req), s, p, o, c, doRangeCount)).get(); - try { + } catch (Throwable t) { - BigdataSailRepositoryConnection conn = null; - try { + launderThrowable(t, resp, "SHARDS: access path: (s=" + s + ", p=" + + p + ", o=" + o + ", c=" + c + ")"); - final long timestamp = getTimestamp(req); + } - conn = getBigdataRDFContext().getQueryConnection( - namespace, timestamp); + } - final AccessPath<?> accessPath = (AccessPath<?>) conn - .getSailConnection().getBigdataSail().getDatabase() - .getAccessPath(s, p, o, c); - - final ClientIndexView ndx = (ClientIndexView) accessPath - .getIndex(); - - final String charset = "utf-8";// TODO from request. + /** + * Task to report on the SHARDS used by a scale-out deployment. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private static class ShardsTask extends RestApiQueryTask<Void> { - resp.setContentType(BigdataServlet.MIME_TEXT_HTML); - resp.setCharacterEncoding(charset); - final Writer w = resp.getWriter(); - try { + private final Resource s; + private final URI p; + private final Value o; + private final Resource c; + private final boolean doRangeCount; + + public ShardsTask(final HttpServletRequest req, + final HttpServletResponse resp, final String namespace, + final long timestamp, final Resource s, final URI p, + final Value o, final Resource c, final boolean doRangeCount) { - final HTMLBuilder doc = new HTMLBuilder(charset, w); - - XMLBuilder.Node current = doc.root("html"); - { - current = current.node("head"); - current.node("meta") - .attr("http-equiv", "Content-Type") - .attr("content", - "text/html;charset=utf-8") - .close(); - current.node("title") - .textNoEncode("bigdata®").close(); - current = current.close();// close the head. - } + super(req, resp, namespace, timestamp); - // open the body - current = current.node("body"); + this.s = s; + this.p = p; + this.o = o; + this.c = c; + this.doRangeCount = doRangeCount; + + } - final IBigdataFederation<?> fed = (IBigdataFederation<?>) getBigdataRDFContext() - .getIndexManager(); - - final Iterator<PartitionLocator> itr = ndx.locatorScan( - timestamp, accessPath.getFromKey(), - accessPath.getToKey(), false/* reverseScan */); + @Override + public Void call() throws Exception { - int nlocators = 0; + final long begin = System.currentTimeMillis(); + + BigdataSailRepositoryConnection conn = null; + try { - // The distinct hosts on which the shards are located. - final Map<String,AtomicInteger> hosts = new TreeMap<String,AtomicInteger>(); - ... [truncated message content] |
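The heart of the r8501 change is visible in BigdataServlet.submitApiTask(): the RestApiTask is wrapped in a FutureTask, and on the non-group-commit code path the caller's own thread runs it, so no extra worker thread is consumed for heavy query workloads while the caller still receives a uniform Future. A minimal, self-contained sketch of that pattern follows; the class and method names here are illustrative, not taken from the bigdata code base.

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;
    import java.util.concurrent.FutureTask;

    public class CallerRunsExample {

        /*
         * Wrap a task in a FutureTask and either run it synchronously on
         * the caller's thread or hand it to an executor. Either way the
         * caller gets back a Future with the same get()/cancel() contract.
         */
        static <T> Future<T> submit(final ExecutorService service,
                final Callable<T> task, final boolean callerRuns) {

            final FutureTask<T> ft = new FutureTask<T>(task);

            if (callerRuns) {
                // Synchronous execution: no additional thread is consumed.
                ft.run();
            } else {
                // Asynchronous execution on the executor's thread pool.
                service.submit(ft);
            }

            return ft;
        }
    }

Exceptions thrown by the task surface from ft.get() as an ExecutionException regardless of which branch ran it, which is what lets the servlet code above funnel every failure through a single launderThrowable() call.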
From: <tob...@us...> - 2014-06-24 19:14:10
Revision: 8503 http://sourceforge.net/p/bigdata/code/8503 Author: tobycraig Date: 2014-06-24 19:14:03 +0000 (Tue, 24 Jun 2014) Log Message: ----------- #975 - Added simple health panel Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-20 12:00:30 UTC (rev 8502) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-24 19:14:03 UTC (rev 8503) @@ -25,11 +25,13 @@ import java.io.File; import java.io.IOException; import java.io.PrintWriter; +import java.io.StringWriter; import java.math.BigInteger; import java.net.InetSocketAddress; import java.security.DigestException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.util.Date; import java.util.Iterator; import java.util.UUID; import java.util.concurrent.TimeoutException; @@ -65,6 +67,8 @@ import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.StatusServlet.DigestEnum; import com.bigdata.zookeeper.DumpZookeeper; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; /** * Class supports the {@link StatusServlet} and isolates code that has a @@ -885,6 +889,68 @@ } + /** + * Basic server health info + * + * @param req + * @param resp + * @throws TimeoutException + * @throws InterruptedException + * @throws AsynchronousQuorumCloseException + * @throws IOException + */ + public void doHealthStatus(final HttpServletRequest req, + final HttpServletResponse resp) throws IOException { + + if (!(indexManager instanceof HAJournal)) + return; + + final HAJournal journal = (HAJournal) indexManager; + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + StringWriter writer = new StringWriter(); + JsonFactory factory = new JsonFactory(); + JsonGenerator json = factory.createGenerator(writer); + + json.writeStartObject(); + + json.writeFieldName("version"); + json.writeString("1.0"); // FIXME + json.writeFieldName("timestamp"); + json.writeNumber(new Date().getTime()); // FIXME + if(quorum.isQuorumFullyMet(quorum.token())) { + json.writeFieldName("status"); + json.writeString("Good"); + json.writeFieldName("details"); + json.writeString("All servers joined"); + } else { + // at least one server is not available, so status is either Warning or Bad + json.writeFieldName("status"); + if(quorum.isQuorumMet()) { + json.writeString("Warning"); + } else { + json.writeString("Bad"); + } + json.writeFieldName("details"); + json.writeString("Only " + quorum.getJoined().length + " of target " + + quorum.replicationFactor() + " servers joined"); + } + + json.writeEndObject(); + json.close(); + + // TODO Alternatively "max-age=1" for max-age in seconds. 
+ resp.addHeader("Cache-Control", "no-cache"); + + BigdataRDFServlet.buildResponse(resp, BigdataRDFServlet.HTTP_OK, + BigdataRDFServlet.MIME_APPLICATION_JSON, writer.toString()); + + return; + + } + // /** // * Impose a lexical ordering on the file names. This is used for the HALog // * and snapshot file names. The main component of those file names is the Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-06-20 12:00:30 UTC (rev 8502) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-06-24 19:14:03 UTC (rev 8503) @@ -187,6 +187,11 @@ static final String HA = "HA"; /** + * Request basic server health information. + */ + static final String HEALTH = "health"; + + /** * Handles CANCEL requests (terminate a running query). */ @Override @@ -396,6 +401,14 @@ return; } + if (req.getParameter(HEALTH) != null + && getIndexManager() instanceof AbstractJournal + && ((AbstractJournal) getIndexManager()).getQuorum() != null) { // for HA1 + new HAStatusServletUtil(getIndexManager()).doHealthStatus(req, resp); + + return; + } + // IRunningQuery objects currently running on the query controller. final boolean showQueries = req.getParameter(SHOW_QUERIES) != null; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java 2014-06-20 12:00:30 UTC (rev 8502) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IMimeTypes.java 2014-06-24 19:14:03 UTC (rev 8503) @@ -37,6 +37,7 @@ MIME_DEFAULT_BINARY = "application/octet-stream", MIME_APPLICATION_XML = "application/xml", MIME_TEXT_JAVASCRIPT = "text/javascript", + MIME_APPLICATION_JSON = "application/json", /** * The traditional encoding of URL query parameters within a POST * message body. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-20 12:00:30 UTC (rev 8502) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-24 19:14:03 UTC (rev 8503) @@ -25,6 +25,7 @@ <a data-target="update">Update</a> <a data-target="explore">Explore</a> <a data-target="status">Status</a> + <a data-target="health">Health</a> <a data-target="performance">Performance</a> <a data-target="namespaces">Namespaces</a> <p>Current namespace: <span id="current-namespace"></span></p> @@ -185,6 +186,18 @@ </div> + <div class="tab" id="health-tab"> + + <div class="box"> + <p id="health-status">Status: <span></span></p> + <p id="health-details">Details: <span></span></p> + <p id="health-version">Version: <span></span></p> + <p id="health-timestamp">Timestamp: <span></span></p> + <p><a href="#" id="health-refresh">Refresh</a></p> + </div> + + </div> + <div class="tab" id="performance-tab"> <div class="box"></div> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-20 12:00:30 UTC (rev 8502) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-24 19:14:03 UTC (rev 8503) @@ -1348,7 +1348,23 @@ }); } +/* Health */ +$('#tab-selector a[data-target=health], #health-refresh').click(getHealth); + +function getHealth(e) { + e.preventDefault(); + $.get('/status?health', function(data) { + for(var key in data) { + if(key == 'timestamp') { + var date = new Date(data[key]); + data[key] = date.toString(); + } + $('#health-' + key + ' span').html(data[key]); + } + }) +} + /* Performance */ $('#tab-selector a[data-target=performance]').click(function(e) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
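The new /status?health endpoint in r8503 builds its payload with Jackson's streaming generator rather than string concatenation. A compact sketch of the same construction is below; the quorum state is reduced to two hypothetical parameters (servers joined vs. the replication target), and the joined > target / 2 test merely stands in for quorum.isQuorumMet().

    import java.io.IOException;
    import java.io.StringWriter;
    import java.util.Date;

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;

    public class HealthJsonExample {

        static String healthJson(final int joined, final int target)
                throws IOException {

            final StringWriter writer = new StringWriter();
            final JsonGenerator json = new JsonFactory().createGenerator(writer);

            json.writeStartObject();
            json.writeStringField("version", "1.0");
            json.writeNumberField("timestamp", new Date().getTime());
            if (joined == target) {
                // Quorum fully met.
                json.writeStringField("status", "Good");
                json.writeStringField("details", "All servers joined");
            } else {
                // Met-but-degraded versus not met.
                json.writeStringField("status",
                        joined > target / 2 ? "Warning" : "Bad");
                json.writeStringField("details", "Only " + joined
                        + " of target " + target + " servers joined");
            }
            json.writeEndObject();
            json.close(); // flushes the generator into the writer

            return writer.toString();
        }
    }

Because the response is served as application/json, jQuery's $.get on the workbench side hands the callback an already-parsed object, which is why workbench.js can iterate the keys directly.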
From: <tob...@us...> - 2014-06-28 01:45:10
Revision: 8507 http://sourceforge.net/p/bigdata/code/8507 Author: tobycraig Date: 2014-06-28 01:45:04 +0000 (Sat, 28 Jun 2014) Log Message: ----------- Added per service status to health tab Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java 2014-06-27 21:26:52 UTC (rev 8506) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java 2014-06-28 01:45:04 UTC (rev 8507) @@ -350,6 +350,19 @@ } /** + * Attempts to return build version. + * + * @return Build version if available + */ + public final static String getVersion() { + if (getBuildInfo().isEmpty()) { + return "unknown"; + } + + return getBuildInfo().get("buildVersion"); + } + + /** * Outputs the banner and exits. * * @param args Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-27 21:26:52 UTC (rev 8506) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-28 01:45:04 UTC (rev 8507) @@ -43,6 +43,7 @@ import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooKeeper; +import com.bigdata.Banner; import com.bigdata.BigdataStatics; import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; @@ -916,28 +917,39 @@ json.writeStartObject(); - json.writeFieldName("version"); - json.writeString("1.0"); // FIXME - json.writeFieldName("timestamp"); - json.writeNumber(new Date().getTime()); // FIXME + json.writeStringField("version", Banner.getVersion()); + json.writeNumberField("timestamp", new Date().getTime()); if(quorum.isQuorumFullyMet(quorum.token())) { - json.writeFieldName("status"); - json.writeString("Good"); - json.writeFieldName("details"); - json.writeString("All servers joined"); + json.writeStringField("status", "Good"); + json.writeStringField("details", "All servers joined"); } else { // at least one server is not available, so status is either Warning or Bad - json.writeFieldName("status"); if(quorum.isQuorumMet()) { - json.writeString("Warning"); + json.writeStringField("status", "Warning"); } else { - json.writeString("Bad"); + json.writeStringField("status", "Bad"); } - json.writeFieldName("details"); - json.writeString("Only " + quorum.getJoined().length + " of target " + + json.writeStringField("details", "Only " + quorum.getJoined().length + " of target " + quorum.replicationFactor() + " servers joined"); } + json.writeFieldName("services"); + json.writeStartArray(); + + final UUID[] joined = quorum.getJoined(); + final UUID[] pipeline = quorum.getPipeline(); + + for (UUID serviceId : pipeline) { + final boolean isLeader = serviceId.equals(quorum.getLeaderId()); + final boolean isFollower = indexOf(serviceId, joined) > 0; + + json.writeStartObject(); + json.writeStringField("id", serviceId.toString()); + json.writeStringField("status", isLeader ? 
"leader" : (isFollower ? "follower" : "unready")); + json.writeEndObject(); + } + + json.writeEndArray(); json.writeEndObject(); json.close(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-27 21:26:52 UTC (rev 8506) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-28 01:45:04 UTC (rev 8507) @@ -202,11 +202,17 @@ <div class="tab" id="health-tab"> + <div class="box" id="health-overview"> + <h1>Overview</h1> + <p class="health-status">Status: <span></span></p> + <p class="health-details">Details: <span></span></p> + <p class="health-version">Version: <span></span></p> + <p class="health-timestamp">Timestamp: <span></span></p> + </div> + + <div id="health-services"></div> + <div class="box"> - <p id="health-status">Status: <span></span></p> - <p id="health-details">Details: <span></span></p> - <p id="health-version">Version: <span></span></p> - <p id="health-timestamp">Timestamp: <span></span></p> <p><a href="#" id="health-refresh">Refresh</a></p> </div> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-27 21:26:52 UTC (rev 8506) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-28 01:45:04 UTC (rev 8507) @@ -1406,15 +1406,31 @@ function getHealth(e) { e.preventDefault(); $.get('/status?health', function(data) { - for(var key in data) { - if(key == 'timestamp') { - var date = new Date(data[key]); - data[key] = date.toString(); + $('#health-overview .health-status span').html(data.status); + $('#health-overview').removeClass('health-good health-warning health-bad').addClass('health-' + data.status.toLowerCase()); + $('#health-overview .health-details span').html(data.details); + $('#health-overview .health-version span').html(data.version); + $('#health-overview .health-timestamp span').html(new Date(data.timestamp).toString()); + + $('#health-services div').remove(); + for(var i=0; i<data.services.length; i++) { + var div = $('<div>'); + div.append('<p>ID: ' + data.services[i].id + '</p>'); + div.append('<p>Status: ' + data.services[i].status + '</p>'); + var health; + switch(data.services[i].status) { + case 'leader': + case 'follower': + health = 'good'; + break; + case 'unready': + health = 'warning'; + break; + default: + health = 'bad'; } - if(key == 'status') { - $('#health-overview').removeClass('health-good health-warning health-bad').addClass('health-' + data[key].toLowerCase()); - } - $('#health-' + key + ' span').html(data[key]); + div.addClass('box health-' + health); + div.appendTo($('#health-services')); } }) } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-06-30 17:39:15
Revision: 8512 http://sourceforge.net/p/bigdata/code/8512 Author: tobycraig Date: 2014-06-30 17:39:05 +0000 (Mon, 30 Jun 2014) Log Message: ----------- Made Banner.getVersion return null instead of "unknown" and fixed formatting Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java 2014-06-30 13:10:31 UTC (rev 8511) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/Banner.java 2014-06-30 17:39:05 UTC (rev 8512) @@ -378,27 +378,27 @@ } - /** - * Attempts to return the build version (aka the release version) from the - * <code>com.bigdata.BuildInfo</code> class. This class is generated by - * <code>build.xml</code> and is NOT available from the IDE. It is correct - * discovered using reflection. - * - * @return Build version if available and <code>unknown</code> otherwise. - * - * @see #getBuildInfo() - */ - public final static String getVersion() { + /** + * Attempts to return the build version (aka the release version) from the + * <code>com.bigdata.BuildInfo</code> class. This class is generated by + * <code>build.xml</code> and is NOT available from the IDE. It is correct + * discovered using reflection. + * + * @return Build version if available and <code>unknown</code> otherwise. + * + * @see #getBuildInfo() + */ + public final static String getVersion() { - if (getBuildInfo().isEmpty()) { - - return "unknown"; - - } + if (getBuildInfo().isEmpty()) { - return getBuildInfo().get(BuildInfoMeta.buildVersion); + return null; - } + } + + return getBuildInfo().get(BuildInfoMeta.buildVersion); + + } /** * Outputs the banner and exits. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-30 13:10:31 UTC (rev 8511) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-06-30 17:39:05 UTC (rev 8512) @@ -890,78 +890,80 @@ } - /** - * Basic server health info - * - * @param req - * @param resp - * @throws TimeoutException - * @throws InterruptedException - * @throws AsynchronousQuorumCloseException - * @throws IOException - */ - public void doHealthStatus(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { + /** + * Basic server health info + * + * @param req + * @param resp + * @throws TimeoutException + * @throws InterruptedException + * @throws AsynchronousQuorumCloseException + * @throws IOException + */ + public void doHealthStatus(final HttpServletRequest req, + final HttpServletResponse resp) throws IOException { - if (!(indexManager instanceof HAJournal)) - return; + if (!(indexManager instanceof HAJournal)) + return; - final HAJournal journal = (HAJournal) indexManager; + final HAJournal journal = (HAJournal) indexManager; - final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal - .getQuorum(); - - StringWriter writer = new StringWriter(); - JsonFactory factory = new JsonFactory(); - JsonGenerator json = factory.createGenerator(writer); - - json.writeStartObject(); - - json.writeStringField("version", Banner.getVersion()); - json.writeNumberField("timestamp", new Date().getTime()); - if(quorum.isQuorumFullyMet(quorum.token())) { - json.writeStringField("status", "Good"); - json.writeStringField("details", "All servers joined"); - } else { - // at least one server is not available, so status is either Warning or Bad - if(quorum.isQuorumMet()) { - json.writeStringField("status", "Warning"); - } else { - json.writeStringField("status", "Bad"); - } - json.writeStringField("details", "Only " + quorum.getJoined().length + " of target " + - quorum.replicationFactor() + " servers joined"); - } + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); - json.writeFieldName("services"); - json.writeStartArray(); - - final UUID[] joined = quorum.getJoined(); - final UUID[] pipeline = quorum.getPipeline(); + StringWriter writer = new StringWriter(); + JsonFactory factory = new JsonFactory(); + JsonGenerator json = factory.createGenerator(writer); - for (UUID serviceId : pipeline) { - final boolean isLeader = serviceId.equals(quorum.getLeaderId()); - final boolean isFollower = indexOf(serviceId, joined) > 0; + json.writeStartObject(); - json.writeStartObject(); - json.writeStringField("id", serviceId.toString()); - json.writeStringField("status", isLeader ? "leader" : (isFollower ? "follower" : "unready")); - json.writeEndObject(); - } - - json.writeEndArray(); - json.writeEndObject(); - json.close(); - - // TODO Alternatively "max-age=1" for max-age in seconds. 
- resp.addHeader("Cache-Control", "no-cache"); + json.writeStringField("version", Banner.getVersion()); + json.writeNumberField("timestamp", new Date().getTime()); + if (quorum.isQuorumFullyMet(quorum.token())) { + json.writeStringField("status", "Good"); + json.writeStringField("details", "All servers joined"); + } else { + // at least one server is not available + // status is either Warning or Bad + if (quorum.isQuorumMet()) { + json.writeStringField("status", "Warning"); + } else { + json.writeStringField("status", "Bad"); + } + json.writeStringField("details", "Only " + quorum.getJoined().length + + " of target " + quorum.replicationFactor() + + " servers joined"); + } - BigdataRDFServlet.buildResponse(resp, BigdataRDFServlet.HTTP_OK, - BigdataRDFServlet.MIME_APPLICATION_JSON, writer.toString()); + json.writeFieldName("services"); + json.writeStartArray(); - return; - - } + final UUID[] pipeline = quorum.getPipeline(); + final UUID[] joined = quorum.getJoined(); + + for (UUID serviceId : pipeline) { + final boolean isLeader = serviceId.equals(quorum.getLeaderId()); + final boolean isFollower = indexOf(serviceId, joined) > 0; + + json.writeStartObject(); + json.writeStringField("id", serviceId.toString()); + json.writeStringField("status", isLeader ? "leader" + : (isFollower ? "follower" : "unready")); + json.writeEndObject(); + } + + json.writeEndArray(); + json.writeEndObject(); + json.close(); + + // TODO Alternatively "max-age=1" for max-age in seconds. + resp.addHeader("Cache-Control", "no-cache"); + + BigdataRDFServlet.buildResponse(resp, BigdataRDFServlet.HTTP_OK, + BigdataRDFServlet.MIME_APPLICATION_JSON, writer.toString()); + + return; + + } // /** // * Impose a lexical ordering on the file names. This is used for the HALog This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
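Note that getVersion() can now return null (typically when running from the IDE, where the generated BuildInfo class is absent), while doHealthStatus() still passes the result to writeStringField() unconditionally. A defensive variant, not part of this commit, might look like the following fragment inside doHealthStatus(), reusing the json generator shown above:

{{{
// Sketch only: null-safe version reporting after the r8512 change.
final String version = Banner.getVersion(); // null when BuildInfo is absent
if (version != null) {
    json.writeStringField("version", version);
} else {
    json.writeNullField("version"); // make the absence explicit in the payload
}
}}}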
From: <tho...@us...> - 2014-07-02 18:58:18
Revision: 8517 http://sourceforge.net/p/bigdata/code/8517 Author: thompsonbry Date: 2014-07-02 18:58:15 +0000 (Wed, 02 Jul 2014) Log Message: ----------- Bug fix for jackson support. The library is now correctly staged for CI and staging based deployments. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-07-02 14:02:27 UTC (rev 8516) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-07-02 18:58:15 UTC (rev 8517) @@ -69,9 +69,9 @@ fastutil.version=5.1.5 dsiutils.version=1.0.6-020610 lgplutils.version=1.0.7-270114 -ganglia-version=1.0.4 -gas-version=0.1.0 -jackson-version=2.2.3 +ganglia.version=1.0.4 +gas.version=0.1.0 +jackson.version=2.2.3 blueprints.version=2.5.0 jettison.version=1.3.3 rexster.version=2.5.0 Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-07-02 14:02:27 UTC (rev 8516) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-07-02 18:58:15 UTC (rev 8517) @@ -986,7 +986,7 @@ tofile="${dist.lib}/lgplutils.jar" /> <copy file="${bigdata.lib}/unimi/fastutil-${fastutil.version}.jar" tofile="${dist.lib}/fastutil.jar" /> - <copy file="${bigdata.lib}/bigdata-ganglia-${ganglia-version}.jar" + <copy file="${bigdata.lib}/bigdata-ganglia-${ganglia.version}.jar" tofile="${dist.lib}/bigdata-ganglia.jar" /> <!--copy file="${bigdata.lib}/bigdata-gas-${gas.version}.jar" tofile="${dist.lib}/bigdata-gas.jar" --> @@ -1016,8 +1016,11 @@ <!-- Blueprints library --> <copy file="${bigdata-blueprints.lib}/blueprints-core-${blueprints.version}.jar" tofile="${dist.lib}/blueprints-core.jar" /> - + <!-- JSON --> + <copy file="${bigdata-sails.lib}/jackson-core-${jackson.version}.jar" + tofile="${dist.lib}/jackson-core.jar" /> + <!-- jetty library --> <copy file="${bigdata-jetty.lib}/jetty-continuation-${jetty.version}.jar" tofile="${dist.lib}/jetty-continuation.jar" /> @@ -1869,7 +1872,7 @@ <!-- TODO ${path.separator}${dist.lib}/bigdata-gas.jar --> <property name="javac.test.classpath" - 
value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar${path.separator}${dist.lib}/blueprints-core.jar${path.separator}${blueprints-test.jar}${path.separator}${jettison.jar}" /> + 
value="${classes.dir}${path.separator}${junit.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/classserver.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar${path.separator}${dist.lib}/blueprints-core.jar${path.separator}${dist.lib}/jackson-core.jar${path.separator}${blueprints-test.jar}${path.separator}${jettison.jar}" /> <echo>javac </echo> @@ -2257,12 +2260,13 @@ <pathelement location="${dist.lib}/httpcore.jar" /> <pathelement location="${dist.lib}/httpmime.jar" /> <pathelement location="${dist.lib}/blueprints-core.jar" /> + <pathelement location="${dist.lib}/jackson-core.jar" /> <pathelement location="${blueprints-test.jar}" /> <pathelement location="${jettison.jar}" /> </path> <property name="run.class.path" - 
value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar${path.separator}${dist.lib}/blueprints-core.jar${path.separator}${blueprints-test.jar}${path.separator}${jettison.jar}" /> + 
value="${junit.jar}${path.separator}${bigdata-test.jar}${path.separator}${junit-ext.jar}${path.separator}${sesame-sparql-test.jar}${path.separator}${sesame-store-test.jar}${path.separator}${sesame-rio-test.jar}${path.separator}${dist.lib}/bigdata.jar${path.separator}${dist.lib}/colt.jar${path.separator}${dist.lib}/highscalelib.jar${path.separator}${dist.lib}/dsiutils.jar${path.separator}${dist.lib}/lgplutils.jar${path.separator}${dist.lib}/fastutil.jar${path.separator}${dist.lib}/bigdata-ganglia.jar${path.separator}${dist.lib}/bigdata-gas${path.separator}${dist.lib}/icu4j.jar${path.separator}${dist.lib}/icu4j-charset.jar${path.separator}${dist.lib}/jsk-lib.jar${path.separator}${dist.lib}/jsk-platform.jar${path.separator}${dist.lib}/log4j.jar${path.separator}${dist.lib}/lucene-analyzer.jar${path.separator}${dist.lib}/lucene-core.jar${path.separator}${dist.lib}/openrdf-sesame.jar${path.separator}${dist.lib}/slf4j.jar${path.separator}${dist.lib}/slf4j-log4j.jar${path.separator}${dist.lib}/nxparser.jar${path.separator}${dist.lib}/zookeeper.jar${path.separator}${dist.lib}/jetty-continuation.jar${path.separator}${dist.lib}/jetty-http.jar${path.separator}${dist.lib}/jetty-io.jar${path.separator}${dist.lib}/jetty-jmx.jar${path.separator}${dist.lib}/jetty-jndi.jar${path.separator}${dist.lib}/jetty-server.jar${path.separator}${dist.lib}/jetty-util.jar${path.separator}${dist.lib}/jetty-webapp.jar${path.separator}${dist.lib}/jetty-servlet.jar${path.separator}${dist.lib}/jetty-security.jar${path.separator}${dist.lib}/jetty-xml.jar${path.separator}${dist.lib}/jetty-rewrite.jar${path.separator}${dist.lib}/jetty-client.jar${path.separator}${dist.lib}/jetty-proxy.jar${path.separator}${dist.lib}/servlet-api.jar${path.separator}${dist.lib}/commons-codec.jar${path.separator}${dist.lib}/commons-fileupload.jar${path.separator}${dist.lib}/commons-io.jar${path.separator}${dist.lib}/commons-logging.jar${path.separator}${dist.lib}/httpclient.jar${path.separator}${dist.lib}/httpclient-cache.jar${path.separator}${dist.lib}/httpcore.jar${path.separator}${dist.lib}/httpmime.jar${path.separator}${dist.lib}/blueprints-core.jar${path.separator}${dist.lib}/jackson-core.jar${path.separator}${blueprints-test.jar}${path.separator}${jettison.jar}" /> <echo> classpath: ${run.class.path} </echo> @@ -2424,6 +2428,7 @@ <sysproperty key="jetty-proxy.jar" value="${dist.lib}/jetty-proxy.jar" /> <sysproperty key="servlet-api.jar" value="${dist.lib}/servlet-api.jar" /> <sysproperty key="blueprints-core.jar" value="${dist.lib}/blueprints-core.jar" /> + <sysproperty key="jackson-core.jar" value="${dist.lib}/jackson-core.jar" /> <!-- Jini group name --> <sysproperty key="bigdata.fedname" value="${bigdata.fedname}" /> @@ -2628,7 +2633,14 @@ </java> </target> - <target name="start-bigdata" depends="compile" description="Start the Bigdata Server (triples mode)."> + <!-- This is NOT recommended for development or deployment. It is a --> + <!-- fast and simple bootstrap for people getting started with bigdata --> + <!-- for the first time. bigdata is developed using eclipse, so that --> + <!-- makes the most sense for development. 
The recommended deployers --> + <!-- are documented at the following links: --> + <!-- http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer --> + <!-- http://wiki.bigdata.com/wiki/index.php/HAJournalServer --> + <target name="start-bigdata" depends="compile" description="Start the Bigdata Server (simple triples mode start)."> <java classname="com.bigdata.rdf.sail.webapp.NanoSparqlServer" failonerror="true" fork="true" logerror="true"> <classpath refid="runtime.classpath" /> <jvmarg value="-server"/> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
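Two distinct problems were fixed here: the ganglia/gas/jackson properties used hyphenated names (ganglia-version) while build.xml and the rest of build.properties use the dotted convention (ganglia.version), and jackson-core was never copied into ${dist.lib}, so CI and staging deployments could not see it. A hypothetical Ant guard that would fail the build early if the jar ever goes missing again (not part of the commit; the target and property names here are invented):

{{{
<target name="check-jackson-staged">
  <!-- Sets jackson.staged only if the jar actually landed in dist.lib. -->
  <available file="${dist.lib}/jackson-core.jar" property="jackson.staged"/>
  <fail unless="jackson.staged"
        message="jackson-core.jar was not staged into ${dist.lib}"/>
</target>
}}}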
From: <tob...@us...> - 2014-07-02 22:33:26
Revision: 8519 http://sourceforge.net/p/bigdata/code/8519 Author: tobycraig Date: 2014-07-02 22:33:22 +0000 (Wed, 02 Jul 2014) Log Message: ----------- Fixed health for standalone deployments, set unjoined services to red, added replication factor to healthy status Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-07-02 19:18:15 UTC (rev 8518) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-07-02 22:33:22 UTC (rev 8519) @@ -903,55 +903,67 @@ public void doHealthStatus(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (!(indexManager instanceof HAJournal)) - return; - - final HAJournal journal = (HAJournal) indexManager; - - final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); - StringWriter writer = new StringWriter(); JsonFactory factory = new JsonFactory(); JsonGenerator json = factory.createGenerator(writer); json.writeStartObject(); - json.writeStringField("version", Banner.getVersion()); json.writeNumberField("timestamp", new Date().getTime()); - if (quorum.isQuorumFullyMet(quorum.token())) { - json.writeStringField("status", "Good"); - json.writeStringField("details", "All servers joined"); + + if (!(indexManager instanceof HAJournal)) { + + // standalone + json.writeStringField("deployment", "standalone"); + } else { - // at least one server is not available - // status is either Warning or Bad - if (quorum.isQuorumMet()) { - json.writeStringField("status", "Warning"); + + // HA + json.writeStringField("deployment", "HA"); + + final HAJournal journal = (HAJournal) indexManager; + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + if (quorum.isQuorumFullyMet(quorum.token())) { + json.writeStringField("status", "Good"); + json.writeStringField("details", + "All servers (" + quorum.replicationFactor() + ") joined"); } else { - json.writeStringField("status", "Bad"); + // at least one server is not available + // status is either Warning or Bad + if (quorum.isQuorumMet()) { + json.writeStringField("status", "Warning"); + } else { + json.writeStringField("status", "Bad"); + } + json.writeStringField( + "details", + "Only " + quorum.getJoined().length + " of target " + + quorum.replicationFactor() + " servers joined"); } - json.writeStringField("details", "Only " + quorum.getJoined().length - + " of target " + quorum.replicationFactor() - + " servers joined"); - } - json.writeFieldName("services"); - json.writeStartArray(); + json.writeFieldName("services"); + json.writeStartArray(); - final UUID[] pipeline = quorum.getPipeline(); - final UUID[] joined = quorum.getJoined(); + final UUID[] members = quorum.getMembers(); + final UUID[] joined = quorum.getJoined(); - for (UUID serviceId : pipeline) { - final boolean isLeader = serviceId.equals(quorum.getLeaderId()); - final boolean isFollower = indexOf(serviceId, joined) > 
0; + for (UUID serviceId : members) { + final boolean isLeader = serviceId.equals(quorum.getLeaderId()); + final boolean isFollower = indexOf(serviceId, joined) > 0; - json.writeStartObject(); - json.writeStringField("id", serviceId.toString()); - json.writeStringField("status", isLeader ? "leader" - : (isFollower ? "follower" : "unready")); - json.writeEndObject(); + json.writeStartObject(); + json.writeStringField("id", serviceId.toString()); + json.writeStringField("status", isLeader ? "leader" + : (isFollower ? "follower" : "unready")); + json.writeEndObject(); + } + + json.writeEndArray(); } - json.writeEndArray(); json.writeEndObject(); json.close(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-07-02 19:18:15 UTC (rev 8518) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-07-02 22:33:22 UTC (rev 8519) @@ -401,14 +401,13 @@ return; } - if (req.getParameter(HEALTH) != null - && getIndexManager() instanceof AbstractJournal - && ((AbstractJournal) getIndexManager()).getQuorum() != null) { // for HA1 - new HAStatusServletUtil(getIndexManager()).doHealthStatus(req, resp); - - return; - } + if (req.getParameter(HEALTH) != null) { + new HAStatusServletUtil(getIndexManager()).doHealthStatus(req, resp); + + return; + } + // IRunningQuery objects currently running on the query controller. final boolean showQueries = req.getParameter(SHOW_QUERIES) != null; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-07-02 19:18:15 UTC (rev 8518) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-07-02 22:33:22 UTC (rev 8519) @@ -352,7 +352,7 @@ } .health-bad { - background-color: red; + background-color: tomato; } #links { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-07-02 19:18:15 UTC (rev 8518) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-07-02 22:33:22 UTC (rev 8519) @@ -1406,6 +1406,13 @@ function getHealth(e) { e.preventDefault(); $.get('/status?health', function(data) { + + if(data.deployment == 'standalone') { + $('#health-tab').html('<div class="box">Server operating in standalone mode.</div>'); + $('#tab-selector a[data-target=health]').unbind('click'); + return; + } + $('#health-overview .health-status span').html(data.status); $('#health-overview').removeClass('health-good health-warning health-bad').addClass('health-' + data.status.toLowerCase()); $('#health-overview .health-details span').html(data.details); @@ -1424,10 +1431,10 @@ health = 'good'; break; case 'unready': - health = 'warning'; + health = 'bad'; break; default: - health = 'bad'; + health = 'warning'; } div.addClass('box health-' + health); div.appendTo($('#health-services')); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
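After this change, /status?health answers with one of two shapes, sketched below with placeholder timestamps and UUIDs (the field names and nesting follow the committed servlet code):

{{{
standalone:

  {"version":"1.3.1","timestamp":1404340402000,"deployment":"standalone"}

HA, quorum fully met with replicationFactor 3:

  {"version":"1.3.1","timestamp":1404340402000,"deployment":"HA",
   "status":"Good","details":"All servers (3) joined",
   "services":[{"id":"placeholder-uuid-1","status":"leader"},
               {"id":"placeholder-uuid-2","status":"follower"},
               {"id":"placeholder-uuid-3","status":"follower"}]}
}}}

The workbench short-circuits on deployment == 'standalone', replacing the health tab contents and unbinding the click handler; the per-service boxes are rendered only for HA, where 'unready' now maps to the bad (red) style and any unrecognized status to warning.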
From: <tho...@us...> - 2014-07-08 13:52:51
Revision: 8524 http://sourceforge.net/p/bigdata/code/8524 Author: thompsonbry Date: 2014-07-08 13:52:46 +0000 (Tue, 08 Jul 2014) Log Message: ----------- See #988 (EXISTS is slow) As indicated off-list, we have existing code paths that support a non-vectored subquery per source solution for (NOT) EXISTS using the SubqueryOp. I have reenabled this code and verified that it passes the test suite (except for one detailed check of the physical operator plan that is generated for NOT EXISTS). I have also added a LIMIT ONE when using the sub-query version. However, I observe that the run time for the test described above is a constant .18 seconds regardless of which code path is used. In neither case do I observe the slow performance described on this ticket of 5 seconds. That is, the test case does not demonstrate the performance problem. Please check the committed test cases in TestNegation.java. See the test_exists_988a() and test_exists_988b() methods at the end of that file. The sub-query plan is currently OFF {{{ private static PipelineOp addExistsSubquery(PipelineOp left, final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, final AST2BOpContext ctx) { if (true) { // Vectored sub-plan evaluation. return addExistsSubqueryFast(left, subqueryRoot, doneSet, ctx); } else { // Non-vectored sub-query evaluation. return addExistsSubquerySubquery(left, subqueryRoot, doneSet, ctx); } } }}} I have not tested the use of a DISTINCT solutions operator in the sub-plan, but I have marked the code for where that operator should be introduced. {{{ * FIXME EXISTS: Try DISTINCT in the sub-plan and compare to correctness * without for (NOT) EXISTS and to performance of the non-vectored code * path for EXISTS> }}} Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryJoinAnnotations.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988.trig branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.srx Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryJoinAnnotations.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryJoinAnnotations.java 2014-07-03 17:48:19 UTC (rev 8523) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryJoinAnnotations.java 2014-07-08 13:52:46 UTC (rev 8524) @@ -33,11 +33,6 @@ * Annotations for joins against a subquery. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: SubqueryJoinAnnotations.java 5491 2011-11-02 20:11:07Z - * thompsonbry $ - * - * @deprecated With {@link SubqueryOp}, which is the sole class which extends - * this interface. */ public interface SubqueryJoinAnnotations extends JoinAnnotations, SubqueryAnnotations { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java 2014-07-03 17:48:19 UTC (rev 8523) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/SubqueryOp.java 2014-07-08 13:52:46 UTC (rev 8524) @@ -92,20 +92,28 @@ * evaluation semantics under these conditions. This is handled by "projecting" * only those variables into the subquery which it will project out. * + * <h3>Efficiency</h3> + * + * This non-vectored operator issues one sub-query per source solution flowing + * into the operator. In general, it is MUCH more efficient to vector the + * solutions into a sub-plan. The latter is accomplished by building a hash + * index over the source solutions, flooding them into the sub-plan, and then + * executing the appropriate hash join back against the source solutions after + * the sub-plan. + * <p> + * There are a few cases where it may make sense to use the non-vectored + * operator. For example, for EXISTS where LIMIT ONE can be imposed on the + * subquery. However, there can still be cases where the vectored sub-plan is + * more efficient. + * * @see AbstractSubqueryOp * @see JVMNamedSubqueryOp * @see HTreeNamedSubqueryOp * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * - * @deprecated This operator is no longer in use. The last use case which we had - * for this was in support of ASK subquery evaluation for (NOT) - * EXISTS. - * <p> - * It is possible that another use case MIGHT be found to support - * parallel evaluation of named subqueries. However, that also might - * be handled by a thread pool if we move to interleaved query plan - * generation and query plan evaluation in support of the RTO. + * @see <a href="http://trac.bigdata.com/ticket/988"> bad performance for FILTER + * EXISTS </a> */ public class SubqueryOp extends PipelineOp { @@ -222,6 +230,7 @@ } + @Override public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { return new FutureTask<Void>(new ControllerTask(this, context)); @@ -287,6 +296,7 @@ /** * Evaluate the subquery. */ + @Override public Void call() throws Exception { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2014-07-03 17:48:19 UTC (rev 8523) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2014-07-08 13:52:46 UTC (rev 8524) @@ -47,6 +47,7 @@ import com.bigdata.bop.controller.NamedSetAnnotations; import com.bigdata.bop.controller.ServiceCallJoin; import com.bigdata.bop.controller.Steps; +import com.bigdata.bop.controller.SubqueryOp; import com.bigdata.bop.controller.Union; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.join.HTreeHashIndexOp; @@ -159,7 +160,6 @@ * {@link PipelineOp}s. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @see <a href= * "https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=QueryEvaluation" @@ -1831,24 +1831,37 @@ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/515"> * Query with two "FILTER NOT EXISTS" expressions returns no * results</a> + * @see <a href="http://trac.bigdata.com/ticket/988"> bad performance for + * FILTER EXISTS </a> * @see http://www.w3.org/2009/sparql/wiki/Design:Negation */ private static PipelineOp addExistsSubquery(PipelineOp left, final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, final AST2BOpContext ctx) { -// if (true) { -// return addExistsSubqueryFast(left, subqueryRoot, doneSet, ctx); -// } else { -// return addExistsSubquerySubquery(left, subqueryRoot, doneSet, ctx); -// } -// -// } -// -// private static PipelineOp addExistsSubqueryFast(PipelineOp left, -// final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, -// final AST2BOpContext ctx) { + if (true) { // TODO Add query hint to allow choice of strategy. + // Vectored sub-plan evaluation. + return addExistsSubqueryFast(left, subqueryRoot, doneSet, ctx); + } else { + // Non-vectored sub-query evaluation. + return addExistsSubquerySubquery(left, subqueryRoot, doneSet, ctx); + } + + } + /** + * (NOT) EXISTS code path using a vectored sub-plan. + * + * @param left + * @param subqueryRoot + * @param doneSet + * @param ctx + * @return + */ + private static PipelineOp addExistsSubqueryFast(PipelineOp left, + final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, + final AST2BOpContext ctx) { + // Only Sub-Select is supported by this code path. switch (subqueryRoot.getQueryType()) { case ASK: @@ -1973,6 +1986,12 @@ // new NV(ProjectionOp.Annotations.SELECT, projectedVars)// // ); + /* + * FIXME EXISTS: Try DISTINCT in the sub-plan and compare to correctness + * without for (NOT) EXISTS and to performance of the non-vectored code + * path for EXISTS> + */ + // Append the subquery plan. // left = convertQueryBase(left, subqueryRoot, doneSet, ctx); left = convertJoinGroupOrUnion(left, subqueryRoot.getWhereClause(), @@ -2027,66 +2046,80 @@ } -// /** -// * A slow implementation using one {@link SubqueryOp} per source solution. -// * -// * @deprecated by -// * {@link #addExistsSubqueryFast(PipelineOp, SubqueryRoot, Set, AST2BOpContext)} -// */ -// private static PipelineOp addExistsSubquerySubquery(PipelineOp left, -// final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, -// final AST2BOpContext ctx) { -// -// // Only "ASK" subqueries are allowed. -// switch (subqueryRoot.getQueryType()) { -// case ASK: -// break; -// default: -// throw new UnsupportedOperationException(); -// } -// -// @SuppressWarnings("rawtypes") -// final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); -// -// final IConstraint[] joinConstraints = getJoinConstraints( -// getJoinConstraints(subqueryRoot), needsMaterialization); -// -// final boolean aggregate = StaticAnalysis.isAggregate(subqueryRoot); -// -// /* -// * The anonymous variable which gets bound based on the (NOT) EXISTS -// * graph pattern. 
-// */ -// final IVariable<?> askVar = subqueryRoot.getAskVar(); -// -// if (askVar == null) -// throw new UnsupportedOperationException(); -// -// final PipelineOp subqueryPlan = convertQueryBase(null/* left */, -// subqueryRoot, doneSet, ctx); -// -// left = new SubqueryOp(leftOrEmpty(left),// SUBQUERY -// new NV(Predicate.Annotations.BOP_ID, ctx.nextId()),// -// new NV(SubqueryOp.Annotations.SUBQUERY, subqueryPlan),// -// new NV(SubqueryOp.Annotations.JOIN_TYPE, JoinTypeEnum.Normal),// -// new NV(SubqueryOp.Annotations.ASK_VAR, askVar),// -// new NV(SubqueryOp.Annotations.SELECT, subqueryRoot.getProjection().getProjectionVars()),// -// new NV(SubqueryOp.Annotations.CONSTRAINTS, joinConstraints),// -// new NV(SubqueryOp.Annotations.IS_AGGREGATE, aggregate)// -// ); -// -// /* -// * For each filter which requires materialization steps, add the -// * materializations steps to the pipeline and then add the filter to the -// * pipeline. -// */ -// left = addMaterializationSteps(ctx, left, doneSet, -// needsMaterialization, subqueryRoot.getQueryHints()); -// -// return left; -// -// } + /** + * A non-vectored implementation for (NOT) EXISTS using one + * {@link SubqueryOp} per source solution. + */ + private static PipelineOp addExistsSubquerySubquery(PipelineOp left, + final SubqueryRoot subqueryRoot, final Set<IVariable<?>> doneSet, + final AST2BOpContext ctx) { + // Only "ASK" subqueries are allowed. + switch (subqueryRoot.getQueryType()) { + case ASK: + break; + default: + throw new UnsupportedOperationException(); + } + + @SuppressWarnings("rawtypes") + final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); + + final IConstraint[] joinConstraints = getJoinConstraints( + getJoinConstraints(subqueryRoot), needsMaterialization); + + final boolean aggregate = StaticAnalysis.isAggregate(subqueryRoot); + + /* + * The anonymous variable which gets bound based on the (NOT) EXISTS + * graph pattern. + */ + final IVariable<?> askVar = subqueryRoot.getAskVar(); + + if (askVar == null) + throw new UnsupportedOperationException(); + + /* + * Impose LIMIT ONE on the non-vectored sub-query. + * + * Note: This reduces the amount of work for the sub-query. + * + * For EXISTS, this means that we stop if we find at least one solution. + * The askVar becomes bound to true. The IConstraint associated with the + * EXISTS FILTER will therefore evaluate to true. + * + * For NOT EXISTS, this means that we stop if we find at least one + * solution. The askVar becomes bound to true (this is the same as for + * EXISTS). The IConstraint associated with the NOT EXISTS FILTER will + * therefore evaluate to false since it tests !askVar. 
+ */ + subqueryRoot.setSlice(new SliceNode(0L/* offset */, 1L/* limit */)); + + final PipelineOp subqueryPlan = convertQueryBase(null/* left */, + subqueryRoot, doneSet, ctx); + + left = new SubqueryOp(leftOrEmpty(left),// SUBQUERY + new NV(Predicate.Annotations.BOP_ID, ctx.nextId()),// + new NV(SubqueryOp.Annotations.SUBQUERY, subqueryPlan),// + new NV(SubqueryOp.Annotations.JOIN_TYPE, JoinTypeEnum.Normal),// + new NV(SubqueryOp.Annotations.ASK_VAR, askVar),// + new NV(SubqueryOp.Annotations.SELECT, subqueryRoot.getProjection().getProjectionVars()),// + new NV(SubqueryOp.Annotations.CONSTRAINTS, joinConstraints),// + new NV(SubqueryOp.Annotations.IS_AGGREGATE, aggregate)// + ); + + /* + * For each filter which requires materialization steps, add the + * materializations steps to the pipeline and then add the filter to the + * pipeline. + */ + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + subqueryRoot.getQueryHints(), ctx); + + return left; + + } + /** * Generate the query plan for a join group or union. This is invoked for * the top-level "WHERE" clause and may be invoked recursively for embedded Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java 2014-07-03 17:48:19 UTC (rev 8523) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java 2014-07-08 13:52:46 UTC (rev 8524) @@ -35,7 +35,6 @@ * Test suite for named and default graph stuff. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestNamedGraphs extends AbstractDataDrivenSPARQLTestCase { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java 2014-07-03 17:48:19 UTC (rev 8523) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNegation.java 2014-07-08 13:52:46 UTC (rev 8524) @@ -32,8 +32,6 @@ import org.openrdf.model.vocabulary.RDF; -import com.bigdata.bop.IVariable; -import com.bigdata.bop.Var; import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; import com.bigdata.rdf.model.BigdataValueFactory; @@ -59,7 +57,6 @@ * Test suite for SPARQL negation (EXISTS and MINUS). * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class TestNegation extends AbstractDataDrivenSPARQLTestCase { @@ -798,4 +795,70 @@ } + /** + * Performance related test for EXISTS. This is NOT an EXISTS query. + * However, EXISTS is translated into an ASK sub-query. This runs the + * equivalent ASK query. 
+ * + * <pre> + * prefix eg: <eg:> + * ASK + * FROM eg:g + * { BIND (1 as ?t) + * ?a eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p ?b + * } + * + * <pre> + * + * @throws Exception + * + * @see <a href="http://trac.bigdata.com/ticket/988"> bad + * performance for FILTER EXISTS </a> + */ + public void test_exists_988a() throws Exception { + + new TestHelper( + "exists-988a", // testURI, + "exists-988a.rq",// queryFileURL + "exists-988.trig",// dataFileURL + "exists-988a.srx" // resultFileURL, +// false, // laxCardinality +// true // checkOrder + ).runTest(); + + } + + /** + * Performance related test for EXISTS. + * + * <pre> + * prefix eg: <eg:> + * SELET * + * FROM eg:g + * { BIND (1 as ?t) + * FILTER EXISTS { + * ?a eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p ?b + * } + * } + * + * <pre> + * + * @throws Exception + * + * @see <a href="http://trac.bigdata.com/ticket/988"> bad + * performance for FILTER EXISTS </a> + */ + public void test_exists_988b() throws Exception { + + new TestHelper( + "exists-988b", // testURI, + "exists-988b.rq",// queryFileURL + "exists-988.trig",// dataFileURL + "exists-988b.srx" // resultFileURL, +// false, // laxCardinality +// true // checkOrder + ).runTest(); + + } + } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988.trig =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988.trig (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988.trig 2014-07-08 13:52:46 UTC (rev 8524) @@ -0,0 +1,10 @@ +@prefix eg: <http://www.bigdata.com/> . + +eg: { + eg:a eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . + eg:b eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . + eg:c eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . + eg:d eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . + eg:e eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . + eg:f eg:p eg:a, eg:b, eg:c, eg:d, eg:e, eg:f . 
+} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.rq 2014-07-08 13:52:46 UTC (rev 8524) @@ -0,0 +1,7 @@ +prefix eg: <http://www.bigdata.com/> + +ASK +FROM eg:g +{ BIND (1 as ?t) + ?a eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p ?b +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988a.srx 2014-07-08 13:52:46 UTC (rev 8524) @@ -0,0 +1,6 @@ +<?xml version="1.0"?> +<sparql xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + </head> + <boolean>false</boolean> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.rq 2014-07-08 13:52:46 UTC (rev 8524) @@ -0,0 +1,9 @@ +prefix eg: <http://www.bigdata.com/> + +SELECT ?t +FROM eg:g +{ BIND (1 as ?t) + FILTER EXISTS { + ?a eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p ?b + } +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/exists-988b.srx 2014-07-08 13:52:46 UTC (rev 8524) @@ -0,0 +1,11 @@ +<?xml version="1.0"?> +<sparql + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" + xmlns="http://www.w3.org/2005/sparql-results#" > + <head> + <variable name="t"/> + </head> + <results> + </results> +</sparql> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
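The committed tests only exercise EXISTS; NOT EXISTS rides on the same machinery with the generated filter test inverted, as the new comment block in addExistsSubquerySubquery() explains. An illustrative counterpart to exists-988b.rq (this query is not one of the committed test files):

{{{
prefix eg: <http://www.bigdata.com/>

SELECT ?t
FROM eg:g
{ BIND (1 as ?t)
  FILTER NOT EXISTS {
    ?a eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p/eg:p ?b
  }
}
}}}

Under the non-vectored path the LIMIT ONE slice applies identically: the ASK sub-query stops at the first match, the anonymous askVar is bound to true just as for EXISTS, and the NOT EXISTS filter then rejects the source solution because it tests !askVar.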
From: <tho...@us...> - 2014-07-10 15:11:54
Revision: 8532 http://sourceforge.net/p/bigdata/code/8532 Author: thompsonbry Date: 2014-07-10 15:11:48 +0000 (Thu, 10 Jul 2014) Log Message: ----------- Check-point on the stored-query service. A simple service has been implemented that can execute a single SPARQL query. I am going to refactor to allow stored queries to do complex application processing including the submission of multiple queries while holding a set of locks. I moved the core code for the group commit interfaces used by the REST API into a different package and am reusing them for the stored query service. I pulled out the ServiceParams helper class so it can be used with services that use the openrdf data structures (BindingSet[]) versus the bigdata data structures (IBindingSet[]). See #989 (stored query service) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AbstractServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SampleServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ValuesServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/CustomServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/BigdataNativeMockServiceFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ServiceParams.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/package.html branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForIndexManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/IApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/package.html branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/stored-query-001.rq 
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/stored-query-001.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/stored-query-001.ttl branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/service/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/service/storedquery/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/service/storedquery/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/service/storedquery/TestStoredQueryService.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/AbstractRestApiTask.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForIndexManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RestApiTaskForJournal.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AbstractServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AbstractServiceFactory.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AbstractServiceFactory.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -27,418 +27,30 @@ package com.bigdata.rdf.sparql.ast.eval; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import org.apache.log4j.Logger; -import org.openrdf.model.Literal; -import org.openrdf.model.URI; -import org.openrdf.model.Value; - -import com.bigdata.bop.IVariable; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.sparql.ast.GraphPatternGroup; -import com.bigdata.rdf.sparql.ast.IGroupMemberNode; -import com.bigdata.rdf.sparql.ast.StatementPatternNode; -import com.bigdata.rdf.sparql.ast.TermNode; import com.bigdata.rdf.sparql.ast.service.BigdataServiceCall; import com.bigdata.rdf.sparql.ast.service.ServiceCallCreateParams; import com.bigdata.rdf.sparql.ast.service.ServiceFactory; import com.bigdata.rdf.sparql.ast.service.ServiceNode; import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BD; /** - * An abstract ServiceFactory that deals with service parameters (magic - * predicates that connfigure the service). + * An abstract {@link ServiceFactory} that deals with service parameters (magic + * predicates that configure the service) in a standardized manner using the + * {@link ServiceParams} helper class. */ public abstract class AbstractServiceFactory implements ServiceFactory { - private static final Logger log = Logger - .getLogger(AbstractServiceFactory.class); - - /** - * The service parameters. Can be multi-valued. Map from predicate to - * one or more TermNode values. - */ - public static class ServiceParams { - - /** - * The map of service params. - */ - final Map<URI, List<TermNode>> params; - - public ServiceParams() { - - this.params = new LinkedHashMap<URI, List<TermNode>>(); - - } - - /** - * Add. 
- */ - public void add(final URI param, final TermNode value) { - - if (!params.containsKey(param)) { - - params.put(param, new LinkedList<TermNode>()); - - } - - params.get(param).add(value); - - } - - /** - * Set (clear and add). - */ - public void set(final URI param, final TermNode value) { - - clear(param); - - add(param, value); - - } - - /** - * Clear. - */ - public void clear(final URI param) { - - params.remove(param); - - } - - /** - * Check for existence. - */ - public boolean contains(final URI param) { - - return params.containsKey(param); - - } - - /** - * Get a singleton value for the specified param. - */ - public TermNode get(final URI param, final TermNode defaultValue) { - - if (params.containsKey(param)) { - - final List<TermNode> values = params.get(param); - - if (values.size() > 1) { - - throw new RuntimeException("not a singleton param"); - - } - - return values.get(0); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public Boolean getAsBoolean(final URI param) { - - return getAsBoolean(param, null); - - } - - /** - * Helper. - */ - public Boolean getAsBoolean(final URI param, final Boolean defaultValue) { - - final Literal term = getAsLiteral(param, null); - - if (term != null) { - - return term.booleanValue(); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public Integer getAsInt(final URI param) { - - return getAsInt(param, null); - - } - - /** - * Helper. - */ - public Integer getAsInt(final URI param, final Integer defaultValue) { - - final Literal term = getAsLiteral(param, null); - - if (term != null) { - - return term.intValue(); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public Long getAsLong(final URI param) { - - return getAsLong(param, null); - - } - - /** - * Helper. - */ - public Long getAsLong(final URI param, final Long defaultValue) { - - final Literal term = getAsLiteral(param, null); - - if (term != null) { - - return term.longValue(); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public String getAsString(final URI param) { - - return getAsString(param, null); - - } - - /** - * Helper. - */ - public String getAsString(final URI param, final String defaultValue) { - - final Literal term = getAsLiteral(param, null); - - if (term != null) { - - return term.stringValue(); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public Literal getAsLiteral(final URI param) { - - return getAsLiteral(param, null); - - } - - /** - * Helper. - */ - public Literal getAsLiteral(final URI param, final Literal defaultValue) { - - final TermNode term = get(param, null); - - if (term != null) { - - if (term.isVariable()) { - - throw new IllegalArgumentException("not a constant"); - - } - - final Value v = term.getValue(); - - if (!(v instanceof Literal)) { - - throw new IllegalArgumentException("not a literal"); - - } - - return ((Literal) v); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public URI getAsURI(final URI param) { - - return getAsURI(param, null); - - } - - /** - * Helper. - */ - public URI getAsURI(final URI param, final URI defaultValue) { - - final TermNode term = get(param, null); - - if (term != null) { - - if (term.isVariable()) { - - throw new IllegalArgumentException("not a constant"); - - } - - final Value v = term.getValue(); - - if (!(v instanceof URI)) { - - throw new IllegalArgumentException("not a uri"); - - } - - return ((URI) v); - - } - - return defaultValue; - - } - - /** - * Helper. 
- */ - public IVariable<IV> getAsVar(final URI param) { - - return getAsVar(param, null); - - } - - /** - * Helper. - */ - public IVariable<IV> getAsVar(final URI param, final IVariable<IV> defaultValue) { - - final TermNode term = get(param, null); - - if (term != null) { - - if (!term.isVariable()) { - - throw new IllegalArgumentException("not a var"); - - } - - return (IVariable<IV>) term.getValueExpression(); - - } - - return defaultValue; - - } - - /** - * Helper. - */ - public List<TermNode> get(final URI param) { - - if (params.containsKey(param)) { - - return params.get(param); - - } - - return Collections.EMPTY_LIST; - - } - - /** - * Iterator. - */ - public Iterator<Map.Entry<URI, List<TermNode>>> iterator() { - - return params.entrySet().iterator(); - - } - - public String toString() { - - final StringBuilder sb = new StringBuilder(); - - sb.append("["); - - for (Map.Entry<URI, List<TermNode>> e : params.entrySet()) { - - final URI param = e.getKey(); - - final List<TermNode> terms = e.getValue(); - - sb.append(param).append(": "); - - if (terms.size() == 1) { - - sb.append(terms.get(0)); - - } else { - - sb.append("["); - for (TermNode t : terms) { - - sb.append(t).append(", "); - - } - sb.setLength(sb.length()-2); - sb.append("]"); - - } - - sb.append(", "); - - } - - if (sb.length() > 1) - sb.setLength(sb.length()-2); - sb.append("]"); - - return sb.toString(); - - } - - } + public AbstractServiceFactory() { - public AbstractServiceFactory() { } - + /** * Create a {@link BigdataServiceCall}. Does the work of collecting * the service parameter triples and then delegates to * {@link #create(ServiceCallCreateParams, ServiceParams)}. */ - public BigdataServiceCall create(final ServiceCallCreateParams params) { + @Override + final public BigdataServiceCall create(final ServiceCallCreateParams params) { if (params == null) throw new IllegalArgumentException(); @@ -453,12 +65,8 @@ if (serviceNode == null) throw new IllegalArgumentException(); - final ServiceParams serviceParams = gatherServiceParams(params); + final ServiceParams serviceParams = ServiceParams.gatherServiceParams(params); - if (log.isDebugEnabled()) { - log.debug(serviceParams); - } - return create(params, serviceParams); } @@ -470,68 +78,4 @@ final ServiceCallCreateParams params, final ServiceParams serviceParams); - /** - * Gather the service params (any statement patterns with the subject - * of {@link BD#SERVICE_PARAM}. 
- */ - protected ServiceParams gatherServiceParams( - final ServiceCallCreateParams createParams) { - - if (createParams == null) - throw new IllegalArgumentException(); - - final AbstractTripleStore store = createParams.getTripleStore(); - - if (store == null) - throw new IllegalArgumentException(); - - final ServiceNode serviceNode = createParams.getServiceNode(); - - if (serviceNode == null) - throw new IllegalArgumentException(); - - final GraphPatternGroup<IGroupMemberNode> group = - serviceNode.getGraphPattern(); - - if (group == null) - throw new IllegalArgumentException(); - - final ServiceParams serviceParams = new ServiceParams(); - - final Iterator<IGroupMemberNode> it = group.iterator(); - - while (it.hasNext()) { - - final IGroupMemberNode node = it.next(); - - if (node instanceof StatementPatternNode) { - - final StatementPatternNode sp = (StatementPatternNode) node; - - final TermNode s = sp.s(); - - if (s.isConstant() && BD.SERVICE_PARAM.equals(s.getValue())) { - - if (sp.p().isVariable()) { - - throw new RuntimeException( - "not a valid service param triple pattern, " + - "predicate must be constant: " + sp); - - } - - final URI param = (URI) sp.p().getValue(); - - serviceParams.add(param, sp.o()); - - } - - } - - } - - return serviceParams; - - } - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SampleServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SampleServiceFactory.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SampleServiceFactory.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -137,7 +137,6 @@ } - /* * Note: This could extend the base class to allow for search service * configuration options. @@ -158,6 +157,7 @@ } + @Override public BigdataServiceCall create( final ServiceCallCreateParams params, final ServiceParams serviceParams) { Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ServiceParams.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ServiceParams.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ServiceParams.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -0,0 +1,493 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.eval; + +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; +import org.openrdf.model.URI; +import org.openrdf.model.Value; + +import com.bigdata.bop.IVariable; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.sparql.ast.GraphPatternGroup; +import com.bigdata.rdf.sparql.ast.IGroupMemberNode; +import com.bigdata.rdf.sparql.ast.StatementPatternNode; +import com.bigdata.rdf.sparql.ast.TermNode; +import com.bigdata.rdf.sparql.ast.service.ServiceCallCreateParams; +import com.bigdata.rdf.sparql.ast.service.ServiceNode; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BD; + +/** + * The service parameters. Can be multi-valued. Map from predicate to + * one or more {@link TermNode} values. + */ +public class ServiceParams { + + private static final Logger log = Logger + .getLogger(AbstractServiceFactory.class); + + /** + * The map of service params. + */ + private final Map<URI, List<TermNode>> params; + + public ServiceParams() { + + this.params = new LinkedHashMap<URI, List<TermNode>>(); + + } + + /** + * Add. + */ + public void add(final URI param, final TermNode value) { + + if (!params.containsKey(param)) { + + params.put(param, new LinkedList<TermNode>()); + + } + + params.get(param).add(value); + + } + + /** + * Set (clear and add). + */ + public void set(final URI param, final TermNode value) { + + clear(param); + + add(param, value); + + } + + /** + * Clear. + */ + public void clear(final URI param) { + + params.remove(param); + + } + + /** + * Check for existence. + */ + public boolean contains(final URI param) { + + return params.containsKey(param); + + } + + /** + * Get a singleton value for the specified param. + */ + public TermNode get(final URI param, final TermNode defaultValue) { + + if (params.containsKey(param)) { + + final List<TermNode> values = params.get(param); + + if (values.size() > 1) { + + throw new RuntimeException("not a singleton param"); + + } + + return values.get(0); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public Boolean getAsBoolean(final URI param) { + + return getAsBoolean(param, null); + + } + + /** + * Helper. + */ + public Boolean getAsBoolean(final URI param, final Boolean defaultValue) { + + final Literal term = getAsLiteral(param, null); + + if (term != null) { + + return term.booleanValue(); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public Integer getAsInt(final URI param) { + + return getAsInt(param, null); + + } + + /** + * Helper. + */ + public Integer getAsInt(final URI param, final Integer defaultValue) { + + final Literal term = getAsLiteral(param, null); + + if (term != null) { + + return term.intValue(); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public Long getAsLong(final URI param) { + + return getAsLong(param, null); + + } + + /** + * Helper. + */ + public Long getAsLong(final URI param, final Long defaultValue) { + + final Literal term = getAsLiteral(param, null); + + if (term != null) { + + return term.longValue(); + + } + + return defaultValue; + + } + + /** + * Helper. 
+ */ + public String getAsString(final URI param) { + + return getAsString(param, null); + + } + + /** + * Helper. + */ + public String getAsString(final URI param, final String defaultValue) { + + final Literal term = getAsLiteral(param, null); + + if (term != null) { + + return term.stringValue(); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public Literal getAsLiteral(final URI param) { + + return getAsLiteral(param, null); + + } + + /** + * Helper. + */ + public Literal getAsLiteral(final URI param, final Literal defaultValue) { + + final TermNode term = get(param, null); + + if (term != null) { + + if (term.isVariable()) { + + throw new IllegalArgumentException("not a constant"); + + } + + final Value v = term.getValue(); + + if (!(v instanceof Literal)) { + + throw new IllegalArgumentException("not a literal"); + + } + + return ((Literal) v); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public URI getAsURI(final URI param) { + + return getAsURI(param, null); + + } + + /** + * Helper. + */ + public URI getAsURI(final URI param, final URI defaultValue) { + + final TermNode term = get(param, null); + + if (term != null) { + + if (term.isVariable()) { + + throw new IllegalArgumentException("not a constant"); + + } + + final Value v = term.getValue(); + + if (!(v instanceof URI)) { + + throw new IllegalArgumentException("not a uri"); + + } + + return ((URI) v); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + @SuppressWarnings("rawtypes") + public IVariable<IV> getAsVar(final URI param) { + + return getAsVar(param, null); + + } + + /** + * Helper. + */ + @SuppressWarnings("rawtypes") + public IVariable<IV> getAsVar(final URI param, + final IVariable<IV> defaultValue) { + + final TermNode term = get(param, null); + + if (term != null) { + + if (!term.isVariable()) { + + throw new IllegalArgumentException("not a var"); + + } + + return (IVariable<IV>) term.getValueExpression(); + + } + + return defaultValue; + + } + + /** + * Helper. + */ + public List<TermNode> get(final URI param) { + + if (params.containsKey(param)) { + + return params.get(param); + + } + + return Collections.emptyList(); + + } + + /** + * Iterator. + */ + public Iterator<Map.Entry<URI, List<TermNode>>> iterator() { + + return params.entrySet().iterator(); + + } + + @Override + public String toString() { + + final StringBuilder sb = new StringBuilder(); + + sb.append("["); + + for (Map.Entry<URI, List<TermNode>> e : params.entrySet()) { + + final URI param = e.getKey(); + + final List<TermNode> terms = e.getValue(); + + sb.append(param).append(": "); + + if (terms.size() == 1) { + + sb.append(terms.get(0)); + + } else { + + sb.append("["); + for (TermNode t : terms) { + + sb.append(t).append(", "); + + } + sb.setLength(sb.length() - 2); + sb.append("]"); + + } + + sb.append(", "); + + } + + if (sb.length() > 1) + sb.setLength(sb.length() - 2); + sb.append("]"); + + return sb.toString(); + + } + + /** + * Gather the service params (any statement patterns with the subject of + * {@link BD#SERVICE_PARAM}. 
+ */ + static public ServiceParams gatherServiceParams( + final ServiceCallCreateParams createParams) { + + if (createParams == null) + throw new IllegalArgumentException(); + + final AbstractTripleStore store = createParams.getTripleStore(); + + if (store == null) + throw new IllegalArgumentException(); + + final ServiceNode serviceNode = createParams.getServiceNode(); + + if (serviceNode == null) + throw new IllegalArgumentException(); + + final GraphPatternGroup<IGroupMemberNode> group = serviceNode + .getGraphPattern(); + + if (group == null) + throw new IllegalArgumentException(); + + final ServiceParams serviceParams = new ServiceParams(); + + final Iterator<IGroupMemberNode> it = group.iterator(); + + while (it.hasNext()) { + + final IGroupMemberNode node = it.next(); + + if (node instanceof StatementPatternNode) { + + final StatementPatternNode sp = (StatementPatternNode) node; + + final TermNode s = sp.s(); + + if (s.isConstant() && BD.SERVICE_PARAM.equals(s.getValue())) { + + if (sp.p().isVariable()) { + + throw new RuntimeException( + "not a valid service param triple pattern, " + + "predicate must be constant: " + sp); + + } + + final URI param = (URI) sp.p().getValue(); + + serviceParams.add(param, sp.o()); + + } + + } + + } + + if (log.isDebugEnabled()) { + + log.debug(serviceParams); + + } + + return serviceParams; + + } + +} // class ServiceParams Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -65,7 +65,6 @@ import com.bigdata.rdf.sparql.ast.service.BigdataServiceCall; import com.bigdata.rdf.sparql.ast.service.IServiceOptions; import com.bigdata.rdf.sparql.ast.service.ServiceCallCreateParams; -import com.bigdata.rdf.sparql.ast.service.ServiceFactory; import com.bigdata.rdf.sparql.ast.service.ServiceNode; import com.bigdata.rdf.spo.DistinctMultiTermAdvancer; import com.bigdata.rdf.spo.ISPO; @@ -100,8 +99,7 @@ * * @see RangeBOp */ -public class SliceServiceFactory extends AbstractServiceFactory - implements ServiceFactory { +public class SliceServiceFactory extends AbstractServiceFactory { private static final Logger log = Logger .getLogger(SliceServiceFactory.class); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ValuesServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ValuesServiceFactory.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ValuesServiceFactory.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -81,8 +81,7 @@ * ... 
* } */ -public class ValuesServiceFactory extends AbstractServiceFactory - implements ServiceFactory { +public class ValuesServiceFactory extends AbstractServiceFactory { private static final Logger log = Logger .getLogger(ValuesServiceFactory.class); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/CustomServiceFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/CustomServiceFactory.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/CustomServiceFactory.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -39,7 +39,6 @@ * Federated Query and Custom Services</a> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface CustomServiceFactory extends ServiceFactory { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java 2014-07-10 00:00:48 UTC (rev 8531) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -44,7 +44,6 @@ * common ancestor other than {@link Object}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface ServiceCall<E> { Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -0,0 +1,350 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.service.storedquery; + +import java.util.Arrays; +import java.util.concurrent.Future; + +import org.apache.log4j.Logger; +import org.openrdf.query.BindingSet; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQueryResult; + +import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.BigdataSailTupleQuery; +import com.bigdata.rdf.sail.Sesame2BigdataIterator; +import com.bigdata.rdf.sparql.ast.eval.ASTEvalHelper; +import com.bigdata.rdf.sparql.ast.eval.ServiceParams; +import com.bigdata.rdf.sparql.ast.service.BigdataNativeServiceOptions; +import com.bigdata.rdf.sparql.ast.service.ExternalServiceCall; +import com.bigdata.rdf.sparql.ast.service.IServiceOptions; +import com.bigdata.rdf.sparql.ast.service.ServiceCallCreateParams; +import com.bigdata.rdf.sparql.ast.service.ServiceFactory; +import com.bigdata.rdf.sparql.ast.service.ServiceNode; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.task.AbstractApiTask; + +import cutthecrap.utils.striterators.ICloseableIterator; + +/** + * A SERVICE that exposes a stored query for execution. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @see <a href="http://trac.bigdata.com/ticket/989">Stored Query Service</a> + * + * TODO Wiki page. + * + * TODO Implicit prefix declaration for bsq. + * + * TODO Reconcile with the REST API (group commit task pattern). + * + * TODO Why does this work? + * + * <pre> + * SELECT ?book ?title ?price + * { + * SERVICE <http://www.bigdata.com/rdf/stored-query#test_stored_query_001> { + * } + * } + * </pre> + * + * while this does not work + * + * <pre> + * PREFIX bsq: <http://www.bigdata.com/rdf/stored-query#> + * + * SELECT ?book ?title ?price + * { + * SERVICE <bsq#test_stored_query_001> { + * } + * } + * </pre> + * + * TODO Example + * + * <pre> + * PREFIX bsq: <http://www.bigdata.com/rdf/stored-query#> + * #... + * SERVICE <bsq#my-service> { + * bsq:queryParam bsq:gasClass "com.bigdata.rdf.graph.analytics.BFS" . + * gas:program gas:in <IRI> . # one or more times, specifies the initial frontier. + * gas:program gas:out ?out . # exactly once - will be bound to the visited vertices. + * gas:program gas:maxIterations 4 . # optional limit on breadth first expansion. + * gas:program gas:maxVisited 2000 . # optional limit on the #of visited vertices. + * gas:program gas:nthreads 4 . # specify the #of threads to use (optional) + * } + * </pre> + * + */ +abstract public class StoredQueryService implements ServiceFactory { + + public interface Options { + + /** + * The namespace used for stored query service. + */ + String NAMESPACE = "http://www.bigdata.com/rdf/stored-query#"; + + } + + static private transient final Logger log = Logger + .getLogger(StoredQueryService.class); + + private final BigdataNativeServiceOptions serviceOptions; + + public StoredQueryService() { + + serviceOptions = new BigdataNativeServiceOptions(); + +// /* +// * TODO This should probably be metadata set for each specific +// * stored query. 
+// */ +// serviceOptions.setRunFirst(true); + + } + + @Override + public IServiceOptions getServiceOptions() { + + return serviceOptions; + + } + + @Override + final public ExternalServiceCall create(final ServiceCallCreateParams params) { + + if (params == null) + throw new IllegalArgumentException(); + + final AbstractTripleStore store = params.getTripleStore(); + + if (store == null) + throw new IllegalArgumentException(); + + final ServiceNode serviceNode = params.getServiceNode(); + + if (serviceNode == null) + throw new IllegalArgumentException(); + + final ServiceParams serviceParams = ServiceParams.gatherServiceParams(params); + + return create(params, serviceParams); + + } + + public ExternalServiceCall create( + final ServiceCallCreateParams createParams, + final ServiceParams serviceParams) { + + /* + * Create and return the ServiceCall object which will execute this + * query. + */ + + return new StoredQueryServiceCall(createParams, serviceParams); + + } + + /** + * Return the SPARQL query to be evaluated. + */ + abstract protected String getQuery(); + + private class StoredQueryServiceCall implements ExternalServiceCall { + + private final ServiceCallCreateParams createParams; + private final ServiceParams serviceParams; + + public StoredQueryServiceCall( + final ServiceCallCreateParams createParams, + final ServiceParams serviceParams) { + + if (createParams == null) + throw new IllegalArgumentException(); + + if (serviceParams == null) + throw new IllegalArgumentException(); + + this.createParams = createParams; + this.serviceParams = serviceParams; + + } + + @Override + public IServiceOptions getServiceOptions() { + + return createParams.getServiceOptions(); + + } + + /** + * TODO We could use {@link ASTEvalHelper} to evaluate at the bigdata + * level without forcing the materialization of any variable bindings + * from the lexicon indices. This would be faster for some purposes, + * especially if the stored procedure is only used to JOIN into an outer + * query as in <code>SELECT * { SERVICE bsq:my-service {} }</code> + * + * FIXME Generalize to allow arbitrary application logic that has easy + * methods permitting it to invoke multiple queries and operate on the + * results. + * + * FIXME Generalize to support groovy scripting. + */ + @Override + public ICloseableIterator<BindingSet> call(final BindingSet[] bindingSets) + throws Exception { + + if (log.isInfoEnabled()) { + log.info(bindingSets.length); + log.info(Arrays.toString(bindingSets)); + log.info(serviceParams); + } + + final AbstractTripleStore tripleStore = createParams.getTripleStore(); + + final String queryStr = getQuery(); + + /* + * FIXME What about incoming bindings? They need to flow into the + * SERVICE. + */ + + // TODO Should the baseURI be the SERVICE URI? Decide and document. + final String baseURI = createParams.getServiceURI().stringValue(); + + final Future<TupleQueryResult> ft = AbstractApiTask.submitApiTask( + tripleStore.getIndexManager(), + new SparqlApiTask(tripleStore.getNamespace(), tripleStore + .getTimestamp(), queryStr, baseURI, bindingSets)); + + try { + + final TupleQueryResult tupleQueryResult = ft.get(); + + return new Sesame2BigdataIterator<BindingSet, QueryEvaluationException>( + tupleQueryResult); + + } finally { + + ft.cancel(true/* mayInterruptIfRunning */); + + } + + } + + } // StoredQueryServiceCall + + /** + * Task to execute a SPARQL query. 
+ * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class SparqlApiTask extends + AbstractApiTask<TupleQueryResult> { + + private final String queryStr; + private final String baseURI; + + /** + * + * FIXME This is ignoring the exogenous bindings. This is more or less + * equivalent to bottom-up evaluation. It would be more efficient if we + * could flow in the exogenous bindings but this is not supported before + * openrdf 2.7 (we hack this in {@link BigdataSailTupleQuery}). + */ + private final BindingSet[] bindingSets; + + public SparqlApiTask(final String namespace, final long timestamp, + final String queryStr, final String baseURI, + final BindingSet[] bindingSets) { + + super(namespace, timestamp); + + this.queryStr = queryStr; + this.baseURI = baseURI; + this.bindingSets = bindingSets; + + } + + @Override + public TupleQueryResult call() throws Exception { + BigdataSailRepositoryConnection cxn = null; + boolean success = false; + try { + // Note: Will be UPDATE connection if UPDATE request!!! + cxn = getQueryConnection(); + if (log.isTraceEnabled()) + log.trace("Query running..."); + final TupleQueryResult ret = doQuery(cxn); + success = true; + if (log.isTraceEnabled()) + log.trace("Query done."); + return ret; + } finally { + if (cxn != null) { + if (!success && !cxn.isReadOnly()) { + /* + * Force rollback of the connection. + * + * Note: It is possible that the commit has already + * been processed, in which case this rollback() + * will be a NOP. This can happen when there is an + * IO error when communicating with the client, but + * the database has already gone through a commit. + */ + try { + // Force rollback of the connection. + cxn.rollback(); + } catch (Throwable t) { + log.error(t, t); + } + } + try { + // Force close of the connection. + cxn.close(); + } catch (Throwable t) { + log.error(t, t); + } + } + } + } + + protected TupleQueryResult doQuery( + final BigdataSailRepositoryConnection cxn) throws Exception { + + final BigdataSailTupleQuery query = (BigdataSailTupleQuery) cxn + .prepareTupleQuery(QueryLanguage.SPARQL, queryStr, + baseURI); + + return query.evaluate(); + + } + + } // SparqlApiTask + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/package.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/package.html (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/package.html 2014-07-10 15:11:48 UTC (rev 8532) @@ -0,0 +1,18 @@ +<html> +<head> +<title>A SPARQL Stored Query Service</title> +</head> +<body> + +<p> + + An embedded SERVICE that permits an application to register and execute + stored queries. A stored query may be as simple as a single SPARQL + query. However, it may also involve arbitrary application logic that + coordinates some set of related queries before returning a result to + the caller. 
+ +</p> + +</body> +</html> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-07-10 15:11:48 UTC (rev 8532) @@ -0,0 +1,407 @@ +/* + + Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + + Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + */ +package com.bigdata.rdf.task; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.atomic.AtomicReference; + +import org.openrdf.repository.RepositoryException; +import org.openrdf.sail.SailException; + +import com.bigdata.BigdataStatics; +import com.bigdata.journal.IConcurrencyManager; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; +import com.bigdata.journal.TimestampUtility; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSailRepository; +import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.webapp.DatasetNotFoundException; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.resources.IndexManager; +import com.bigdata.service.IBigdataFederation; + +/** + * Base class is non-specific. Directly derived classes are suitable for + * internal tasks (stored queries, stored procedures, etc) while REST API tasks + * are based on a specialized subclass that also provides for access to the HTTP + * request and response. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > + * Concurrent unisolated operations against multiple KBs </a> + */ +abstract public class AbstractApiTask<T> implements IApiTask<T> { + + /** The reference to the {@link IIndexManager} is set before the task is executed. */ + private final AtomicReference<IIndexManager> indexManagerRef = new AtomicReference<IIndexManager>(); + + /** The namespace of the target KB instance. */ + protected final String namespace; + + /** The timestamp of the view of that KB instance. */ + protected final long timestamp; + + @Override + public String getNamespace() { + return namespace; + } + + @Override + public long getTimestamp() { + return timestamp; + } + + /** + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp of the view of that KB instance. 
+ */ + protected AbstractApiTask(final String namespace, + final long timestamp) { + this.namespace = namespace; + this.timestamp = timestamp; + } + + protected void clearIndexManager() { + + indexManagerRef.set(null); + + } + + protected void setIndexManager(final IIndexManager indexManager) { + + if (!indexManagerRef + .compareAndSet(null/* expect */, indexManager/* update */)) { + + throw new IllegalStateException(); + + } + + } + + protected IIndexManager getIndexManager() { + + final IIndexManager tmp = indexManagerRef.get(); + + if (tmp == null) + throw new IllegalStateException(); + + return tmp; + + } + + /** + * Return a read-only view of the {@link AbstractTripleStore} for the given + * namespace will read from the commit point associated with the given + * timestamp. + * + * @param namespace + * The namespace. + * @param timestamp + * The timestamp. + * + * @return The {@link AbstractTripleStore} -or- <code>null</code> if none is + * found for that namespace and timestamp. + * + * TODO Enforce historical query by making sure timestamps conform + * (we do not want to allow read/write tx queries unless update + * semantics are introduced ala SPARQL 1.1). + */ + protected AbstractTripleStore getTripleStore(final String namespace, + final long timestamp) { + + // if (timestamp == ITx.UNISOLATED) + // throw new IllegalArgumentException("UNISOLATED reads disallowed."); + + // resolve the default namespace. + final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() + .getResourceLocator().locate(namespace, timestamp); + + return tripleStore; + + } + + /** + * Return a connection transaction, which may be either read-only or support + * mutation depending on the timestamp associated with the task's view. When + * the timestamp is associated with a historical commit point, this will be + * a read-only connection. When it is associated with the + * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a + * mutable connection. + * + * @throws RepositoryException + */ + protected BigdataSailRepositoryConnection getQueryConnection() + throws RepositoryException { + + /* + * Note: [timestamp] will be a read-only tx view of the triple store if + * a READ_LOCK was specified when the NanoSparqlServer was started + * (unless the query explicitly overrides the timestamp of the view on + * which it will operate). + */ + final AbstractTripleStore tripleStore = getTripleStore(namespace, + timestamp); + + if (tripleStore == null) { + + throw new DatasetNotFoundException("Not found: namespace=" + + namespace + ", timestamp=" + + TimestampUtility.toString(timestamp)); + + } + + // Wrap with SAIL. + final BigdataSail sail = new BigdataSail(tripleStore); + + final BigdataSailRepository repo = new BigdataSailRepository(sail); + + repo.initialize(); + + if (TimestampUtility.isReadOnly(timestamp)) { + + return (BigdataSailRepositoryConnection) repo + .getReadOnlyConnection(timestamp); + + } + + // Read-write connection. + final BigdataSailRepositoryConnection conn = repo.getConnection(); + + conn.setAutoCommit(false); + + return conn; + + } + + /** + * Return an UNISOLATED connection. + * + * @return The UNISOLATED connection. + * + * @throws SailException + * @throws RepositoryException + */ + protected BigdataSailRepositoryConnection getUnisolatedConnection() + throws SailException, RepositoryException { + + // resolve the default namespace. 
+ final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() + .getResourceLocator().locate(namespace, ITx.UNISOLATED); + + if (tripleStore == null) { + + throw new RuntimeException("Not found: namespace=" + namespace); + + } + + // Wrap with SAIL. + final BigdataSail sail = new BigdataSail(tripleStore); + + final BigdataSailRepository repo = new BigdataSailRepository(sail); + + repo.initialize(); + + final BigdataSailRepositoryConnection conn = (BigdataSailRepositoryConnection) repo + .getUnisolatedConnection(); + + conn.setAutoCommit(false); + + return conn; + + } + + /** + * Submit a task and return a {@link Future} for that task. The task will be + * run on the appropriate executor service depending on the nature of the + * backing database and the view required by the task. + * + * @param indexManager + * The {@link IndexManager}. + * @param task + * The task. + * + * @return The {@link Future} for that task. + * + * @throws DatasetNotFoundException + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/753" > HA + * doLocalAbort() should interrupt NSS requests and AbstractTasks </a> + * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > + * Concurrent unisolated operations against multiple KBs </a> + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + static public <T> Future<T> submitApiTask( + final IIndexManager indexManager, final AbstractApiTask<T> task) + throws DatasetNotFoundException { + + final String namespace = task.getNamespace(); + + final long timestamp = task.getTimestamp(); + + if (!BigdataStatics.NSS_GROUP_COMMIT || indexManager instanceof IBigdataFederation + || TimestampUtility.isReadOnly(timestamp) + ) { + + /* + * Execute the REST API task. + * + * Note: For scale-out, the operation will be applied using + * client-side global views of the indices. + * + * Note: This can be used for operations on read-only views (even on + * a Journal). This is helpful since we can avoid some overhead + * associated the AbstractTask lock declarations. + */ + // Wrap Callable. + final FutureTask<T> ft = new FutureTask<T>( + new ApiTaskForIndexManager(indexManager, task)); + + if (true) { + + /* + * Caller runs (synchronous execution) + * + * Note: By having the caller run the task here we avoid + * consuming another thread. + */ + ft.run(); + + } else { + + /* + * Run on a normal executor service. + */ + indexManager.getExecutorService().submit(ft); + + } + + return ft; + + } else { + + /** + * Run on the ConcurrencyManager of the Journal. + * + * Mutation operations will be scheduled based on the pre-declared + * locks and will have exclusive access to the resources guarded by + * those locks when they run. + * + * FIXME GROUP COMMIT: The {@link AbstractTask} was written to + * require the exact set of resource lock declarations. However, for + * the REST API, we want to operate on all indices associated with a + * KB instance. This requires either: + * <p> + * (a) pre-resolving the names of those indices and passing them all + * into the AbstractTask; or + * <P> + * (b) allowing the caller to only declare the namespace and then to + * be granted access to all indices whose names are in that + * namespace. + * + * (b) is now possible with the fix to the Name2Addr prefix scan. + */ + + // Obtain the necessary locks for R/w access to KB indices. 
+ final String[] locks = getLocksForKB((Journal) indexManager, + namespace); + + final IConcurrencyManager cc = ((Journal) indexManager) + .getConcurrencyManager(); + + // Submit task to ConcurrencyManager. Will acquire locks and run. + return cc.submit(new ApiTaskForJournal(cc, task.getTimestamp(), + locks, task)); + + } + + } + + /** + * Acquire the locks for the named indices associated with the specified KB. + * + * @param indexManager + * The {@link Journal}. + * @param namespace + * The namespace of the KB instance. + * + * @return The locks for the named indices associated with that KB instance. + * + * @throws DatasetNotFoundException + * + * FIXME GROUP COMMIT : [This should be replaced by the use of + * the namespace and hierarchical locking support in + * AbstractTask.] This could fail to discover a recently create + * KB between the time when the KB is created and when the group + * commit for that create becomes visible. This data race exists + * because we are using [lastCommitTime] rather than the + * UNISOLATED view of the GRS. + * <p> + * Note: This data race MIGHT be closed by the default locator + * cache. If it records the new KB properties when they are + * created, then they should be visible. If they are not + * visible, then we have a data race. (But if it records them + * before the group commit for the KB create, then the actual KB + * indices will not be durable until the that group commit...). + * <p> + * Note: The problem can obviously be resolved by using the + * UNISOLATED index to obtain the KB properties, but that would + * serialize ALL updates. What we need is a suitable caching + * mechanism that (a) ensures that newly create KB instances are + * visible; and (b) has high concurrency for read-only requests + * for the properties for those KB instances. + */ + private static String[] getLocksForKB(final Journal indexManager, + final String namespace) throws DatasetNotFoundException { + + final long timestamp = indexManager.getLastCommitTime(); + + final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager + .getResourceLocator().locate(namespace, timestamp); + + if (tripleStore == null) + throw new DatasetNotFoundException("Not found: namespace=" + + namespace + ", timestamp=" + + TimestampUtility.toString(timestamp)); + + final Set<String> lockSet = new HashSet<String>(); + + lockSet.addAll(tripleStore.getSPORelation().getIndexNames()); + + lockSet.addAll(tripleStore.getLexiconRelation().... [truncated message content] |
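A minimal sketch of the stored query pattern added in this revision, assuming the abstract StoredQueryService shown above: an application subclasses it, supplies the SPARQL text via getQuery(), and associates the factory with a SERVICE URI. The class name "MyStoredQuery", the query body, and the use of ServiceRegistry.getInstance().add(...) for registration are illustrative assumptions modeled on the ServiceFactory pattern and the TestStoredQueryService test listed in the Added Paths; they are not code from this commit.

import org.openrdf.model.URI;
import org.openrdf.model.impl.URIImpl;

import com.bigdata.rdf.sparql.ast.service.ServiceRegistry;
import com.bigdata.rdf.sparql.ast.service.storedquery.StoredQueryService;

public class MyStoredQuery extends StoredQueryService {

    /**
     * The SPARQL evaluated (via SparqlApiTask) whenever the SERVICE URI for
     * this stored query is referenced. The query text is illustrative.
     */
    @Override
    protected String getQuery() {

        return "SELECT ?book ?title ?price\n"//
                + "{ ?book <http://example.org/title> ?title ;\n"//
                + "        <http://example.org/price> ?price . }";

    }

    /**
     * Associate the factory with a URI in the bsq namespace. The registry
     * call is an assumption; any mechanism that maps the SERVICE URI to this
     * factory would serve.
     */
    public static void register() {

        final URI serviceURI = new URIImpl(
                StoredQueryService.Options.NAMESPACE + "my_stored_query");

        ServiceRegistry.getInstance().add(serviceURI, new MyStoredQuery());

    }

}

Once registered, the stored query is invoked by its URI from an ordinary SPARQL query:

SELECT ?book ?title ?price
{
  SERVICE <http://www.bigdata.com/rdf/stored-query#my_stored_query> {
  }
}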
From: <tho...@us...> - 2014-07-16 14:56:41
|
Revision: 8554
          http://sourceforge.net/p/bigdata/code/8554
Author:   thompsonbry
Date:     2014-07-16 14:56:31 +0000 (Wed, 16 Jul 2014)
Log Message:
-----------
Extracted an IReadOnly interface from IIndexProcedure. Removed the sub-classes of AbstractRestApiTask that marked read-only versus read-write tasks and replaced them with the IReadOnly interface. Cleanup around the group commit code and test suites.

See #566 (Concurrent unisolated operations against multiple KBs on the same Journal)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/proc/IIndexProcedure.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestLockContention.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestUnisolatedReadWriteIndex.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForIndexManager.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/AbstractRestApiTask.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IReadOnly.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/proc/IIndexProcedure.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/proc/IIndexProcedure.java	2014-07-16 14:52:28 UTC (rev 8553)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/proc/IIndexProcedure.java	2014-07-16 14:56:31 UTC (rev 8554)
@@ -34,6 +34,7 @@
 import com.bigdata.btree.IIndex;
 import com.bigdata.btree.IRangeQuery;
 import com.bigdata.btree.ISimpleBTree;
+import com.bigdata.journal.IReadOnly;
 import com.bigdata.service.DataService;
 import com.bigdata.service.IDataService;
 import com.bigdata.service.ndx.ClientIndexView;
@@ -94,23 +95,22 @@
  * with the JINI codebase mechanism (<code>java.rmi.server.codebase</code>).
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @todo add generic type for {@link #apply(IIndex)} 's return value (much like * {@link Callable}). */ -public interface IIndexProcedure extends Serializable { +public interface IIndexProcedure extends IReadOnly, Serializable { - /** - * Return <code>true</code> iff the procedure asserts that it will not - * write on the index. When <code>true</code>, the procedure may be run - * against a view of the index that is read-only or which allows concurrent - * processes to read on the same index object. When <code>false</code> the - * procedure will be run against a mutable view of the index (assuming that - * the procedure is executed in a context that has access to a mutable index - * view). - */ - public boolean isReadOnly(); +// /** +// * Return <code>true</code> iff the procedure asserts that it will not +// * write on the index. When <code>true</code>, the procedure may be run +// * against a view of the index that is read-only or which allows concurrent +// * processes to read on the same index object. When <code>false</code> the +// * procedure will be run against a mutable view of the index (assuming that +// * the procedure is executed in a context that has access to a mutable index +// * view). +// */ +// public boolean isReadOnly(); /** * Run the procedure. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -62,8 +62,6 @@ import com.bigdata.btree.ILocalBTreeView; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.view.FusedView; -import com.bigdata.concurrent.LockManager; -import com.bigdata.concurrent.LockManagerTask; import com.bigdata.concurrent.NonBlockingLockManager; import com.bigdata.counters.CounterSet; import com.bigdata.mdi.IResourceMetadata; @@ -116,7 +114,6 @@ * {@link ConcurrencyManager#submit(AbstractTask)} it. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * * @todo declare generic type for the return as <? extends Object> to be compatible * with {@link ConcurrencyManager#submit(AbstractTask)} @@ -166,12 +163,14 @@ * The object used to manage access to the resources from which views of the * indices are created. */ + @Override public final IResourceManager getResourceManager() { return resourceManager; } + @Override synchronized public final IJournal getJournal() { if (journal == null) { @@ -282,7 +281,7 @@ boolean registeredIndex = false; boolean droppedIndex = false; - Entry(Name2Addr.Entry entry) { + Entry(final Name2Addr.Entry entry) { super(entry.name, entry.checkpointAddr, entry.commitTime); @@ -425,6 +424,7 @@ final String name; final ICheckpointProtocol btree; + @Override public String toString() { return "DirtyListener{name="+name+"}"; @@ -448,8 +448,9 @@ * Add <i>this</i> to the {@link AbstractTask#commitList}. * * @param btree - * The {@link BTree} reporting that it is dirty. + * The index reporting that it is dirty. */ + @Override public void dirtyEvent(final ICheckpointProtocol btree) { assert btree == this.btree; @@ -966,7 +967,8 @@ this.l = l; } - + + @Override public Void call() throws Exception { if(log.isInfoEnabled()) @@ -1240,8 +1242,10 @@ * fails. 
The boolean argument indicates whether or not the group commit * succeeded. Throws exceptions are trapped and logged. */ - void afterTaskHook(boolean abort) { + void afterTaskHook(final boolean abort) { + ((IsolatedActionJournal) getJournal()).completeTask(); + } /* @@ -1260,6 +1264,7 @@ */ protected TaskCounters taskCounters; + @Override public TaskCounters getTaskCounters() { return taskCounters; @@ -1752,8 +1757,8 @@ * <dt>timestamp</dt> * <dd>The {@link #timestamp} specified to the ctor.</dd> * <dt>resources</dt> - * <dd>The named resource(s) specified to the ctor IFF {@link #INFO} is - * <code>true</code></dd> + * <dd>The named resource(s) specified to the ctor IFF logging @ INFO or + * above.</dd> * </dl> */ protected void setupLoggingContext() { @@ -1927,7 +1932,7 @@ * Delegate handles handshaking for writable transactions. */ - final Callable<T> delegate = new InnerReadWriteTxServiceCallable( + final Callable<T> delegate = new InnerReadWriteTxServiceCallable<T>( this, tx); return delegate.call(); @@ -1979,8 +1984,6 @@ * Call {@link #doTask()} for an unisolated write task. * * @throws Exception - * - * @todo update javadoc to reflect the change in how the locks are acquired. */ private T doUnisolatedReadWriteTask() throws Exception { @@ -2074,7 +2077,7 @@ * access to the same resources since the locks were released above. */ - writeService.afterTask(this, null); + writeService.afterTask(this/* task */, null/* cause */); return ret; @@ -2087,7 +2090,7 @@ if (log.isInfoEnabled()) log.info("Task failed: class=" + this + " : " + t2); - writeService.afterTask(this, t2); + writeService.afterTask(this/* task */, t2/* cause */); } @@ -2184,11 +2187,11 @@ * Inner class used to wrap up the call to {@link AbstractTask#doTask()} for * read-write transactions. */ - static protected class InnerReadWriteTxServiceCallable extends DelegateTask { + static protected class InnerReadWriteTxServiceCallable<T> extends DelegateTask<T> { - final Tx tx; + private final Tx tx; - InnerReadWriteTxServiceCallable(AbstractTask delegate, Tx tx) { + InnerReadWriteTxServiceCallable(final AbstractTask<T> delegate, final Tx tx) { super( delegate ); @@ -2202,7 +2205,8 @@ /** * Wraps up the execution of {@link AbstractTask#doTask()}. */ - public Object call() throws Exception { + @Override + public T call() throws Exception { // invoke on the outer class. @@ -2234,9 +2238,8 @@ } /** - * An instance of this class is used as the delegate for a - * {@link LockManagerTask} in order to coordinate the acquisition of locks - * with the {@link LockManager} before the task can execute and to release + * An instance of this class is used as the delegate to coordinate the acquisition of locks + * with the {@link NonBlockingLockManager} before the task can execute and to release * locks after the task has completed (whether it succeeds or fails). * <p> * Note: This inner class delegates the execution of the task to @@ -2249,8 +2252,6 @@ * prevent tasks from progressing. If there is strong lock contention then * writers will be more or less serialized. * - * @todo javadoc update to reflect the {@link NonBlockingLockManager} - * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ static protected class InnerWriteServiceCallable<T> extends DelegateTask<T> { @@ -2264,6 +2265,7 @@ /** * Note: Locks on the named indices are ONLY held during this call. */ + @Override public T call() throws Exception { // The write service on which this task is running. 
@@ -2356,7 +2358,7 @@ super(); } - public ResubmitException(String msg) { + public ResubmitException(final String msg) { super(msg); @@ -2389,6 +2391,7 @@ @SuppressWarnings("rawtypes") private final IResourceLocator resourceLocator; + @Override public String toString() { return getClass().getName() + "{task=" + AbstractTask.this + "}"; @@ -3016,6 +3019,7 @@ * Overridden to visit the name of all indices that were isolated and to * ignore the timestamp. */ + @SuppressWarnings("unchecked") @Override public Iterator<String> indexNameScan(final String prefix, final long timestampIsIgnored) { @@ -3054,7 +3058,8 @@ @SuppressWarnings("rawtypes") private final DefaultResourceLocator resourceLocator; - + + @Override public String toString() { return getClass().getName() + "{task=" + AbstractTask.this + "}"; @@ -3313,7 +3318,8 @@ final IIndexManager tmp = new DelegateIndexManager(this) { - public IIndex getIndex(String name, long timestampIsIgnored) { + @Override + public IIndex getIndex(final String name, final long timestampIsIgnored) { // last commit time. final long commitTime = delegate.getRootBlockView() @@ -3610,7 +3616,7 @@ */ private static class DelegateIndexManager implements IIndexManager { - private IIndexManager delegate; + private final IIndexManager delegate; public DelegateIndexManager(final IIndexManager delegate) { this.delegate = delegate; Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IReadOnly.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IReadOnly.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/IReadOnly.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -0,0 +1,48 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Mar 15, 2007 + */ +package com.bigdata.journal; + +/** + * A marker interface for logic that can declare whether or not it is read-only. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface IReadOnly { + + /** + * Return <code>true</code> iff the procedure asserts that it will not + * write on the index. When <code>true</code>, the procedure may be run + * against a view of the index that is read-only or which allows concurrent + * processes to read on the same index object. When <code>false</code> the + * procedure will be run against a mutable view of the index (assuming that + * the procedure is executed in a context that has access to a mutable index + * view). 
+ */ + public boolean isReadOnly(); + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/JournalTransactionService.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -39,7 +39,6 @@ * Implementation for a standalone journal using single-phase commits. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ abstract public class JournalTransactionService extends AbstractHATransactionService { @@ -58,6 +57,7 @@ } + @Override public JournalTransactionService start() { super.start(); @@ -70,6 +70,7 @@ * Extended to register the new tx in the * {@link AbstractLocalTransactionManager}. */ + @Override protected void activateTx(final TxState state) { super.activateTx(state); @@ -88,6 +89,7 @@ } + @Override protected void deactivateTx(final TxState state) { super.deactivateTx(state); @@ -112,6 +114,7 @@ } + @Override protected long findCommitTime(final long timestamp) { final ICommitRecord commitRecord = journal.getCommitRecord(timestamp); @@ -126,6 +129,7 @@ } + @Override protected long findNextCommitTime(final long commitTime) { /* @@ -150,6 +154,7 @@ } + @Override protected void abortImpl(final TxState state) { if(state.isReadOnly()) { @@ -220,6 +225,7 @@ } + @Override protected long commitImpl(final TxState state) throws ExecutionException, InterruptedException { @@ -312,7 +318,7 @@ /* * FIXME The state changes for the local tx should be atomic across * this operation. In order to do that we have to make those changes - * inside of SinglePhaseTask while it is holding the lock, but after + * inside of SinglePhaseCommit while it is holding the lock, but after * it has committed. Perhaps the best way to do this is with a pre- * and post- call() API since we can not hold the lock across the * task otherwise (it will deadlock). @@ -382,7 +388,6 @@ * coherent commit time for the transaction as a whole. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public static class SinglePhaseCommit extends AbstractTask<Void> { @@ -411,6 +416,7 @@ } + @Override public Void doTask() throws Exception { /* @@ -446,6 +452,7 @@ /** * The last commit time from the current root block. */ + @Override final public long getLastCommitTime() { return journal.getRootBlockView().getLastCommitTime(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -139,25 +139,24 @@ * or a period of time then that might do it. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public class WriteExecutorService extends ThreadPoolExecutor { /** - * Main log for the {@link WriteExecutorService}. + * Main logger for the {@link WriteExecutorService}. */ - protected static final Logger log = Logger + private static final Logger log = Logger .getLogger(WriteExecutorService.class); /** * True iff the {@link #log} level is INFO or less. 
*/ - final protected static boolean INFO = log.isInfoEnabled(); + final private static boolean INFO = log.isInfoEnabled(); /** * True iff the {@link #log} level is DEBUG or less. */ - final protected static boolean DEBUG = log.isDebugEnabled(); + final private static boolean DEBUG = log.isDebugEnabled(); /** * Uses the {@link OverflowManager} log for things relating to synchronous @@ -453,13 +452,13 @@ * The threads that are running our tasks (so that we can interrupt them * if necessary). */ - final private ConcurrentHashMap<Thread,AbstractTask> active = new ConcurrentHashMap<Thread,AbstractTask>(); + final private ConcurrentHashMap<Thread,AbstractTask<?>> active = new ConcurrentHashMap<Thread,AbstractTask<?>>(); /** * The set of tasks that make it into the commit group (so that we can set * the commit time on each of them iff the group commit succeeds). */ - final private Map<Thread,AbstractTask> commitGroup = new LinkedHashMap<Thread, AbstractTask>(); + final private Map<Thread,AbstractTask<?>> commitGroup = new LinkedHashMap<Thread, AbstractTask<?>>(); /** #of write tasks completed since the last commit. */ final private AtomicInteger nwrites = new AtomicInteger(0); @@ -805,7 +804,7 @@ * @param r * The {@link AbstractTask}. */ - protected void beforeTask(final Thread t, final AbstractTask r) { + protected void beforeTask(final Thread t, final AbstractTask<?> r) { if (t == null) throw new NullPointerException(); @@ -875,7 +874,7 @@ * The exception thrown -or- <code>null</code> if the task * completed successfully. */ - protected void afterTask(final AbstractTask r, final Throwable t) { + protected void afterTask(final AbstractTask<?> r, final Throwable t) { if (r == null) throw new NullPointerException(); @@ -1043,7 +1042,7 @@ } finally { // Remove since thread is no longer running the task. - final ITask tmp = active.remove(Thread.currentThread()); + final ITask<?> tmp = active.remove(Thread.currentThread()); if(trackActiveSetInMDC) { @@ -1238,7 +1237,7 @@ final Thread currentThread = Thread.currentThread(); // the task that invoked this method. - final ITask r = active.get(currentThread); + final ITask<?> r = active.get(currentThread); /* * If an abort is in progress then throw an exception. @@ -2149,7 +2148,7 @@ * enough.] */ - final AbstractTask[] a = active.values().toArray(new AbstractTask[0]); + final AbstractTask<?>[] a = active.values().toArray(new AbstractTask[0]); final TaskAndTime[] b = new TaskAndTime[a.length]; @@ -2195,7 +2194,7 @@ private static class TaskAndTime implements Comparable<TaskAndTime> { private final long now; - private final AbstractTask task; + private final AbstractTask<?> task; /** The elapsed milliseconds for work performed on this task. */ private final long elapsedRunTime; /** The #of milliseconds ago that work began on this task. */ @@ -2208,6 +2207,7 @@ } private final State state; + @Override public String toString() { return "TaskAndTime{" + task.toString() + ",elapsedRunTime=" + TimeUnit.NANOSECONDS.toMillis(elapsedRunTime) @@ -2215,7 +2215,7 @@ + ",state=" + state + "}"; } - TaskAndTime(final AbstractTask task, final long now) { + TaskAndTime(final AbstractTask<?> task, final long now) { this.task = task; this.now = now; if (task.nanoTime_finishedWork != 0L) { @@ -2238,6 +2238,7 @@ /** * Places into order by decreasing {@link #elapsedRunTime}. 
*/ + @Override public int compareTo(final TaskAndTime o) { if (elapsedRunTime < o.elapsedRunTime) @@ -2417,7 +2418,7 @@ assert nwrites.get() == commitGroup.size(); - for (AbstractTask task : commitGroup.values()) { + for (AbstractTask<?> task : commitGroup.values()) { task.commitTime = timestamp; @@ -2543,14 +2544,14 @@ if (INFO) log.info("Interrupting tasks awaiting commit."); - final Iterator<Map.Entry<Thread, AbstractTask>> itr = active + final Iterator<Map.Entry<Thread, AbstractTask<?>>> itr = active .entrySet().iterator(); int ninterrupted = 0; while (itr.hasNext()) { - final Map.Entry<Thread,AbstractTask> entry = itr.next(); + final Map.Entry<Thread,AbstractTask<?>> entry = itr.next(); // set flag to deny access to resources. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -635,6 +635,7 @@ } + @Override public long nextTimestamp() { // setupLoggingContext(); @@ -698,6 +699,7 @@ * transactions which must contend for start times which will * read from the appropriate historical commit point). */ + @Override public long newTx(final long timestamp) { setupLoggingContext(); @@ -883,6 +885,7 @@ * * @see Options#MIN_RELEASE_AGE */ + @Override public long getReleaseTime() { if (log.isTraceEnabled()) @@ -1527,6 +1530,7 @@ * is invoked for all commits on all data services and will otherwise be a * global hotspot. */ + @Override public void notifyCommit(final long commitTime) { lock.lock(); @@ -1930,6 +1934,7 @@ /** * Note: Declared abstract so that we can hide the {@link IOException}. */ + @Override abstract public long getLastCommitTime(); /** @@ -2008,6 +2013,7 @@ /** * Abort the transaction (asynchronous). */ + @Override public void abort(final long tx) { setupLoggingContext(); @@ -2097,6 +2103,7 @@ } + @Override public long commit(final long tx) throws ValidationError { setupLoggingContext(); @@ -2475,6 +2482,7 @@ /** * The hash code is based on the {@link #getStartTimestamp()}. */ + @Override final public int hashCode() { return hashCode; @@ -2489,6 +2497,7 @@ * @param o * Another transaction object. */ + @Override final public boolean equals(final Object o) { if (this == o) @@ -2607,6 +2616,7 @@ /** * Returns a string representation of the transaction state. */ + @Override final public String toString() { /* @@ -2691,6 +2701,7 @@ * {@link #getLastCommitTime()} and then changes the {@link TxServiceRunState} * to {@link TxServiceRunState#Running}. 
*/ + @Override public AbstractTransactionService start() { if(log.isInfoEnabled()) @@ -2741,6 +2752,7 @@ } + @Override @SuppressWarnings("rawtypes") public Class getServiceIface() { @@ -2758,60 +2770,70 @@ final CounterSet countersRoot = new CounterSet(); countersRoot.addCounter("runState", new Instrument<String>() { + @Override protected void sample() { setValue(runState.toString()); } }); countersRoot.addCounter("#active", new Instrument<Integer>() { + @Override protected void sample() { setValue(getActiveCount()); } }); countersRoot.addCounter("lastCommitTime", new Instrument<Long>() { + @Override protected void sample() { setValue(getLastCommitTime()); } }); countersRoot.addCounter("minReleaseAge", new Instrument<Long>() { + @Override protected void sample() { setValue(getMinReleaseAge()); } }); countersRoot.addCounter("releaseTime", new Instrument<Long>() { + @Override protected void sample() { setValue(getReleaseTime()); } }); countersRoot.addCounter("startCount", new Instrument<Long>() { + @Override protected void sample() { setValue(getStartCount()); } }); countersRoot.addCounter("abortCount", new Instrument<Long>() { + @Override protected void sample() { setValue(getAbortCount()); } }); countersRoot.addCounter("commitCount", new Instrument<Long>() { + @Override protected void sample() { setValue(getCommitCount()); } }); countersRoot.addCounter("readOnlyActiveCount", new Instrument<Long>() { + @Override protected void sample() { setValue(getReadOnlyActiveCount()); } }); countersRoot.addCounter("readWriteActiveCount", new Instrument<Long>() { + @Override protected void sample() { setValue(getReadWriteActiveCount()); } @@ -2826,6 +2848,7 @@ */ countersRoot.addCounter("earliestReadsOnCommitTime", new Instrument<Long>() { + @Override protected void sample() { final TxState tmp = earliestOpenTx; if (tmp != null) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentTx.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -95,27 +95,29 @@ * index that result in write-write conflicts. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ -public class StressTestConcurrentTx extends ProxyTestCase implements IComparisonTest { +public class StressTestConcurrentTx extends ProxyTestCase<Journal> implements + IComparisonTest { public StressTestConcurrentTx() { } - public StressTestConcurrentTx(String name) { + public StressTestConcurrentTx(final String name) { super(name); } - Journal journal; + private Journal journal; - public void setUpComparisonTest(Properties properties) throws Exception { + @Override + public void setUpComparisonTest(final Properties properties) throws Exception { journal = new Journal(properties); } + @Override public void tearDownComparisonTest() throws Exception { if (journal != null) { @@ -160,11 +162,13 @@ 100,// nops .10// abortRate ); + } finally { journal.destroy(); } + } /** @@ -405,9 +409,9 @@ private final int nops; private final double abortRate; - final Random r = new Random(); + private final Random r = new Random(); - public Task(Journal journal,String name, int trial, int keyLen, int nops, double abortRate) { + public Task(final Journal journal,final String name, int trial, int keyLen, int nops, double abortRate) { this.journal = journal; @@ -423,6 +427,7 @@ } + @Override public String toString() { return super.toString()+"#"+trial; @@ -435,6 +440,7 @@ * @return The commit time of the transactions and <code>0L</code> IFF * the transaction was aborted. */ + @Override public Long call() throws Exception { final long tx = journal.newTx(ITx.UNISOLATED); @@ -445,8 +451,9 @@ * complete. */ - journal.submit(new AbstractTask(journal, tx, name) { + journal.submit(new AbstractTask<Object>(journal, tx, name) { + @Override protected Object doTask() { // Random operations on the named index(s). @@ -532,9 +539,9 @@ * @see GenerateExperiment, which may be used to generate a set of * conditions to be run by the {@link ExperimentDriver}. */ - public static void main(String[] args) throws Exception { + public static void main(final String[] args) throws Exception { - Properties properties = new Properties(); + final Properties properties = new Properties(); // properties.setProperty(Options.FORCE_ON_COMMIT,ForceEnum.No.toString()); @@ -560,7 +567,7 @@ properties.setProperty(TestOptions.ABORT_RATE,".05"); - IComparisonTest test = new StressTestConcurrentTx(); + final IComparisonTest test = new StressTestConcurrentTx(); test.setUpComparisonTest(properties); @@ -632,7 +639,8 @@ * There are no "optional" properties - you must make sure that * each property has a defined value. */ - public Result doComparisonTest(Properties properties) throws Exception { + @Override + public Result doComparisonTest(final Properties properties) throws Exception { final long timeout = Long.parseLong(properties.getProperty(TestOptions.TIMEOUT)); @@ -670,16 +678,16 @@ * * @param args */ - public static void main(String[] args) throws Exception { + public static void main(final String[] args) throws Exception { // this is the test to be run. - String className = StressTestConcurrentTx.class.getName(); + final String className = StressTestConcurrentTx.class.getName(); /* * Set defaults for each condition. */ - Map<String,String> defaultProperties = new HashMap<String,String>(); + final Map<String,String> defaultProperties = new HashMap<String,String>(); // force delete of the files on close of the journal under test. 
defaultProperties.put(Options.CREATE_TEMP_FILE,"true"); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestConcurrentUnisolatedIndices.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -75,6 +75,7 @@ private Journal journal; + @Override public void setUpComparisonTest(final Properties properties) throws Exception { @@ -82,6 +83,7 @@ } + @Override public void tearDownComparisonTest() throws Exception { if (journal != null) { @@ -425,6 +427,7 @@ } + @Override protected String getTaskName() { return super.getTaskName()+"#"+trial; @@ -436,6 +439,7 @@ * * @return null */ + @Override public Object doTask() throws Exception { // the index names on which the writer holds a lock. @@ -698,6 +702,7 @@ * There are no "optional" properties - you must make sure that * each property has a defined value. */ + @Override public Result doComparisonTest(Properties properties) throws Exception { final long timeout = Long.parseLong(properties.getProperty(TestOptions.TIMEOUT)); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestLockContention.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestLockContention.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestLockContention.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -48,9 +48,8 @@ * should still block many serialized writer tasks together for good throughput. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ -public class StressTestLockContention extends ProxyTestCase { +public class StressTestLockContention extends ProxyTestCase<Journal> { /** * @@ -62,7 +61,7 @@ /** * @param name */ - public StressTestLockContention(String name) { + public StressTestLockContention(final String name) { super(name); } @@ -88,7 +87,7 @@ final int ntasks = 500; - final List<Future> futures; + final List<Future<Object>> futures; { @@ -100,13 +99,14 @@ final String[] resource = new String[] { "foo", "bar", "baz" }; - final Collection<AbstractTask> tasks = new HashSet<AbstractTask>( + final Collection<AbstractTask<Object>> tasks = new HashSet<AbstractTask<Object>>( ntasks); for (int i = 0; i < ntasks; i++) { - tasks.add(new AbstractTask(journal, ITx.UNISOLATED, resource) { + tasks.add(new AbstractTask<Object>(journal, ITx.UNISOLATED, resource) { + @Override protected Object doTask() throws Exception { return null; @@ -124,7 +124,7 @@ * returns. 
*/ - futures = journal.invokeAll(tasks, 20, TimeUnit.SECONDS); + futures = (List)journal.invokeAll(tasks, 20, TimeUnit.SECONDS); } finally { @@ -153,7 +153,7 @@ } - final Iterator<Future> itr = futures.iterator(); + final Iterator<Future<Object>> itr = futures.iterator(); int ncancelled = 0; int ncomplete = 0; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestUnisolatedReadWriteIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestUnisolatedReadWriteIndex.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/StressTestUnisolatedReadWriteIndex.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -56,7 +56,6 @@ * {@link UnisolatedReadWriteIndex} rather than the {@link ConcurrencyManager}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ * @see UnisolatedReadWriteIndex */ public class StressTestUnisolatedReadWriteIndex extends ProxyTestCase<Journal> { @@ -435,6 +434,7 @@ } + @Override public String toString() { return getClass().getName() + "#" + trial; @@ -446,6 +446,7 @@ * * @return null */ + @Override public Void call() throws Exception { final IIndex[] indices = new IIndex[resource.length]; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/storedquery/StoredQueryService.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -286,6 +286,13 @@ } @Override + public boolean isReadOnly() { + + return true; + + } + + @Override public TupleQueryResult call() throws Exception { BigdataSailRepositoryConnection cxn = null; boolean success = false; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -36,6 +36,7 @@ import com.bigdata.BigdataStatics; import com.bigdata.journal.IConcurrencyManager; import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.IReadOnly; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; import com.bigdata.journal.TimestampUtility; @@ -57,7 +58,7 @@ * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > * Concurrent unisolated operations against multiple KBs </a> */ -abstract public class AbstractApiTask<T> implements IApiTask<T> { +abstract public class AbstractApiTask<T> implements IApiTask<T>, IReadOnly { /** The reference to the {@link IIndexManager} is set before the task is executed. 
*/ private final AtomicReference<IIndexManager> indexManagerRef = new AtomicReference<IIndexManager>(); @@ -69,6 +70,9 @@ protected final long timestamp; @Override + abstract public boolean isReadOnly(); + + @Override public String getNamespace() { return namespace; } @@ -119,28 +123,21 @@ } /** - * Return a read-only view of the {@link AbstractTripleStore} for the given - * namespace will read from the commit point associated with the given - * timestamp. + * Return a view of the {@link AbstractTripleStore} for the given namespace + * that will read on the commit point associated with the given timestamp. * * @param namespace * The namespace. * @param timestamp - * The timestamp. + * The timestamp or {@link ITx#UNISOLATED} to obtain a read/write + * view of the index. * * @return The {@link AbstractTripleStore} -or- <code>null</code> if none is * found for that namespace and timestamp. - * - * TODO Enforce historical query by making sure timestamps conform - * (we do not want to allow read/write tx queries unless update - * semantics are introduced ala SPARQL 1.1). */ protected AbstractTripleStore getTripleStore(final String namespace, final long timestamp) { - // if (timestamp == ITx.UNISOLATED) - // throw new IllegalArgumentException("UNISOLATED reads disallowed."); - // resolve the default namespace. final AbstractTripleStore tripleStore = (AbstractTripleStore) getIndexManager() .getResourceLocator().locate(namespace, timestamp); @@ -275,7 +272,9 @@ * Execute the REST API task. * * Note: For scale-out, the operation will be applied using - * client-side global views of the indices. + * client-side global views of the indices. This means that + * there will not be any globally consistent views of the + * indices and that updates will be shard-wise local. * * Note: This can be used for operations on read-only views (even on * a Journal). This is helpful since we can avoid some overhead @@ -328,6 +327,11 @@ * namespace. * * (b) is now possible with the fix to the Name2Addr prefix scan. + * + * Note: We also need to isolate any named solution sets in the + * namespace of the KB. Those will be discovered along with the + * indices, but they may require changes to {@link AbstractTask} + * for GIST support. */ // Obtain the necessary locks for R/w access to KB indices. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForIndexManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForIndexManager.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/ApiTaskForIndexManager.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -37,6 +37,9 @@ * view of a scale-out index. It can also be used for a {@link Journal} if are * not relying on the {@link IConcurrencyManager} to guard the resources * declared by the task. + * <p> + * Note: Global locks are NOT used in scale-out and operations will be only + * shard-wise ACID. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @param <T> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/AbstractRestApiTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/AbstractRestApiTask.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/AbstractRestApiTask.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -41,14 +41,6 @@ * doLocalAbort() should interrupt NSS requests and AbstractTasks </a> * @see <a href="- http://sourceforge.net/apps/trac/bigdata/ticket/566" > * Concurrent unisolated operations against multiple KBs </a> - * - * FIXME GROUP COMMIT: Define DropKBTask and CreateKBTask for use by (a) - * the multi-tenancy API; and (b) variants without servlet request and - * response parameters for use by the unit tests and the NSS during its - * default KB create logic. These latter tasks should be a base class of - * the RestApiTask that supports the same delegation pattern, but which - * does not require the http request and response parameters. Fix up the - * callers of CreateKBTask and tripleStore.destroy() to use these tasks. */ abstract class AbstractRestApiTask<T> extends AbstractApiTask<T> { @@ -82,50 +74,57 @@ } - abstract static class RestApiQueryTask<T> extends AbstractRestApiTask<T> { - /** - * - * @param req - * The {@link HttpServletRequest}. - * @param resp - * The {@link HttpServletResponse}. - * @param namespace - * The namespace of the target KB instance. - * @param timestamp - * The timestamp used to obtain a query connection. - */ - public RestApiQueryTask(// - final HttpServletRequest req,// - final HttpServletResponse resp,// - final String namespace, final long timestamp) { - - super(req, resp, namespace, timestamp); - - } - - } - - abstract static class RestApiMutationTask<T> extends AbstractRestApiTask<T> { - /** - * - * @param req - * The {@link HttpServletRequest}. - * @param resp - * The {@link HttpServletResponse}. - * @param namespace - * The namespace of the target KB instance. - * @param timestamp - * The timestamp used to obtain a mutable connection. - */ - public RestApiMutationTask(// - final HttpServletRequest req,// - final HttpServletResponse resp,// - final String namespace, final long timestamp) { - - super(req, resp, namespace, timestamp); - - } - - } + /* + * Note: The introduction of a read/write task distinction at this level is + * problematic because some tasks must be declared in terms of the + * AbstractApiTask and then add the servlet parameters. Thus, this + * distinction could only really be captured through a marker interface + * (such as we have for + */ +// abstract static class RestApiQueryTask<T> extends AbstractRestApiTask<T> { +// /** +// * +// * @param req +// * The {@link HttpServletRequest}. +// * @param resp +// * The {@link HttpServletResponse}. +// * @param namespace +// * The namespace of the target KB instance. +// * @param timestamp +// * The timestamp used to obtain a query connection. +// */ +// public RestApiQueryTask(// +// final HttpServletRequest req,// +// final HttpServletResponse resp,// +// final String namespace, final long timestamp) { +// +// super(req, resp, namespace, timestamp); +// +// } +// +// } +// +// abstract static class RestApiMutationTask<T> extends AbstractRestApiTask<T> { +// /** +// * +// * @param req +// * The {@link HttpServletRequest}. 
+// * @param resp +// * The {@link HttpServletResponse}. +// * @param namespace +// * The namespace of the target KB instance. +// * @param timestamp +// * The timestamp used to obtain a mutable connection. +// */ +// public RestApiMutationTask(// +// final HttpServletRequest req,// +// final HttpServletResponse resp,// +// final String namespace, final long timestamp) { +// +// super(req, resp, namespace, timestamp); +// +// } +// +// } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -95,7 +95,6 @@ import com.bigdata.rdf.sail.ISPARQLUpdateListener; import com.bigdata.rdf.sail.SPARQLUpdateEvent; import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; -import com.bigdata.rdf.sail.webapp.AbstractRestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.StringUtil; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryHints; @@ -1154,6 +1153,14 @@ } @Override + public boolean isReadOnly() { + + // Read-only unless SPARQL UPDATE. + return !AbstractQueryTask.this.update; + + } + + @Override public Void call() throws Exception { BigdataSailRepositoryConnection cxn = null; boolean success = false; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -33,7 +33,6 @@ import com.bigdata.blueprints.BigdataGraphBulkLoad; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; -import com.bigdata.rdf.sail.webapp.AbstractRestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.MiniMime; import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader; @@ -108,7 +107,7 @@ } - private static class BlueprintsPostTask extends RestApiMutationTask<Void> { + private static class BlueprintsPostTask extends AbstractRestApiTask<Void> { public BlueprintsPostTask(HttpServletRequest req, HttpServletResponse resp, String namespace, long timestamp) { @@ -118,6 +117,11 @@ } @Override + public boolean isReadOnly() { + return false; + } + + @Override public Void call() throws Exception { final long begin = System.currentTimeMillis(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -48,7 +48,6 @@ import com.bigdata.journal.ITx; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; -import 
com.bigdata.rdf.sail.webapp.AbstractRestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.client.EncodeDecodeValue; import com.bigdata.rdf.sail.webapp.client.MiniMime; @@ -348,7 +347,7 @@ } - private static class DeleteWithBodyTask extends RestApiMutationTask<Void> { + private static class DeleteWithBodyTask extends AbstractRestApiTask<Void> { private final String baseURI; private final Resource[] defaultContext; @@ -382,6 +381,11 @@ } @Override + public boolean isReadOnly() { + return false; + } + + @Override public Void call() throws Exception { final long begin = System.currentTimeMillis(); @@ -545,7 +549,7 @@ // static private transient final Resource[] nullArray = new Resource[]{}; - private static class DeleteWithAccessPathTask extends RestApiMutationTask<Void> { + private static class DeleteWithAccessPathTask extends AbstractRestApiTask<Void> { private Resource s; private URI p; @@ -580,6 +584,11 @@ } @Override + public boolean isReadOnly() { + return true; + } + + @Override public Void call() throws Exception { final long begin = System.currentTimeMillis(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/InsertServlet.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -49,7 +49,6 @@ import com.bigdata.rdf.rio.IRDFParserOptions; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; -import com.bigdata.rdf.sail.webapp.AbstractRestApiTask.RestApiMutationTask; import com.bigdata.rdf.sail.webapp.client.MiniMime; /** @@ -224,7 +223,7 @@ * TODO The {@link IRDFParserOptions} defaults should be coming from * the KB instance, right? What does the REST API say about this? 
*/ - private static class InsertWithBodyTask extends RestApiMutationTask<Void> { + private static class InsertWithBodyTask extends AbstractRestApiTask<Void> { private final String baseURI; private final Resource[] defaultContext; @@ -258,6 +257,11 @@ } @Override + public boolean isReadOnly() { + return false; + } + + @Override public Void call() throws Exception { final long begin = System.currentTimeMillis(); @@ -407,7 +411,7 @@ } - private static class InsertWithURLsTask extends RestApiMutationTask<Void> { + private static class InsertWithURLsTask extends AbstractRestApiTask<Void> { private final Vector<URL> urls; private final Resource[] defaultContext; @@ -437,6 +441,11 @@ } @Override + public boolean isReadOnly() { + return false; + } + + @Override public Void call() throws Exception { final long begin = System.currentTimeMillis(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -65,8 +65,14 @@ * @author thompsonbry * * FIXME GROUP COMMIT: The CREATE and DESTROY operations require special - * attention. The other operations in this class also should use the new - * REST API pattern, but are not intrinsically sensitive. + * attention. Define DropKBTask and CreateKBTask for use by the + * multi-tenancy API and fix up the callers of CreateKBTask and + * tripleStore.destroy() to use these tasks. This means that the base + * implementations of those tasks must not require the servlet + * parameters. + * + * FIXME GROUP COMMIT: The other operations in this class also should + * use the new REST API pattern, but are not intrinsically sensitive. */ public class MultiTenancyServlet extends BigdataRDFServlet { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-07-16 14:52:28 UTC (rev 8553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-07-16 14:56:31 UTC (rev 8554) @@ -66,7 +66,6 @@ import com.bigdata.rdf.sail.BigdataSailQuery; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.bigdata.rdf.sail.sparql.ast.SimpleNode; -import com.bigdata.rdf.sail.webapp.AbstractRestApiTask.RestApiQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.UpdateTask; @@ -1073,7 +1072,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ - private static class EstCardTask extends RestApiQueryTask<Void> { + private static class EstCardTask extends AbstractRestApiTask<Void> { private final Resource s; private final URI p; @@ -1093,6 +1092,11 @@ ... [truncated message content] |
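
The task pattern running through the diffs above can be summarized with a short sketch. This example is not part of the commit: the class name and body are hypothetical, but the constructor and the isReadOnly()/call() overrides follow the signatures visible in this revision.

    package com.bigdata.rdf.sail.webapp;

    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    /**
     * Hypothetical read-only REST API task (sketch only, not part of this
     * commit). A task extends AbstractRestApiTask, declares its concurrency
     * semantics via isReadOnly(), and does its work in call().
     */
    public class ExampleReadOnlyTask extends AbstractRestApiTask<Void> {

        public ExampleReadOnlyTask(final HttpServletRequest req,
                final HttpServletResponse resp, final String namespace,
                final long timestamp) {

            super(req, resp, namespace, timestamp);

        }

        @Override
        public boolean isReadOnly() {

            // Declares that this task may run against a read-only view and
            // need not be serialized against the mutable (unisolated) view.
            return true;

        }

        @Override
        public Void call() throws Exception {

            // Resolve the KB view for (namespace, timestamp), e.g. via
            // getTripleStore(namespace, timestamp), and read on it.
            return null;

        }

    }

Declaring isReadOnly() correctly matters once group commit is enabled: per the IReadOnly contract above, read-only tasks may run concurrently against read-only views, while mutating tasks are run against the mutable view of the declared indices.
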
From: <tho...@us...> - 2014-07-17 11:32:02
Revision: 8564 http://sourceforge.net/p/bigdata/code/8564 Author: thompsonbry Date: 2014-07-17 11:31:54 +0000 (Thu, 17 Jul 2014) Log Message: ----------- Several changes related to the single-server jetty NSS deployment model (startNSS). build.xml: added comments about the purpose of the different log4j config files. bigdata-war/classes/log4j.properties: modified to NOT specify the FileAppender for the rules log so as to avoid forcing the creation of rules.log file. This tends to be problematic for people since the default directory often does not have sufficient permissions. startNSS: - Modified to fail if BD_HOME is not specified (and reports message). - Wrapped many environment variables in quotes to avoid problems when they are empty. This probably does not handle all edge cases with embedded whitespace yet. - Modified to report the command that is executed and the PID that results. - Modified to not send all output to /dev/null except for brew (the brew code path is probably incorrect, but I have kept the default behavior for now). Note: The bigdataNSS script can not be used to control the run state of the NSS process because it has some brew metavariables. See #918 (Unify the Bigdata Installation Trees) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-07-17 10:52:42 UTC (rev 8563) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-07-17 11:31:54 UTC (rev 8564) @@ -38,11 +38,14 @@ ## # Rule execution log. This is a formatted log file (comma delimited). +# Uncomment next line and also change from ConsoleAppender to FileAppender. +# Make sure that you can write on the specified file name and directory. #log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false -log4j.appender.ruleLog=org.apache.log4j.FileAppender +log4j.appender.ruleLog=org.apache.log4j.ConsoleAppender +#log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL -log4j.appender.ruleLog.File=rules.log +#log4j.appender.ruleLog.File=rules.log log4j.appender.ruleLog.Append=true # I find that it is nicer to have this unbuffered since you can see what # is going on and to make sure that I have complete rule evaluation logs Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-07-17 10:52:42 UTC (rev 8563) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-07-17 11:31:54 UTC (rev 8564) @@ -1171,6 +1171,7 @@ <property name="logging.to.path" location="${dist.var.config.logging}" /> + <!-- Note: normal server logging files. --> <property name="log4j.from.file" location="${bigdata.dir}/bigdata/src/resources/logging/log4j.properties" /> <copy file="${log4j.from.file}" todir="${logging.to.path}" /> @@ -1179,15 +1180,18 @@ <copy file="${logging.from.file}" todir="${logging.to.path}" /> + <!-- Note: scale-out logging files (standalone mode). 
 -->
 	<property name="standalone.log4j.from.file" location="${src.resources.config}/standalone/log4j.properties" />
 	<property name="standalone.log4j.to.file" location="${logging.to.path}/log4jStandalone.properties" />
 	<copy file="${standalone.log4j.from.file}" tofile="${standalone.log4j.to.file}" />
+	<!-- Note: scale-out logging files (cluster mode). -->
 	<property name="server.log4j.from.file" location="${src.resources.config}/log4jServer.properties" />
 	<copy file="${server.log4j.from.file}" todir="${logging.to.path}" />
+	<!-- Note: HA replication cluster logging files. -->
 	<property name="haserver.log4j.from.file" location="${src.resources}/HAJournal/log4jHA.properties" />
 	<copy file="${haserver.log4j.from.file}" todir="${logging.to.path}" />

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS	2014-07-17 10:52:42 UTC (rev 8563)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS	2014-07-17 11:31:54 UTC (rev 8564)
@@ -1,7 +1,11 @@
 #!/bin/bash
+if [ -z "${BD_HOME}" ]; then
+ echo "Not specified: BD_HOME";
+ exit 1;
+fi
 export INSTALL_DIR=${BD_HOME}
-if [ $INSTALL_TYPE == "BREW" ]; then
+if [ "${INSTALL_TYPE}" == "BREW" ]; then
 export LIB_DIR=${INSTALL_DIR}/libexec
 else
 export LIB_DIR=${INSTALL_DIR}/lib
@@ -15,13 +19,13 @@
 export LOG_DIR=${BD_HOME}/var/log
-if [ ! -d $LOG_DIR ]; then
- mkdir -p $LOG_DIR
+if [ ! -d "${LOG_DIR}" ]; then
+ mkdir -p "${LOG_DIR}"
 fi
 export DATA_DIR=${BD_HOME}/var/data
-if [ ! -d $DATA_DIR ]; then
- mkdir -p $DATA_DIR
+if [ ! -d "${DATA_DIR}" ]; then
+ mkdir -p "${DATA_DIR}"
 fi
 export NSS="com.bigdata.rdf.sail.webapp.NanoSparqlServer"
@@ -56,8 +60,8 @@
 # Setup the directory for the pid of the ServiceStarter process.
 lockDir=${INSTALL_DIR}/var/lock
-if [ ! -d $lockDir ]; then
- mkdir -p $lockDir
+if [ ! -d "${lockDir}" ]; then
+ mkdir -p "${lockDir}"
 fi
 pidFile=$lockDir/pid
@@ -70,8 +74,19 @@
 $NSS_PROPERTIES\
 "
-# echo "Running: $cmd"
-$cmd > /dev/null 2>&1 &
+echo "Running: $cmd"
+# Note: This redirects console logger output to dev/null!
+# This is only valid if all logger output is explicitly
+# directed into a file, which it is not when using the
+# default log4j and java.util.logging configuration. I am
+# leaving the brew installer behavior as its historical
+# value to avoid breaking it, but it is very likely to be
+# incorrect.
+if [ "${INSTALL_TYPE}" == "BREW" ]; then
+ $cmd > /dev/null 2>&1 &
+else
+ $cmd&
+fi
 pid=$!
 # echo "PID=$pid"
 echo "$pid">$pidFile
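
The log4j.properties change above stops forcing a rules.log file into a directory that may not be writable. Users who still want the file-based rule execution log can restore it along the lines of the following sketch; only the keys shown in the diff above are used, and the absolute path is an example, not part of the commit:

    log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
    log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false
    log4j.appender.ruleLog=org.apache.log4j.FileAppender
    log4j.appender.ruleLog.Threshold=ALL
    # Hypothetical path: point this at a location the server process can write.
    log4j.appender.ruleLog.File=/var/log/bigdata/rules.log
    log4j.appender.ruleLog.Append=true
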
From: <mrp...@us...> - 2014-07-18 15:30:41
Revision: 8574 http://sourceforge.net/p/bigdata/code/8574 Author: mrpersonick Date: 2014-07-18 15:30:35 +0000 (Fri, 18 Jul 2014) Log Message: ----------- Ticket 995: Better SPARQL support through BigdataGraph, plus some hardening and usability refinements. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataRDFFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataVertex.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataValueReplacer.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataSelection.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsValueFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/DefaultBlueprintsValueFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/ImmortalGraph.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BlueprintsRDFFactory.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataEdge.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -24,10 +24,11 @@ import java.util.Arrays; import java.util.List; +import java.util.Set; +import org.apache.log4j.Logger; import org.openrdf.model.Statement; import org.openrdf.model.URI; -import org.openrdf.model.vocabulary.RDFS; import com.tinkerpop.blueprints.Direction; import com.tinkerpop.blueprints.Edge; @@ -42,6 +43,8 @@ */ public class BigdataEdge extends BigdataElement implements Edge { + private static final transient Logger log = Logger.getLogger(BigdataEdge.class); + private static final List<String> blacklist = 
Arrays.asList(new String[] { "id", "", "label" }); @@ -57,13 +60,19 @@ @Override public Object getId() { - return graph.factory.fromEdgeURI(uri); + if (log.isInfoEnabled()) + log.info("()"); + + return graph.factory.fromURI(uri); } @Override public void remove() { + if (log.isInfoEnabled()) + log.info("()"); + graph.removeEdge(this); } @@ -71,13 +80,19 @@ @Override public String getLabel() { - return (String) graph.getProperty(uri, RDFS.LABEL); + if (log.isInfoEnabled()) + log.info("()"); + + return (String) graph.getProperty(uri, graph.getValueFactory().getLabelURI()); } @Override public Vertex getVertex(final Direction dir) throws IllegalArgumentException { + if (log.isInfoEnabled()) + log.info("("+dir+")"); + if (dir == Direction.BOTH) { throw new IllegalArgumentException(); } @@ -85,7 +100,7 @@ final URI uri = (URI) (dir == Direction.OUT ? stmt.getSubject() : stmt.getObject()); - final String id = graph.factory.fromVertexURI(uri); + final String id = graph.factory.fromURI(uri); return graph.getVertex(id); @@ -94,6 +109,9 @@ @Override public void setProperty(final String prop, final Object val) { + if (log.isInfoEnabled()) + log.info("("+prop+", "+val+")"); + if (prop == null || blacklist.contains(prop)) { throw new IllegalArgumentException(); } @@ -112,4 +130,55 @@ } + @Override + public <T> T getProperty(final String prop) { + + if (log.isInfoEnabled()) + log.info("("+prop+")"); + + return super.getProperty(prop); + } + + @Override + public Set<String> getPropertyKeys() { + + if (log.isInfoEnabled()) + log.info("()"); + + return super.getPropertyKeys(); + + } + + @Override + public <T> T removeProperty(final String prop) { + + if (log.isInfoEnabled()) + log.info("("+prop+")"); + + return super.removeProperty(prop); + + } + +// @Override +// public void addProperty(final String prop, final Object val) { +// +// if (log.isInfoEnabled()) +// log.info("("+prop+", "+val+")"); +// +// super.addProperty(prop, val); +// +// } +// +// @Override +// public <T> List<T> getProperties(final String prop) { +// +// if (log.isInfoEnabled()) +// log.info("("+prop+")"); +// +// return super.getProperties(prop); +// +// } + + + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataElement.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -85,30 +85,30 @@ } - /** - * Simple extension for multi-valued properties. - */ - public void addProperty(final String prop, final Object val) { - - if (prop == null || blacklist.contains(prop)) { - throw new IllegalArgumentException(); - } - - graph.addProperty(uri, prop, val); - - } +// /** +// * Simple extension for multi-valued properties. +// */ +// public void addProperty(final String prop, final Object val) { +// +// if (prop == null || blacklist.contains(prop)) { +// throw new IllegalArgumentException(); +// } +// +// graph.addProperty(uri, prop, val); +// +// } +// +// /** +// * Simple extension for multi-valued properties. +// */ +// @SuppressWarnings("unchecked") +// public <T> List<T> getProperties(final String property) { +// +// return (List<T>) graph.getProperties(uri, property); +// +// } /** - * Simple extension for multi-valued properties. 
- */ - @SuppressWarnings("unchecked") - public <T> List<T> getProperties(final String property) { - - return (List<T>) graph.getProperties(uri, property); - - } - - /** * Generated code. */ @Override Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraph.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -24,28 +24,31 @@ import info.aduna.iteration.CloseableIteration; +import java.lang.reflect.Array; +import java.util.Collection; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; +import java.util.Properties; import java.util.Set; import java.util.UUID; +import org.apache.log4j.Logger; import org.openrdf.OpenRDFException; import org.openrdf.model.Literal; import org.openrdf.model.Statement; import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.model.impl.StatementImpl; -import org.openrdf.model.impl.URIImpl; -import org.openrdf.model.vocabulary.RDF; -import org.openrdf.model.vocabulary.RDFS; import org.openrdf.query.GraphQueryResult; import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.query.parser.QueryParserUtil; import org.openrdf.repository.RepositoryConnection; import org.openrdf.repository.RepositoryResult; -import com.bigdata.rdf.store.BD; import com.tinkerpop.blueprints.Direction; import com.tinkerpop.blueprints.Edge; import com.tinkerpop.blueprints.Features; @@ -62,25 +65,64 @@ */ public abstract class BigdataGraph implements Graph { + private static final transient Logger log = Logger.getLogger(BigdataGraph.class); + + public interface Options { + + /** + * Allow multiple edges with the same edge id. Useful for assigning + * by-reference properties (e.g. vertex type). + */ + String LAX_EDGES = BigdataGraph.class.getName() + ".laxEdges"; + + } + /** + * URI used for typing elements. + */ + protected final URI TYPE; + + /** * URI used to represent a Vertex. */ - public static final URI VERTEX = new URIImpl(BD.NAMESPACE + "Vertex"); - + protected final URI VERTEX; + /** * URI used to represent a Edge. */ - public static final URI EDGE = new URIImpl(BD.NAMESPACE + "Edge"); - + protected final URI EDGE; + + /** + * URI used for labeling edges. + */ + protected final URI LABEL; + /** * Factory for round-tripping between Blueprints data and RDF data. */ - final BlueprintsRDFFactory factory; + final BlueprintsValueFactory factory; - public BigdataGraph(final BlueprintsRDFFactory factory) { + /** + * Allow re-use of edge identifiers. + */ + private final boolean laxEdges; + + public BigdataGraph(final BlueprintsValueFactory factory) { + this(factory, new Properties()); + } + + public BigdataGraph(final BlueprintsValueFactory factory, + final Properties props) { this.factory = factory; + this.laxEdges = Boolean.valueOf(props.getProperty(Options.LAX_EDGES, "false")); + + this.TYPE = factory.getTypeURI(); + this.VERTEX = factory.getVertexURI(); + this.EDGE = factory.getEdgeURI(); + this.LABEL = factory.getLabelURI(); + } /** @@ -93,13 +135,27 @@ } + /** + * Return the factory used to round-trip between Blueprints values and + * RDF values. 
+ */ + public BlueprintsValueFactory getValueFactory() { + return factory; + } + /** * Different implementations will return different types of connections * depending on the mode (client/server, embedded, read-only, etc.) */ - protected abstract RepositoryConnection cxn() throws Exception; + protected abstract RepositoryConnection getWriteConnection() throws Exception; /** + * A read-only connection can be used for read operations without blocking + * or being blocked by writers. + */ + protected abstract RepositoryConnection getReadOnlyConnection() throws Exception; + + /** * Return a single-valued property for an edge or vertex. * * @see {@link BigdataElement} @@ -120,26 +176,38 @@ try { final RepositoryResult<Statement> result = - cxn().getStatements(uri, prop, null, false); + getWriteConnection().getStatements(uri, prop, null, false); if (result.hasNext()) { - final Value value = result.next().getObject(); - - if (result.hasNext()) { - throw new RuntimeException(uri - + ": more than one value for p: " + prop - + ", did you mean to call getProperties()?"); + final Statement stmt = result.next(); + + if (!result.hasNext()) { + + /* + * Single value. + */ + return getProperty(stmt.getObject()); + + } else { + + /* + * Multi-value, use a list. + */ + final List<Object> list = new LinkedList<Object>(); + + list.add(getProperty(stmt.getObject())); + + while (result.hasNext()) { + + list.add(getProperty(result.next().getObject())); + + } + + return list; + } - if (!(value instanceof Literal)) { - throw new RuntimeException("not a property: " + value); - } - - final Literal lit = (Literal) value; - - return factory.fromLiteral(lit); - } return null; @@ -152,56 +220,73 @@ } - /** - * Return a multi-valued property for an edge or vertex. - * - * @see {@link BigdataElement} - */ - public List<Object> getProperties(final URI uri, final String prop) { + protected Object getProperty(final Value value) { + + if (!(value instanceof Literal)) { + throw new RuntimeException("not a property: " + value); + } + + final Literal lit = (Literal) value; - return getProperties(uri, factory.toPropertyURI(prop)); + final Object o = factory.fromLiteral(lit); - } + return o; - - /** - * Return a multi-valued property for an edge or vertex. - * - * @see {@link BigdataElement} - */ - public List<Object> getProperties(final URI uri, final URI prop) { - - try { - - final RepositoryResult<Statement> result = - cxn().getStatements(uri, prop, null, false); - - final List<Object> props = new LinkedList<Object>(); - - while (result.hasNext()) { - - final Value value = result.next().getObject(); - - if (!(value instanceof Literal)) { - throw new RuntimeException("not a property: " + value); - } - - final Literal lit = (Literal) value; - - props.add(factory.fromLiteral(lit)); - - } - - return props; - - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException(e); - } - } +// /** +// * Return a multi-valued property for an edge or vertex. +// * +// * TODO get rid of me +// * +// * @see {@link BigdataElement} +// */ +// public List<Object> getProperties(final URI uri, final String prop) { +// +// return getProperties(uri, factory.toPropertyURI(prop)); +// +// } +// +// /** +// * Return a multi-valued property for an edge or vertex. 
+// * +// * TODO get rid of me +// * +// * @see {@link BigdataElement} +// */ +// public List<Object> getProperties(final URI uri, final URI prop) { +// +// try { +// +// final RepositoryResult<Statement> result = +// getWriteConnection().getStatements(uri, prop, null, false); +// +// final List<Object> props = new LinkedList<Object>(); +// +// while (result.hasNext()) { +// +// final Value value = result.next().getObject(); +// +// if (!(value instanceof Literal)) { +// throw new RuntimeException("not a property: " + value); +// } +// +// final Literal lit = (Literal) value; +// +// props.add(factory.fromLiteral(lit)); +// +// } +// +// return props; +// +// } catch (RuntimeException e) { +// throw e; +// } catch (Exception e) { +// throw new RuntimeException(e); +// } +// +// } + /** * Return the property names for an edge or vertex. * @@ -212,7 +297,7 @@ try { final RepositoryResult<Statement> result = - cxn().getStatements(uri, null, null, false); + getWriteConnection().getStatements(uri, null, null, false); final Set<String> properties = new LinkedHashSet<String>(); @@ -224,12 +309,12 @@ continue; } - if (stmt.getPredicate().equals(RDFS.LABEL)) { + if (stmt.getPredicate().equals(LABEL)) { continue; } final String p = - factory.fromPropertyURI(stmt.getPredicate()); + factory.fromURI(stmt.getPredicate()); properties.add(p); @@ -267,7 +352,7 @@ final Object oldVal = getProperty(uri, prop); - cxn().remove(uri, prop, null); + getWriteConnection().remove(uri, prop, null); return oldVal; @@ -283,9 +368,54 @@ * * @see {@link BigdataElement} */ - public void setProperty(final URI uri, final String prop, final Object val) { + public void setProperty(final URI s, final String prop, final Object val) { + + if (val instanceof Collection) { + + @SuppressWarnings("unchecked") + final Collection<Object> vals = (Collection<Object>) val; + + // empty collection, do nothing + if (vals.size() == 0) { + return; + } + + final Collection<Literal> literals = new LinkedList<Literal>(); + + for (Object o : vals) { + + literals.add(factory.toLiteral(o)); + + } + + setProperty(s, factory.toPropertyURI(prop), literals); + + } else if (val.getClass().isArray()) { + + final int len = Array.getLength(val); + + // empty array, do nothing + if (len == 0) { + return; + } + + final Collection<Literal> literals = new LinkedList<Literal>(); + + for (int i = 0; i < len; i++) { + + final Object o = Array.get(val, i); + + literals.add(factory.toLiteral(o)); + + } + + setProperty(s, factory.toPropertyURI(prop), literals); + + } else { - setProperty(uri, factory.toPropertyURI(prop), factory.toLiteral(val)); + setProperty(s, factory.toPropertyURI(prop), factory.toLiteral(val)); + + } } @@ -298,11 +428,15 @@ public void setProperty(final URI uri, final URI prop, final Literal val) { try { + + final RepositoryConnection cxn = getWriteConnection(); + + // remove the old value + cxn.remove(uri, prop, null); + + // add the new value + cxn.add(uri, prop, val); - cxn().remove(uri, prop, null); - - cxn().add(uri, prop, val); - } catch (RuntimeException e) { throw e; } catch (Exception e) { @@ -312,27 +446,26 @@ } /** - * Add a property on an edge or vertex (multi-value property extension). + * Set a multi-value property on an edge or vertex (remove the old + * values first). 
* * @see {@link BigdataElement} */ - public void addProperty(final URI uri, final String prop, final Object val) { + public void setProperty(final URI uri, final URI prop, + final Collection<Literal> vals) { - setProperty(uri, factory.toPropertyURI(prop), factory.toLiteral(val)); + try { - } - - /** - * Add a property on an edge or vertex (multi-value property extension). - * - * @see {@link BigdataElement} - */ - public void addProperty(final URI uri, final URI prop, final Literal val) { - - try { + final RepositoryConnection cxn = getWriteConnection(); - cxn().add(uri, prop, val); + // remove the old value + cxn.remove(uri, prop, null); + // add the new values + for (Literal val : vals) { + cxn.add(uri, prop, val); + } + } catch (RuntimeException e) { throw e; } catch (Exception e) { @@ -341,6 +474,36 @@ } +// /** +// * Add a property on an edge or vertex (multi-value property extension). +// * +// * @see {@link BigdataElement} +// */ +// public void addProperty(final URI uri, final String prop, final Object val) { +// +// setProperty(uri, factory.toPropertyURI(prop), factory.toLiteral(val)); +// +// } +// +// /** +// * Add a property on an edge or vertex (multi-value property extension). +// * +// * @see {@link BigdataElement} +// */ +// public void addProperty(final URI uri, final URI prop, final Literal val) { +// +// try { +// +// getWriteConnection().add(uri, prop, val); +// +// } catch (RuntimeException e) { +// throw e; +// } catch (Exception e) { +// throw new RuntimeException(e); +// } +// +// } + /** * Post a GraphML file to the remote server. (Bulk-upload operation.) */ @@ -357,40 +520,44 @@ public Edge addEdge(final Object key, final Vertex from, final Vertex to, final String label) { + if (log.isInfoEnabled()) + log.info("("+key+", "+from+", "+to+", "+label+")"); + if (label == null) { throw new IllegalArgumentException(); } - final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); - - final URI edgeURI = factory.toEdgeURI(eid); - - if (key != null) { + if (key != null && !laxEdges) { final Edge edge = getEdge(key); if (edge != null) { if (!(edge.getVertex(Direction.OUT).equals(from) && - (edge.getVertex(Direction.OUT).equals(to)))) { + (edge.getVertex(Direction.IN).equals(to)))) { throw new IllegalArgumentException("edge already exists: " + key); } } } + final String eid = key != null ? key.toString() : UUID.randomUUID().toString(); + + final URI edgeURI = factory.toEdgeURI(eid); + try { // do we need to check this? -// if (cxn().hasStatement(edgeURI, RDF.TYPE, EDGE, false)) { +// if (cxn().hasStatement(edgeURI, TYPE, EDGE, false)) { // throw new IllegalArgumentException("edge " + eid + " already exists"); // } final URI fromURI = factory.toVertexURI(from.getId().toString()); final URI toURI = factory.toVertexURI(to.getId().toString()); - cxn().add(fromURI, edgeURI, toURI); - cxn().add(edgeURI, RDF.TYPE, EDGE); - cxn().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); + final RepositoryConnection cxn = getWriteConnection(); + cxn.add(fromURI, edgeURI, toURI); + cxn.add(edgeURI, TYPE, EDGE); + cxn.add(edgeURI, LABEL, factory.toLiteral(label)); return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); @@ -408,6 +575,9 @@ @Override public Vertex addVertex(final Object key) { + if (log.isInfoEnabled()) + log.info("("+key+")"); + try { final String vid = key != null ? @@ -416,11 +586,11 @@ final URI uri = factory.toVertexURI(vid); // do we need to check this? 
-// if (cxn().hasStatement(vertexURI, RDF.TYPE, VERTEX, false)) { +// if (cxn().hasStatement(vertexURI, TYPE, VERTEX, false)) { // throw new IllegalArgumentException("vertex " + vid + " already exists"); // } - cxn().add(uri, RDF.TYPE, VERTEX); + getWriteConnection().add(uri, TYPE, VERTEX); return new BigdataVertex(uri, this); @@ -437,6 +607,9 @@ */ @Override public Edge getEdge(final Object key) { + + if (log.isInfoEnabled()) + log.info("("+key+")"); if (key == null) throw new IllegalArgumentException(); @@ -446,7 +619,7 @@ final URI edge = factory.toEdgeURI(key.toString()); final RepositoryResult<Statement> result = - cxn().getStatements(null, edge, null, false); + getWriteConnection().getStatements(null, edge, null, false); if (result.hasNext()) { @@ -477,6 +650,9 @@ @Override public Iterable<Edge> getEdges() { + if (log.isInfoEnabled()) + log.info("()"); + final URI wild = null; return getEdges(wild, wild); @@ -549,7 +725,7 @@ try { final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + getWriteConnection().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); final GraphQueryResult stmts = query.evaluate(); @@ -576,7 +752,7 @@ try { final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + getWriteConnection().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); final GraphQueryResult stmts = query.evaluate(); @@ -635,7 +811,7 @@ try { final org.openrdf.query.GraphQuery query = - cxn().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + getWriteConnection().prepareGraphQuery(QueryLanguage.SPARQL, queryStr); final GraphQueryResult stmts = query.evaluate(); @@ -663,6 +839,9 @@ @Override public Iterable<Edge> getEdges(final String prop, final Object val) { + if (log.isInfoEnabled()) + log.info("("+prop+", "+val+")"); + final URI p = factory.toPropertyURI(prop); final Literal o = factory.toLiteral(val); @@ -692,6 +871,9 @@ @Override public Vertex getVertex(final Object key) { + if (log.isInfoEnabled()) + log.info("("+key+")"); + if (key == null) throw new IllegalArgumentException(); @@ -699,7 +881,7 @@ try { - if (cxn().hasStatement(uri, RDF.TYPE, VERTEX, false)) { + if (getWriteConnection().hasStatement(uri, TYPE, VERTEX, false)) { return new BigdataVertex(uri, this); } @@ -720,10 +902,13 @@ @Override public Iterable<Vertex> getVertices() { + if (log.isInfoEnabled()) + log.info("()"); + try { final RepositoryResult<Statement> result = - cxn().getStatements(null, RDF.TYPE, VERTEX, false); + getWriteConnection().getStatements(null, TYPE, VERTEX, false); return new VertexIterable(result, true); @@ -741,13 +926,16 @@ @Override public Iterable<Vertex> getVertices(final String prop, final Object val) { + if (log.isInfoEnabled()) + log.info("("+prop+", "+val+")"); + final URI p = factory.toPropertyURI(prop); final Literal o = factory.toLiteral(val); try { final RepositoryResult<Statement> result = - cxn().getStatements(null, p, o, false); + getWriteConnection().getStatements(null, p, o, false); return new VertexIterable(result, true); @@ -765,6 +953,10 @@ */ @Override public GraphQuery query() { + + if (log.isInfoEnabled()) + log.info("()"); + // return new DefaultGraphQuery(this); return new BigdataGraphQuery(this); } @@ -779,17 +971,17 @@ final URI uri = factory.toURI(edge); - if (!cxn().hasStatement(uri, RDF.TYPE, EDGE, false)) { + if (!getWriteConnection().hasStatement(uri, TYPE, EDGE, false)) { throw new IllegalStateException(); } final URI wild = null; // remove the edge statement - 
cxn().remove(wild, uri, wild); + getWriteConnection().remove(wild, uri, wild); // remove its properties - cxn().remove(uri, wild, wild); + getWriteConnection().remove(uri, wild, wild); } catch (RuntimeException e) { throw e; @@ -809,17 +1001,17 @@ final URI uri = factory.toURI(vertex); - if (!cxn().hasStatement(uri, RDF.TYPE, VERTEX, false)) { + if (!getWriteConnection().hasStatement(uri, TYPE, VERTEX, false)) { throw new IllegalStateException(); } final URI wild = null; // remove outgoing edges and properties - cxn().remove(uri, wild, wild); + getWriteConnection().remove(uri, wild, wild); // remove incoming edges - cxn().remove(wild, wild, uri); + getWriteConnection().remove(wild, wild, uri); } catch (RuntimeException e) { throw e; @@ -1004,6 +1196,100 @@ } + /** + * Project a subgraph using a SPARQL query. + */ + public BigdataGraphlet project(final String queryStr) throws Exception { + + final String operation = + QueryParserUtil.removeSPARQLQueryProlog(queryStr).toLowerCase(); + + if (!operation.startsWith("construct")) { + throw new IllegalArgumentException("not a graph query"); + } + + try { + + final RepositoryConnection cxn = getReadOnlyConnection(); + + try { + + final org.openrdf.query.GraphQuery query = + cxn.prepareGraphQuery(QueryLanguage.SPARQL, queryStr); + + final GraphQueryResult result = query.evaluate(); + try { + + final BigdataQueryProjection projection = + new BigdataQueryProjection(factory); + + return projection.convert(result); + + } finally { + result.close(); + } + + } finally { + + cxn.close(); + + } + + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + + /** + * Select results using a SPARQL query. + */ + public BigdataSelection select(final String queryStr) throws Exception { + + final String operation = + QueryParserUtil.removeSPARQLQueryProlog(queryStr).toLowerCase(); + + if (!operation.startsWith("select")) { + throw new IllegalArgumentException("not a tuple query"); + } + + try { + + final RepositoryConnection cxn = getReadOnlyConnection(); + + try { + + final TupleQuery query = (TupleQuery) + cxn.prepareTupleQuery(QueryLanguage.SPARQL, queryStr); + + final TupleQueryResult result = query.evaluate(); + try { + + final BigdataQueryProjection projection = + new BigdataQueryProjection(factory); + + return projection.convert(result); + + } finally { + result.close(); + } + + } finally { + + cxn.close(); + + } + + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + + } + protected static final Features FEATURES = new Features(); @Override @@ -1020,9 +1306,9 @@ FEATURES.supportsDoubleProperty = true; FEATURES.supportsFloatProperty = true; FEATURES.supportsIntegerProperty = true; - FEATURES.supportsPrimitiveArrayProperty = false; - FEATURES.supportsUniformListProperty = false; - FEATURES.supportsMixedListProperty = false; + FEATURES.supportsPrimitiveArrayProperty = true; + FEATURES.supportsUniformListProperty = true; + FEATURES.supportsMixedListProperty = true; FEATURES.supportsLongProperty = true; FEATURES.supportsMapProperty = false; FEATURES.supportsStringProperty = true; @@ -1034,7 +1320,7 @@ FEATURES.supportsEdgeIteration = true; FEATURES.supportsVertexIndex = false; FEATURES.supportsEdgeIndex = false; - FEATURES.ignoresSuppliedIds = true; + FEATURES.ignoresSuppliedIds = false; FEATURES.supportsTransactions = false; FEATURES.supportsIndices = true; FEATURES.supportsKeyIndices = true; @@ -1046,5 +1332,5 @@ FEATURES.supportsThreadedTransactions 
= false; } - + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphBulkLoad.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -35,7 +35,6 @@ import com.bigdata.rdf.changesets.IChangeRecord; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; import com.tinkerpop.blueprints.Edge; -import com.tinkerpop.blueprints.Features; import com.tinkerpop.blueprints.GraphQuery; import com.tinkerpop.blueprints.TransactionalGraph; import com.tinkerpop.blueprints.Vertex; @@ -60,17 +59,21 @@ } public BigdataGraphBulkLoad(final BigdataSailRepositoryConnection cxn, - final BlueprintsRDFFactory factory) { + final BlueprintsValueFactory factory) { super(factory); this.cxn = cxn; this.cxn.addChangeLog(this); } - protected RepositoryConnection cxn() throws Exception { + protected RepositoryConnection getWriteConnection() throws Exception { return cxn; } + protected RepositoryConnection getReadOnlyConnection() throws Exception { + return cxn; + } + @Override public void commit() { try { @@ -167,7 +170,7 @@ // cxn().remove(s, p, null); - cxn().add(s, p, o); + getWriteConnection().add(s, p, o); } catch (Exception e) { throw new RuntimeException(e); @@ -193,7 +196,7 @@ // throw new IllegalArgumentException("vertex " + vid + " already exists"); // } - cxn().add(uri, RDF.TYPE, VERTEX); + getWriteConnection().add(uri, RDF.TYPE, VERTEX); return new BigdataVertex(uri, this); @@ -241,9 +244,9 @@ final URI fromURI = factory.toVertexURI(from.getId().toString()); final URI toURI = factory.toVertexURI(to.getId().toString()); - cxn().add(fromURI, edgeURI, toURI); - cxn().add(edgeURI, RDF.TYPE, EDGE); - cxn().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); + getWriteConnection().add(fromURI, edgeURI, toURI); + getWriteConnection().add(edgeURI, RDF.TYPE, EDGE); + getWriteConnection().add(edgeURI, RDFS.LABEL, factory.toLiteral(label)); return new BigdataEdge(new StatementImpl(fromURI, edgeURI, toURI), this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphClient.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -54,7 +54,7 @@ } public BigdataGraphClient(final String bigdataEndpoint, - final BlueprintsRDFFactory factory) { + final BlueprintsValueFactory factory) { this(new BigdataSailRemoteRepository(bigdataEndpoint), factory); } @@ -63,7 +63,7 @@ } public BigdataGraphClient(final RemoteRepository repo, - final BlueprintsRDFFactory factory) { + final BlueprintsValueFactory factory) { this(new BigdataSailRemoteRepository(repo), factory); } @@ -72,7 +72,7 @@ } public BigdataGraphClient(final BigdataSailRemoteRepository repo, - final BlueprintsRDFFactory factory) { + final BlueprintsValueFactory factory) { super(factory); this.repo = repo; @@ -89,13 +89,21 @@ /** * Get a {@link BigdataSailRemoteRepositoryConnection}. 
*/ - protected BigdataSailRemoteRepositoryConnection cxn() throws Exception { + protected BigdataSailRemoteRepositoryConnection getWriteConnection() throws Exception { if (cxn == null) { cxn = repo.getConnection(); } return cxn; } + /** + * Get a {@link BigdataSailRemoteRepositoryConnection}. No difference in + * connection for remote clients. + */ + protected BigdataSailRemoteRepositoryConnection getReadOnlyConnection() throws Exception { + return getWriteConnection(); + } + /** * Shutdown the connection and repository (client-side, not server-side). */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphConfiguration.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -110,7 +110,7 @@ final String journal = config.getString(Options.FILE); - return BigdataGraphFactory.create(journal); + return BigdataGraphFactory.open(journal, true); } else { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphEmbedded.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -22,6 +22,8 @@ */ package com.bigdata.blueprints; +import java.util.Properties; + import org.openrdf.repository.RepositoryConnection; import com.bigdata.rdf.sail.BigdataSail; @@ -53,11 +55,11 @@ /** * Create a Blueprints wrapper around a {@link BigdataSail} instance with - * a non-standard {@link BlueprintsRDFFactory} implementation. + * a non-standard {@link BlueprintsValueFactory} implementation. */ public BigdataGraphEmbedded(final BigdataSail sail, - final BlueprintsRDFFactory factory) { - this(new BigdataSailRepository(sail), factory); + final BlueprintsValueFactory factory) { + this(new BigdataSailRepository(sail), factory, new Properties()); } /** @@ -65,20 +67,24 @@ * instance. */ public BigdataGraphEmbedded(final BigdataSailRepository repo) { - this(repo, BigdataRDFFactory.INSTANCE); + this(repo, BigdataRDFFactory.INSTANCE, new Properties()); } /** * Create a Blueprints wrapper around a {@link BigdataSailRepository} - * instance with a non-standard {@link BlueprintsRDFFactory} implementation. + * instance with a non-standard {@link BlueprintsValueFactory} implementation. 
*/ public BigdataGraphEmbedded(final BigdataSailRepository repo, - final BlueprintsRDFFactory factory) { - super(factory); + final BlueprintsValueFactory factory, final Properties props) { + super(factory, props); this.repo = repo; } + public BigdataSailRepository getRepository() { + return repo; + } + protected final ThreadLocal<RepositoryConnection> cxn = new ThreadLocal<RepositoryConnection>() { protected RepositoryConnection initialValue() { RepositoryConnection cxn = null; @@ -92,7 +98,7 @@ } }; - protected RepositoryConnection cxn() throws Exception { + protected RepositoryConnection getWriteConnection() throws Exception { // if (cxn == null) { // cxn = repo.getUnisolatedConnection(); // cxn.setAutoCommit(false); @@ -100,6 +106,10 @@ return cxn.get(); } + protected RepositoryConnection getReadOnlyConnection() throws Exception { + return repo.getReadOnlyConnection(); + } + @Override public void commit() { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphFactory.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -63,10 +63,12 @@ } /** - * Open an existing persistent local bigdata instance. + * Open an existing persistent local bigdata instance. If a journal does + * not exist at the specified location and the boolean create flag is true + * a journal will be created at that location. */ - public static BigdataGraph open(final String file) throws Exception { - final BigdataSail sail = BigdataSailFactory.openSail(file); + public static BigdataGraph open(final String file, final boolean create) throws Exception { + final BigdataSail sail = BigdataSailFactory.openSail(file, create); sail.initialize(); return new BigdataGraphEmbedded(sail); } @@ -80,14 +82,14 @@ return new BigdataGraphEmbedded(sail); } - /** - * Create a new persistent local bigdata instance. - */ - public static BigdataGraph create(final String file) - throws Exception { - final BigdataSail sail = BigdataSailFactory.createSail(file); - sail.initialize(); - return new BigdataGraphEmbedded(sail); - } +// /** +// * Create a new persistent local bigdata instance. +// */ +// public static BigdataGraph create(final String file) +// throws Exception { +// final BigdataSail sail = BigdataSailFactory.createSail(file); +// sail.initialize(); +// return new BigdataGraphEmbedded(sail); +// } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphQuery.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -63,6 +63,26 @@ private final BigdataGraph graph; /** + * URI used for typing elements. + */ + protected final URI TYPE; + + /** + * URI used to represent a Vertex. + */ + protected final URI VERTEX; + + /** + * URI used to represent a Edge. + */ + protected final URI EDGE; + + /** + * URI used for labeling edges. + */ + protected final URI LABEL; + + /** * The list of criteria. 
Bigdata's query optimizer will re-order the * criteria based on selectivity and execute for maximum performance and * minimum IO. @@ -76,6 +96,10 @@ public BigdataGraphQuery(final BigdataGraph graph) { this.graph = graph; + this.TYPE = graph.getValueFactory().getTypeURI(); + this.VERTEX = graph.getValueFactory().getVertexURI(); + this.EDGE = graph.getValueFactory().getEdgeURI(); + this.LABEL = graph.getValueFactory().getLabelURI(); } /** @@ -204,7 +228,7 @@ */ @Override public Iterable<Edge> edges() { - final String queryStr = toQueryStr(BigdataGraph.EDGE); + final String queryStr = toQueryStr(EDGE); return graph.getEdges(queryStr); } @@ -215,7 +239,7 @@ */ @Override public Iterable<Vertex> vertices() { - final String queryStr = toQueryStr(BigdataGraph.VERTEX); + final String queryStr = toQueryStr(VERTEX); return graph.getVertices(queryStr, true); } @@ -226,8 +250,8 @@ final StringBuilder sb = new StringBuilder(); - if (type == BigdataGraph.VERTEX) { - sb.append("construct { ?x rdf:type <"+type+"> . }\n"); + if (type == VERTEX) { + sb.append("construct { ?x <"+TYPE+"> <"+type+"> . }\n"); sb.append("{\n select distinct ?x where {\n"); } else { sb.append("construct { ?from ?x ?to . }\n"); @@ -235,7 +259,7 @@ sb.append(" ?from ?x ?to .\n"); } - final BlueprintsRDFFactory factory = graph.factory; + final BlueprintsValueFactory factory = graph.factory; boolean hasHas = false; @@ -304,7 +328,7 @@ // need a statement pattern for the filter not exists if (!hasHas) { - sb.append(" ?x rdf:type <").append(type).append("> .\n"); + sb.append(" ?x <"+TYPE+"> <").append(type).append("> .\n"); } @@ -334,7 +358,7 @@ private String toFilterStr(final BigdataPredicate pred, final String var, final Object val) { - final BlueprintsRDFFactory factory = graph.factory; + final BlueprintsValueFactory factory = graph.factory; final StringBuilder sb = new StringBuilder(); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -0,0 +1,55 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import java.util.Collection; +import java.util.LinkedList; + +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Vertex; + +public class BigdataGraphlet { + + private final Collection<? extends Vertex> vertices; + private final Collection<? 
extends Edge> edges; + + public BigdataGraphlet() { + this.vertices = new LinkedList<Vertex>(); + this.edges = new LinkedList<Edge>(); + } + + public BigdataGraphlet(final Collection<? extends Vertex> vertices, + final Collection<? extends Edge> edges) { + this.vertices = vertices; + this.edges = edges; + } + + public Collection<? extends Vertex> getVertices() { + return vertices; + } + + public Collection<? extends Edge> getEdges() { + return edges; + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataGraphlet.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java 2014-07-17 22:45:00 UTC (rev 8573) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataPredicate.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -1,3 +1,25 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.blueprints; import com.tinkerpop.blueprints.Compare; Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-blueprints/src/java/com/bigdata/blueprints/BigdataQueryProjection.java 2014-07-18 15:30:35 UTC (rev 8574) @@ -0,0 +1,508 @@ +/** +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.blueprints; + +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; +import org.openrdf.model.Statement; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.query.BindingSet; +import org.openrdf.query.GraphQueryResult; +import org.openrdf.query.TupleQueryResult; + +import com.bigdata.blueprints.BigdataSelection.Bindings; +import com.tinkerpop.blueprints.Direction; +import com.tinkerpop.blueprints.Edge; +import com.tinkerpop.blueprints.Element; +import com.tinkerpop.blueprints.Vertex; +import com.tinkerpop.blueprints.VertexQuery; + +public class BigdataQueryProjection { + + private static final transient Logger log = Logger.getLogger(BigdataQueryProjection.class); + + private final BlueprintsValueFactory factory; + + public BigdataQueryProjection(final BlueprintsValueFactory factory) { + + this.factory = factory; + + } + + public BigdataSelection convert(final TupleQueryResult result) + throws Exception { + + final BigdataSelection selection = new BigdataSelection(); + + while (result.hasNext()) { + + final BindingSet bs = result.next(); + + final Bindings bindings = selection.newBindings(); + + for (String key : bs.getBindingNames()) { + + final Value val= bs.getBinding(key).getValue(); + + final Object o; + if (val instanceof Literal) { + o = factory.fromLiteral((Literal) val); + } else if (val instanceof URI) { + o = factory.fromURI((URI) val); + } else { + throw new RuntimeException("bnodes not legal: " + val); + } + + bindings.put(key, o); + + } + + } + + return selection; + + } + + public BigdataGraphlet convert(final GraphQueryResult stmts) throws Exception { + + final PartialGraph elements = new PartialGraph(); + + while (stmts.hasNext()) { + + final Statement stmt = stmts.next(); + + if (log.isInfoEnabled()) { + log.info(stmt); + } + + final Value o = stmt.getObject(); + + if (o instanceof URI) { + + handleEdge(elements, stmt); + + } else if (o instanceof Literal) { + + handleProperty(elements, stmt); + + } else { + + // how did we get a bnode? +// log.warn("ignoring: " + stmt); + + } + + } + + /* + * Attach properties to edges. + */ + final Iterator<Map.Entry<URI, PartialElement>> it = elements.properties.entrySet().iterator(); + + while (it.hasNext()) { + + final Map.Entry<URI, PartialElement> e = it.next(); + + final URI uri = e.getKey(); + + final PartialElement element = e.getValue(); + + boolean isEdge = false; + + for (Statement stmt : elements.edges.keySet()) { + + if (stmt.getPredicate().equals(uri)) { + + isEdge = true; + + final PartialEdge edge = elements.edges.get(stmt); + + edge.copyProperties(element); + + } + + } + + if (isEdge) { + it.remove(); + } + + } + + /* + * Attach properties to vertices. + */ + for (URI uri : elements.properties.keySet()) { + + final PartialElement element = elements.properties.get(uri); + + if (log.isInfoEnabled()) { + log.info(uri + ": " + element); + } + + final PartialVertex v = elements.putIfAbsent(uri); + + v.copyProperties(element); + + } + + /* + * Fill in any missing edge label. 
+ */ + for (PartialEdge edge : elements.edges.values()) { + + if (edge.getLabel() == null) { + + edge.setLabel(edge.getId().toString()); + + } + + } + +// /* +// * Prune any incomplete edges. +// */ +// final Iterator<Element> it = elements.values().iterator(); +// +// while (it.hasNext()) { +// +// final Element e = it.next(); +// +// if (e instanceof PartialEdge) { +// +// if (!((PartialEdge) e).isComplete()) { +// +// it.remove(); +// +// } +// +// } +// +// } + + return new BigdataGraphlet( + elements.vertices.values(), elements.edges.values()); + + } + + private void handleEdge(final PartialGraph elements, final Statement stmt) { + + if (log.isTraceEnabled()) { + log.trace(stmt); + } + + final PartialVertex from = elements.putIfAbsent((URI) stmt.getSubject()); + + final PartialEdge edge = elements.putIfAbsent(stmt); + + final PartialVertex to = elements.putIfAbsent((URI) stmt.getObject()); + + edge.setFrom(from); + + edge.setTo(to); + +// // use the default label +// edge.setLabel(factory.fromEdgeURI(stmt.getPredicate())); + + } + + private void handleProperty(final PartialGraph elements, final Statement stmt) { + +// if (log.isInfoEnabled()) { +// log.info(stmt); +// } + + final URI uri = (URI) stmt.getSubject(); + + final PartialElement element = elements.putElementIfAbsent(uri); + + final String prop = factory.fromURI(stmt.getPredicate()); + + final Object val = factory.fromLiteral((Literal) stmt.getObject()); + +// if (prop.equals("label") && element instanceof PartialEdge) { +// +// ((PartialEdge) element).setLabel(val.toString()); +// +// } else { + + element.setProperty(prop, val); + +// } + + } + +// private PartialElement putIfAbsent(final URI uri) { +// +// if (factory.isEdge(uri)) { +// +// return putEdgeIfAbsent(uri); +// +// } else if (factory.isVertex(uri)) { +// +// return putVertexIfAbsent(uri); +// +// } else { +// +// throw new RuntimeException("bad element: " + uri); +// +// } +// +// } +// + private class PartialGraph { + + private final Map<URI, PartialElement> properties = new LinkedHashMap<URI, PartialElement>(); + + private final Map<Statement, PartialEdge> edges = new LinkedHashMap<Statement, PartialEdge>(); + + private final Map<URI, PartialVertex> vertices = new LinkedHashMap<URI, PartialVertex>(); + + private PartialElement putElementIfAbsent(final URI uri) { + + final String id = uri.toString(); + + if (properties.containsKey(uri)) { + + return (PartialElement) properties.get(uri); + + } else { + + final PartialElement e = new PartialElement(id); + + properties.put(uri, e); + + return e; + + } + + } + + private PartialVertex putIfAbsen... [truncated message content] |
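To make the refactored Blueprints API in the diffs above concrete, the following is a minimal usage sketch against an embedded graph. It relies only on signatures visible in this message: BigdataGraphFactory.open(file, create), the collection-aware setProperty() (note that FEATURES.supportsUniformListProperty is now true), and the new project() method, which evaluates a SPARQL CONSTRUCT over a read-only connection and returns a BigdataGraphlet. The journal path is hypothetical, and it is an assumption (not confirmed by this excerpt) that BigdataVertex.setProperty() delegates to the collection-aware BigdataGraph.setProperty() shown above.

    import java.util.Arrays;

    import com.bigdata.blueprints.BigdataGraph;
    import com.bigdata.blueprints.BigdataGraphFactory;
    import com.bigdata.blueprints.BigdataGraphlet;
    import com.tinkerpop.blueprints.Vertex;

    public class BlueprintsApiSketch {

        public static void main(final String[] args) throws Exception {

            // Open the journal, creating it if it does not exist (create=true).
            final BigdataGraph graph = BigdataGraphFactory.open("/tmp/graph.jnl", true);

            try {

                // A null key generates a UUID-based vertex id.
                final Vertex v = graph.addVertex(null);

                // Multi-valued property: the refactored setProperty() removes the
                // old values and then adds one statement per collection element.
                v.setProperty("nickname", Arrays.asList("alice", "al"));

                // Project a subgraph with a SPARQL CONSTRUCT; per the new
                // getReadOnlyConnection() split this runs read-only.
                final BigdataGraphlet g = graph.project(
                        "construct { ?s ?p ?o } where { ?s ?p ?o }");

                System.out.println("vertices=" + g.getVertices().size()
                        + ", edges=" + g.getEdges().size());

            } finally {

                // Blueprints lifecycle method: releases the embedded journal.
                graph.shutdown();

            }

        }

    }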
From: <tho...@us...> - 2014-07-18 15:46:00
|
Revision: 8578
          http://sourceforge.net/p/bigdata/code/8578
Author:   thompsonbry
Date:     2014-07-18 15:45:57 +0000 (Fri, 18 Jul 2014)
Log Message:
-----------
Checkpoint on GIST refactor (#585) in support of group commit for the REST API (#566).

This commit introduces hierarchical locking (vs enumeration of the indices) into the AbstractApiTask and makes some progress toward a refactor of the ITask interface and the AbstractTask implementation to support access at the ICheckpointProtocol layer (supports BTree, HTree and Stream) in addition to the IIndex layer (supports BTree, FusedView, and IsolatedFusedView).

The next step will be to refactor AbstractTask.getIndex() to push down a method for obtaining the ICheckpointProtocol object. In order to do this, the AbstractTask.indexCache must be relayered. It is currently specific to the IIndex interface. In order to provide caching for ICheckpointProtocol objects, it needs to be rewritten to the ICheckpointProtocol layer. However, the code needs to be carefully reviewed to determine whether we also need caching at the IIndex layer or whether we can only cache at the ICheckpointProtocol layer. This is a question of both correctness and performance.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/ITask.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java	2014-07-18 15:44:45 UTC (rev 8577)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java	2014-07-18 15:45:57 UTC (rev 8578)
@@ -56,6 +56,7 @@
 import com.bigdata.bfs.GlobalFileSystemHelper;
 import com.bigdata.btree.AbstractBTree;
 import com.bigdata.btree.BTree;
+import com.bigdata.btree.Checkpoint;
 import com.bigdata.btree.ICheckpointProtocol;
 import com.bigdata.btree.IDirtyListener;
 import com.bigdata.btree.IIndex;
@@ -64,6 +65,7 @@
 import com.bigdata.btree.view.FusedView;
 import com.bigdata.concurrent.NonBlockingLockManager;
 import com.bigdata.counters.CounterSet;
+import com.bigdata.htree.AbstractHTree;
 import com.bigdata.mdi.IResourceMetadata;
 import com.bigdata.rawstore.IAllocationContext;
 import com.bigdata.rawstore.IPSOutputStream;
@@ -114,9 +116,6 @@
  * {@link ConcurrencyManager#submit(AbstractTask)} it.
  *
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- *
- * @todo declare generic type for the return as <? extends Object> to be compatible
- *       with {@link ConcurrencyManager#submit(AbstractTask)}
  */
 public abstract class AbstractTask<T> implements Callable<T>, ITask<T> {
@@ -264,6 +263,7 @@
      * Cache of named indices resolved by this task for its {@link #timestamp}.
      *
      * @see #getIndex(String name)
+     * @see #getIndexLocal(String)
      */
     final private Map<String,ILocalBTreeView> indexCache;
@@ -543,8 +543,7 @@
     }

     /**
-     * Return a view of the named index appropriate for the timestamp associated
-     * with this task.
+     * {@inheritDoc}
      * <p>
      * Note: There are two ways in which a task may access an
      * {@link ITx#UNISOLATED} index, but in all cases access to the index is
@@ -553,37 +552,13 @@
      * {@link IJournal#getIndex(String)} on that journal, which is simply
      * delegated to this method. See {@link IsolatedActionJournal}.
* - * @param name - * The name of the index. - * - * @throws NullPointerException - * if <i>name</i> is <code>null</code>. - * @throws IllegalStateException - * if <i>name</i> is not a declared resource. - * @throws StaleLocatorException - * if <i>name</i> identifies an index partition which has been - * split, joined, or moved. - * @throws NoSuchIndexException - * if the named index is not registered as of the timestamp. - * - * @return The index. - * - * @todo modify to return <code>null</code> if the index is not registered? - * - * FIXME GIST. This will throw a ClassCastException if the returned - * index is an ILocalBTreeView. - * * @see http://trac.bigdata.com/ticket/585 (GIST) */ @Override synchronized final public ILocalBTreeView getIndex(final String name) { - if (name == null) { - - // @todo change to IllegalArgumentException for API consistency? + if (name == null) throw new NullPointerException(); - - } // validate that this is a declared index. assertResource(name); @@ -636,10 +611,11 @@ * index from the store, set the [lastCommitTime], and enter it into * the unisolated Name2Addr's cache of unisolated indices. */ - BTree btree; + ICheckpointProtocol ndx; // the unisolated name2Addr object. - final Name2Addr name2Addr = resourceManager.getLiveJournal()._getName2Addr(); + final Name2Addr name2Addr = resourceManager.getLiveJournal() + ._getName2Addr(); synchronized (name2Addr) { @@ -680,46 +656,61 @@ * But, fetch the btree from the cache to ensure we use the * most recent checkpoint */ - btree = null; + ndx = null; - final BTree tmpbtree = (BTree) name2Addr.getIndexCache(name); - if (tmpbtree != null) - checkpointAddr = tmpbtree.getCheckpoint().getCheckpointAddr(); + final ICheckpointProtocol tmp_ndx = name2Addr + .getIndexCache(name); + + if (tmp_ndx != null) { + + checkpointAddr = tmp_ndx.getCheckpoint() + .getCheckpointAddr(); + + } } else { - // recover from unisolated index cache. - btree = (BTree) name2Addr.getIndexCache(name); + + // Recover from unisolated index cache. + ndx = name2Addr.getIndexCache(name); + } - if (btree == null) { + if (ndx == null) { - final IJournal tmp; -// tmp = resourceManager.getLiveJournal(); - tmp = getJournal();// wrap with the IsolatedActionJournal. + // wrap with the IsolatedActionJournal. + final IJournal tmp = getJournal(); +// tmp = resourceManager.getLiveJournal(); // re-load btree from the store. - btree = BTree.load(// + ndx = Checkpoint.loadFromCheckpoint(// tmp, // backing store. checkpointAddr,// false// readOnly ); // set the lastCommitTime on the index. - btree.setLastCommitTime(entry.commitTime); + ndx.setLastCommitTime(entry.commitTime); // add to the unisolated index cache (must not exist). - name2Addr.putIndexCache(name, btree, false/* replace */); + name2Addr.putIndexCache(name, ndx, false/* replace */); - btree.setBTreeCounters(resourceManager - .getIndexCounters(name)); + // set performance counters iff the class supports it. + if (ndx instanceof AbstractBTree) { + ((AbstractBTree) ndx).setBTreeCounters(resourceManager + .getIndexCounters(name)); + } else if (ndx instanceof AbstractHTree) { + ((AbstractHTree) ndx).setBTreeCounters(resourceManager + .getIndexCounters(name)); + } } } try { - - return getUnisolatedIndexView(name, btree); + + // wrap B+Tree as FusedView: FIXME GIST : BTree specific code path. 
+ return getUnisolatedIndexView(name, (BTree) ndx); } catch (NoSuchStoreException ex) { @@ -760,10 +751,12 @@ /** * Given the name of an index and a {@link BTree}, obtain the view for all * source(s) described by the {@link BTree}s index partition metadata (if - * any), inserts that view into the {@link #indexCache}, and return the view. + * any), inserts that view into the {@link #indexCache}, and return the + * view. * <p> - * Note: This method is used both when registering a new index ({@link #registerIndex(String, BTree)}) - * and when reading an index view from the source ({@link #getIndex(String)}). + * Note: This method is used both when registering a new index ( + * {@link #registerIndex(String, BTree)}) and when reading an index view + * from the source ({@link #getIndex(String)}). * * @param name * The index name. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/ITask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/ITask.java 2014-07-18 15:44:45 UTC (rev 8577) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/ITask.java 2014-07-18 15:45:57 UTC (rev 8578) @@ -98,7 +98,8 @@ String toString(); /** - * Return an appropriate view of the named index for the operation. + * Return an appropriate view of the named B+Tree that has the appropriate + * isolation level for the operation (non-GIST). * <p> * When the task is isolated by a transaction, then the index will be * isolated by the transaction using the appropriate isolation level. If the @@ -124,23 +125,63 @@ * * @return An appropriate view of the named index. * - * @exception NoSuchIndexException - * if the named index does not exist at the time that the - * operation is executed. + * @throws NullPointerException + * if <i>name</i> is <code>null</code>. + * @throws IllegalStateException + * if <i>name</i> is not a declared resource. + * @throws StaleLocatorException + * if <i>name</i> identifies an index partition which has been + * split, joined, or moved. + * @throws NoSuchIndexException + * if the named index is not registered as of the timestamp. * - * @exception StaleLocatorException - * if the named index does not exist at the time the - * operation is executed and the {@link IResourceManager} has - * information which indicates that the index partition has - * been split, joined or moved. + * TODO modify to return <code>null</code> if the index is not + * registered? + */ + IIndex getIndex(String name); // non-GIST + + /** + * Return an appropriate view of the named index for the operation (GIST). + * <p> + * This method MUST be used to access non-B+Tree data structures that do not + * (yet) support {@link FusedView} style transaction isolation. + * <p> + * This method MAY NOT be used to access data structures if the operation is + * isolated by a read-write transaction. + * <p> + * This method DOES NOT understand the ordered views used by scale-out. The + * {@link ICheckpointProtocol} interface returned by this method is a + * concrete durable GIST data structure with a specific commit record. It is + * NOT a {@link FusedView} or similar data structure assembled from an + * ordered array of indices. If this method is used for a GIST data + * structure it will ONLY return the {@link ICheckpointProtocol} and will + * not wrap it with a {@link FusedView}. 
(This is of practical importance + * only for scale-out which uses {@link FusedView}s to support the dynamic + * key range partitioning algorithm for the distributed B+Tree data + * structure.) * - * @exception IllegalStateException - * if the named index is not one of the resources declared to - * the constructor. + * @param name + * The index name. * - * @see IGISTLocalManager + * @return An appropriate view of the named index. + * + * @throws NullPointerException + * if <i>name</i> is <code>null</code>. + * @throws IllegalStateException + * if <i>name</i> is not a declared resource. + * @throws StaleLocatorException + * if <i>name</i> identifies an index partition which has been + * split, joined, or moved. + * @throws NoSuchIndexException + * if the named index is not registered as of the timestamp. + * @throws UnsupportedOperationException + * if the {@link ITask} is associated with a read-write + * transaction. + * + * TODO modify to return <code>null</code> if the index is not + * registered? */ - IIndex getIndex(String name); // non-GIST +// ICheckpointProtocol getIndexLocal(String name); // GIST /** * The object used to track events and times for the task. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-07-18 15:44:45 UTC (rev 8577) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-07-18 15:45:57 UTC (rev 8578) @@ -24,8 +24,6 @@ */ package com.bigdata.rdf.task; -import java.util.HashSet; -import java.util.Set; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.atomic.AtomicReference; @@ -314,24 +312,12 @@ * locks and will have exclusive access to the resources guarded by * those locks when they run. * - * FIXME GROUP COMMIT: The {@link AbstractTask} was written to - * require the exact set of resource lock declarations. However, for - * the REST API, we want to operate on all indices associated with a - * KB instance. This requires either: - * <p> - * (a) pre-resolving the names of those indices and passing them all - * into the AbstractTask; or - * <P> - * (b) allowing the caller to only declare the namespace and then to - * be granted access to all indices whose names are in that - * namespace. - * - * (b) is now possible with the fix to the Name2Addr prefix scan. - * - * Note: We also need to isolate any named solution sets in the - * namespace of the KB. Those will be discovered along with the - * indices, but they may require changes to {@link AbstractTask} - * for GIST support. + * FIXME GROUP COMMIT: The hierarchical locking mechanisms will fail + * on durable named solution sets because they use either HTree or + * Stream and AbstractTask does not yet support those durable data + * structures (it is still being refactored to support the + * ICheckpointProtocol rather than the BTree in its Name2Addr + * isolation logic). */ // Obtain the necessary locks for R/w access to KB indices. @@ -350,7 +336,8 @@ } /** - * Acquire the locks for the named indices associated with the specified KB. + * Return the set of locks that the task must acquire in order to operate on + * the specified namespace. * * @param indexManager * The {@link Journal}. @@ -360,52 +347,41 @@ * @return The locks for the named indices associated with that KB instance. 
     * 
     * @throws DatasetNotFoundException
-     * 
-     *             FIXME GROUP COMMIT : [This should be replaced by the use of
-     *             the namespace and hierarchical locking support in
-     *             AbstractTask.] This could fail to discover a recently create
-     *             KB between the time when the KB is created and when the group
-     *             commit for that create becomes visible. This data race exists
-     *             because we are using [lastCommitTime] rather than the
-     *             UNISOLATED view of the GRS.
-     *             <p>
-     *             Note: This data race MIGHT be closed by the default locator
-     *             cache. If it records the new KB properties when they are
-     *             created, then they should be visible. If they are not
-     *             visible, then we have a data race. (But if it records them
-     *             before the group commit for the KB create, then the actual KB
-     *             indices will not be durable until the that group commit...).
-     *             <p>
-     *             Note: The problem can obviously be resolved by using the
-     *             UNISOLATED index to obtain the KB properties, but that would
-     *             serialize ALL updates. What we need is a suitable caching
-     *             mechanism that (a) ensures that newly create KB instances are
-     *             visible; and (b) has high concurrency for read-only requests
-     *             for the properties for those KB instances.
      */
    private static String[] getLocksForKB(final Journal indexManager,
            final String namespace) throws DatasetNotFoundException {

-        final long timestamp = indexManager.getLastCommitTime();
+        /*
+         * Note: There are two possible approaches here. One is to explicitly
+         * enumerate the index names for the triple store. The other is to
+         * specify the namespace of the triple store and use hierarchical
+         * locking.
+         *
+         * This is now using hierarchical locking, so it just returns the
+         * namespace.
+         */
+        return new String[]{namespace};
+        
+//        final long timestamp = indexManager.getLastCommitTime();
+//
+//        final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager
+//                .getResourceLocator().locate(namespace, timestamp);
+//
+//        if (tripleStore == null)
+//            throw new DatasetNotFoundException("Not found: namespace="
+//                    + namespace + ", timestamp="
+//                    + TimestampUtility.toString(timestamp));
+//
+//        final Set<String> lockSet = new HashSet<String>();
+//
+//        lockSet.addAll(tripleStore.getSPORelation().getIndexNames());
+//
+//        lockSet.addAll(tripleStore.getLexiconRelation().getIndexNames());
+//
+//        final String[] locks = lockSet.toArray(new String[lockSet.size()]);
+//
+//        return locks;

-        final AbstractTripleStore tripleStore = (AbstractTripleStore) indexManager
-                .getResourceLocator().locate(namespace, timestamp);
-
-        if (tripleStore == null)
-            throw new DatasetNotFoundException("Not found: namespace="
-                    + namespace + ", timestamp="
-                    + TimestampUtility.toString(timestamp));
-
-        final Set<String> lockSet = new HashSet<String>();
-
-        lockSet.addAll(tripleStore.getSPORelation().getIndexNames());
-
-        lockSet.addAll(tripleStore.getLexiconRelation().getIndexNames());
-
-        final String[] locks = lockSet.toArray(new String[lockSet.size()]);
-
-        return locks;
-        
    }

}

|
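The switch from enumerating index names to hierarchical locking deserves a small illustration. The sketch below is a toy model, not bigdata code: it shows the Name2Addr prefix-scan idea the commit relies on, in which a lock declared on a KB namespace guards the namespace itself and every index whose name is nested under it. The index names are hypothetical.

    import java.util.Arrays;
    import java.util.List;

    public class HierarchicalLockingSketch {

        // A lock on a namespace guards the namespace itself and every index
        // whose name is nested under it (Name2Addr prefix-scan style).
        static boolean guards(final String namespace, final String indexName) {
            return indexName.equals(namespace)
                    || indexName.startsWith(namespace + ".");
        }

        public static void main(final String[] args) {

            // Hypothetical index names for two KB instances on one journal.
            final List<String> indices = Arrays.asList(
                    "kb.spo.SPO", "kb.spo.POS", "kb.spo.OSP",
                    "kb.lex.TERM2ID", "kb.lex.ID2TERM",
                    "otherKb.spo.SPO");

            // getLocksForKB() now declares just { "kb" } instead of
            // enumerating the five "kb.*" index names above.
            for (String name : indices) {
                System.out.println(name + " guarded by [kb]: "
                        + guards("kb", name));
            }

        }

    }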
From: <mrp...@us...> - 2014-08-07 08:50:39
|
Revision: 8604
          http://sourceforge.net/p/bigdata/code/8604
Author:   mrpersonick
Date:     2014-08-07 08:50:25 +0000 (Thu, 07 Aug 2014)
Log Message:
-----------
Ticket #1001: Create extensible mechanism for inline URIs

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/URIExtensionIV.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/HashCollisionUtility.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailFactory.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailEmbeddedFederationWithQuads.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithSids.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/WEB-INF/GraphStore.properties

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/vocab/BaseVocabularyDecl.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/vocab/DefaultBigdataVocabulary.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestInlineURIs.java

Removed Paths:
-------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java	2014-08-04 10:06:47 UTC (rev 8603)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/DTE.java	2014-08-07 08:50:25 UTC (rev 8604)
@@ -332,11 +332,16 @@
          * etc. Also, the match will always be on the local name once we prove
          * the namespace.
          */
-        if (datatype == null) {
+//        if (datatype == null) {
+        if (datatype.equals(XSD.IPV4)) {
            /*
             * Note: This is a bit of a rough spot in the API. There is no
             * datatype associated with [Extension] since it is a placeholder
             * for an extension for any datatype.
+            *
+            * Right now I am hijacking Extension for IPv4.
+            *
+            * TODO FIXME
             */
            return Extension;
        }

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java	2014-08-07 08:50:25 UTC (rev 8604)
@@ -0,0 +1,53 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved.
+
+Contact:
+     SYSTAP, LLC
+     4501 Tower Road
+     Greensboro, NC 27410
+     lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.rdf.internal;
+
+import org.openrdf.model.URI;
+
+import com.bigdata.rdf.internal.impl.uri.URIExtensionIV;
+import com.bigdata.rdf.vocab.Vocabulary;
+
+/**
+ * This factory will create {@link URIExtensionIV}s using
+ * {@link InlineURIHandler} delegates. Handler delegates are registered with
+ * a namespace prefix that they can handle. These namespace prefixes must
+ * be defined in the vocabulary so that they can be properly inlined. The URI
+ * to be inlined will then be presented to each handler for conversion. The
+ * first registered handler to convert the URI wins. If no handler can handle
+ * the URI then no inline URI iv is created.
+ */
+public interface IInlineURIFactory {
+
+    /**
+     * Give the handlers a chance to look up the vocab IV for their namespace
+     * prefixes.
+     */
+    void init(final Vocabulary vocab);
+
+    /**
+     * Create an inline URIExtensionIV for the supplied URI.
+     */
+    URIExtensionIV<?> createInlineURIIV(final URI uri);
+
+}

Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IInlineURIFactory.java
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java	2014-08-04 10:06:47 UTC (rev 8603)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java	2014-08-07 08:50:25 UTC (rev 8604)
@@ -35,8 +35,6 @@
 import java.io.IOException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.UUID;
@@ -68,7 +66,7 @@
 import com.bigdata.rdf.internal.impl.literal.XSDUnsignedLongIV;
 import com.bigdata.rdf.internal.impl.literal.XSDUnsignedShortIV;
 import com.bigdata.rdf.internal.impl.uri.FullyInlineURIIV;
-import com.bigdata.rdf.internal.impl.uri.IPAddrIV;
+import com.bigdata.rdf.internal.impl.uri.IPv4AddrIV;
 import com.bigdata.rdf.internal.impl.uri.PartlyInlineURIIV;
 import com.bigdata.rdf.internal.impl.uri.URIExtensionIV;
 import com.bigdata.rdf.internal.impl.uri.VocabURIByteIV;
@@ -525,8 +523,9 @@
 
             o += namespaceIV.byteLength();
 
-            final FullyInlineTypedLiteralIV<BigdataLiteral> localNameIV = (FullyInlineTypedLiteralIV<BigdataLiteral>) decodeFromOffset(
-                    key, o);
+            final AbstractLiteralIV<BigdataLiteral, ?> localNameIV =
+                    (AbstractLiteralIV<BigdataLiteral, ?>) decodeFromOffset(
+                    key, o);
 
             final IV iv = new URIExtensionIV<BigdataURI>(localNameIV,
                     namespaceIV);
@@ -538,17 +537,18 @@
         // The data type
         final DTE dte = AbstractIV.getDTE(flags);
         switch (dte) {
-        case XSDBoolean: {
-            /*
-             * TODO Using XSDBoolean so that we can know how to decode this thing
-             * as an IPAddrIV. We need to fix the Extension mechanism for URIs.
-             * Extension is already used above.
-             */
-            final byte[] addr = new byte[5];
-            System.arraycopy(key, o, addr, 0, 5);
-            final Inet4Address ip = new Inet4Address(addr);
-            return new IPAddrIV(ip);
-        }
+//        deprecated in favor of the extensible InlineURIFactory
+//        case XSDBoolean: {
+//            /*
+//             * TODO Using XSDBoolean so that we can know how to decode this thing
+//             * as an IPAddrIV. We need to fix the Extension mechanism for URIs.
+//             * Extension is already used above.
+//             */
+//            final byte[] addr = new byte[5];
+//            System.arraycopy(key, o, addr, 0, 5);
+//            final Inet4Address ip = new Inet4Address(addr);
+//            return new IPv4AddrIV(ip);
+//        }
         case XSDByte: {
             final byte x = key[o];//KeyBuilder.decodeByte(key[o]);
             return new VocabURIByteIV<BigdataURI>(x);
@@ -706,6 +706,18 @@
             }
             return decodeInlineUnicodeLiteral(key,o);
         }
+        case Extension: {
+            /*
+             * TODO Set up an extended DTE mechanism and check the byte after
+             * the flags for the extended DTE. Right now I am just hijacking
+             * Extension for IPv4.
+             */
+            final byte[] addr = new byte[5];
+            System.arraycopy(key, o, addr, 0, 5);
+            final Inet4Address ip = new Inet4Address(addr);
+            final AbstractLiteralIV iv = new IPv4AddrIV(ip);
+            return isExtension ?
new LiteralExtensionIV(iv, datatype) : iv; + } default: throw new UnsupportedOperationException("dte=" + dte); } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,61 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.internal; + +import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV; +import com.bigdata.rdf.internal.impl.uri.IPv4AddrIV; + +/** + * Inline URI handler for IPv4 host addresses. + */ +public class InlineIPv4URIHandler extends InlineURIHandler { + + /** + * Default URI namespace for inline IPv4 addresses. + */ + public static final String NAMESPACE = "urn:ipv4:"; + + public InlineIPv4URIHandler(final String namespace) { + super(namespace); + } + + @SuppressWarnings("rawtypes") + protected AbstractLiteralIV createInlineIV(final String localName) { + + if (localName == null) { + return null; + } + + try { + return new IPv4AddrIV(localName); + } catch (Exception ex) { + /* + * Could not parse localName into an IPv4. Fall through to TermIV. + */ + return null; + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineIPv4URIHandler.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,74 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.internal; + +import java.util.LinkedList; +import java.util.List; + +import org.openrdf.model.URI; + +import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; +import com.bigdata.rdf.vocab.Vocabulary; + +/** + * Default implementation of {@link IInlineURIFactory} that comes pre-loaded + * with two handlers: IPv4 ({@link InlineIPv4URIHandler}) and UUID + * ({@link InlineUUIDURIHandler}). + */ +public class InlineURIFactory implements IInlineURIFactory { + + private final List<InlineURIHandler> handlers = + new LinkedList<InlineURIHandler>(); + + /** + * By default, handle IPv4 and UUID. + */ + public InlineURIFactory() { + addHandler(new InlineUUIDURIHandler(InlineUUIDURIHandler.NAMESPACE)); + addHandler(new InlineIPv4URIHandler(InlineIPv4URIHandler.NAMESPACE)); + } + + protected void addHandler(final InlineURIHandler handler) { + this.handlers.add(handler); + } + + public void init(final Vocabulary vocab) { + for (InlineURIHandler handler : handlers) { + handler.init(vocab); + } + } + + @Override + @SuppressWarnings({ "unchecked", "rawtypes" }) + public URIExtensionIV createInlineURIIV(URI uri) { + for (InlineURIHandler handler : handlers) { + final URIExtensionIV iv = handler.createInlineIV(uri); + if (iv != null) { + return iv; + } + } + return null; + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,102 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.internal; + +import org.openrdf.model.URI; +import org.openrdf.model.impl.URIImpl; + +import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV; +import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; +import com.bigdata.rdf.vocab.Vocabulary; + +/** + * Handler is mapped to a namespace prefix. When a URI is presented that + * matches the handler's namespace prefix, attempt to parse the remaining + * portion of the URI into an inline literal.
The namespace prefix must be + * present in the vocabulary. The localName must be parseable into an inline + * literal. If either of these things is not true the URI will not be inlined. + */ +public abstract class InlineURIHandler { + + /** + * The namespace prefix. + */ + protected final String namespace; + + /** + * Namespace prefix length. + */ + protected final int len; + + /** + * The inline vocab IV for the namespace prefix. + */ + @SuppressWarnings("rawtypes") + protected transient IV namespaceIV; + + /** + * Create a handler for the supplied namespace prefix. + */ + public InlineURIHandler(final String namespace) { + this.namespace = namespace; + this.len = namespace.length(); + } + + /** + * Lookup the namespace IV from the vocabulary. + */ + public void init(final Vocabulary vocab) { + this.namespaceIV = vocab.get(new URIImpl(namespace)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + protected URIExtensionIV createInlineIV(final URI uri) { + + /* + * If the namespace prefix is not in the vocabulary we can't inline + * anything. + */ + if (namespaceIV == null) { + return null; + } + + if (uri.stringValue().startsWith(namespace)) { + final String localName = uri.stringValue().substring(len); + final AbstractLiteralIV localNameIV = createInlineIV(localName); + if (localNameIV != null) { + return new URIExtensionIV(localNameIV, namespaceIV); + } + } + + return null; + } + + /** + * Concrete subclasses are responsible for actually creating the inline + * literal IV for the localName. + */ + @SuppressWarnings("rawtypes") + protected abstract AbstractLiteralIV createInlineIV(final String localName); + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineURIHandler.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,63 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.internal; + +import java.util.UUID; + +import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV; +import com.bigdata.rdf.internal.impl.literal.UUIDLiteralIV; + +/** + * Inline URI handler for UUIDs. + */ +public class InlineUUIDURIHandler extends InlineURIHandler { + + /** + * Default URI namespace for inline UUIDs.
+ */ + public static final String NAMESPACE = "urn:uuid:"; + + public InlineUUIDURIHandler(final String namespace) { + super(namespace); + } + + @SuppressWarnings("rawtypes") + protected AbstractLiteralIV createInlineIV(final String localName) { + + if (localName == null) { + return null; + } + + try { + return new UUIDLiteralIV(UUID.fromString(localName)); + } catch (IllegalArgumentException ex) { + /* + * Could not parse localName into a UUID. Fall through to TermIV. + */ + return null; + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/InlineUUIDURIHandler.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -59,7 +59,7 @@ import com.bigdata.rdf.internal.impl.literal.XSDUnsignedLongIV; import com.bigdata.rdf.internal.impl.literal.XSDUnsignedShortIV; import com.bigdata.rdf.internal.impl.uri.FullyInlineURIIV; -import com.bigdata.rdf.internal.impl.uri.IPAddrIV; +import com.bigdata.rdf.internal.impl.uri.IPv4AddrIV; import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rdf.model.BigdataBNode; @@ -152,7 +152,7 @@ * @see AbstractTripleStore.Options#EXTENSION_FACTORY_CLASS */ private final IExtensionFactory xFactory; - + /** * @see AbstractTripleStore.Options#VOCABULARY_CLASS */ @@ -164,6 +164,11 @@ private final BigdataValueFactory valueFactory; /** + * The inline URI factory for the lexicon. + */ + private final IInlineURIFactory uriFactory; + + /** * Mapping from the {@link IV} for the datatype URI of a registered * extension to the {@link IExtension}. */ @@ -255,6 +260,9 @@ sb.append(", " + AbstractTripleStore.Options.VOCABULARY_CLASS + "=" + vocab.getClass().getName()); + sb.append(", " + AbstractTripleStore.Options.INLINE_URI_FACTORY_CLASS + "=" + + uriFactory.getClass().getName()); + sb.append("}"); return sb.toString(); @@ -273,7 +281,8 @@ final boolean rejectInvalidXSDValues, final IExtensionFactory xFactory,// final Vocabulary vocab, - final BigdataValueFactory valueFactory// + final BigdataValueFactory valueFactory,// + final IInlineURIFactory uriFactory ) { if (blobsThreshold < 0) @@ -299,6 +308,7 @@ this.xFactory = xFactory; this.vocab = vocab; this.valueFactory = valueFactory; + this.uriFactory = uriFactory; /* * Note: These collections are read-only so we do NOT need additional @@ -435,24 +445,35 @@ * @return The inline {@link IV} -or- <code>null</code> if the {@link URI} * can not be inlined into the statement indices. 
*/ + @SuppressWarnings("unchecked") private IV<BigdataURI, ?> createInlineURIIV(final URI value) { - try { - - final String s = value.stringValue(); - - if (s.startsWith(IPAddrIV.NAMESPACE)) { - - return new IPAddrIV(s.substring(IPAddrIV.NAMESPACE_LEN)); - - } - - } catch (UnknownHostException ex) { - - log.warn("unknown host exception, will not inline: " + value); - - } - +// deprecated in favor of the extensible InlineURIFactory mechanism +// try { +// +// final String s = value.stringValue(); +// +// if (s.startsWith("ip:")) { +// return new IPAddrIV(s.substring(3)); +// } +// +// } catch (UnknownHostException ex) { +// +// log.warn("unknown host exception, will not inline: " + value); +// +// } + + /* + * See if there is a handler for inline URIs for this namespace. + */ + @SuppressWarnings("rawtypes") + final URIExtensionIV inline = uriFactory.createInlineURIIV(value); + if (inline != null) { + + return inline; + + } + if (maxInlineTextLength == 0) { return null; @@ -476,8 +497,9 @@ if (namespaceIV != null) { - final FullyInlineTypedLiteralIV<BigdataLiteral> localNameIV = new FullyInlineTypedLiteralIV<BigdataLiteral>( - localName); + final FullyInlineTypedLiteralIV<BigdataLiteral> localNameIV = + new FullyInlineTypedLiteralIV<BigdataLiteral>( + localName); return new URIExtensionIV<BigdataURI>(localNameIV, namespaceIV); @@ -659,11 +681,16 @@ // get the native DTE final DTE dte = DTE.valueOf(datatype); - if (dte == DTE.Extension || dte == null) { - /* - * Either a registered IExtension datatype or a datatype for which - * there is no native DTE support. - */ +// DTE.Extension being used for IPv4 now +// if (dte == DTE.Extension || dte == null) { +// /* +// * Either a registered IExtension datatype or a datatype for which +// * there is no native DTE support. +// */ +// return null; +// } + + if (dte == null) { return null; } @@ -712,6 +739,12 @@ return new XSDUnsignedIntIV<BigdataLiteral>(parseUnsignedInt(v)); case XSDUnsignedLong: return new XSDUnsignedLongIV<BigdataLiteral>(parseUnsignedLong(v)); + case Extension: + /* + * Hijacking DTE.Extension for IPv4. Throws UnknownHostException + * if not parseable as an IPv4. + */ + return new IPv4AddrIV<BigdataLiteral>(v); default: // Not handled. return null; @@ -735,7 +768,26 @@ return null; + } catch (UnknownHostException ex) { + + if (rejectInvalidXSDValues) { + + throw new RuntimeException(ex + ": value=" + v, ex); + + } + + /* + * Note: By falling through here, we wind up accepting the Value, + * but it gets handled as a TermId instead of being inlined. + */ + + if (log.isInfoEnabled()) + log.warn("Value does not validate against datatype: " + value); + + return null; + } + } @@ -931,6 +983,7 @@ case XSDInteger: case XSDDecimal: case UUID: + case Extension: // Extension being used for IPv4 return true; case XSDString: /* Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,49 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.internal; + +import org.openrdf.model.URI; + +import com.bigdata.rdf.internal.impl.uri.URIExtensionIV; +import com.bigdata.rdf.vocab.Vocabulary; + +/** + * Do-nothing inline URI factory used in the case where there is no vocabulary + * defined. + */ +public class NoInlineURIFactory implements IInlineURIFactory { + + public NoInlineURIFactory() { + } + + public void init(final Vocabulary vocab) { + } + + @Override + @SuppressWarnings({ "unchecked", "rawtypes" }) + public URIExtensionIV createInlineURIIV(URI uri) { + return null; + } + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/NoInlineURIFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSD.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -72,6 +72,13 @@ */ static public final URI UUID = new URIImpl(NAMESPACE + "uuid"); + /** + * Not sure if there is a better solution for this. Perhaps XSSTL? + * + * http://www.codesynthesis.com/projects/xsstl/ + */ + static public final URI IPV4 = new URIImpl(NAMESPACE + "IPv4Address"); + // URI DATETIME = XMLSchema.DATETIME; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/AbstractIV.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -680,7 +680,7 @@ // The namespaceIV (a Vocabulary item). IVUtility.encode(keyBuilder, extension.getExtensionIV()); - // The inline localName (Unicode data). + // The inline localName (any inline literal data).
IVUtility.encode(keyBuilder, extension.getLocalNameIV()); return keyBuilder; @@ -857,6 +857,9 @@ .setByteLength(1/* flags */+ 1/* termCode */+ b.length); return keyBuilder; } +// case Extension: { + // handled by IPv4AddrIV.encode() +// } default: throw new AssertionError(toString()); } Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -1,374 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.rdf.internal.impl.uri; - -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.io.ObjectStreamException; -import java.io.Serializable; -import java.net.UnknownHostException; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.openrdf.model.URI; -import org.openrdf.model.Value; - -import com.bigdata.btree.BytesUtil.UnsignedByteArrayComparator; -import com.bigdata.btree.keys.IKeyBuilder; -import com.bigdata.io.LongPacker; -import com.bigdata.rdf.internal.DTE; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.internal.Inet4Address; -import com.bigdata.rdf.internal.VTE; -import com.bigdata.rdf.internal.impl.AbstractInlineIV; -import com.bigdata.rdf.lexicon.LexiconRelation; -import com.bigdata.rdf.model.BigdataURI; - -/** - * Internal value representing an inline IP address. Uses the InetAddress - * class to represent the IP address and perform the translation to and from - * byte[], which is then used directly in the IV key (after the flags). - * <p> - * This internal value has a {@link VTE} of {@link VTE#URI}. - * <p> - * {@inheritDoc} - */ -public class IPAddrIV<V extends BigdataURI> extends AbstractInlineIV<V, Inet4Address> - implements Serializable, URI { - - /** - * - */ - private static final long serialVersionUID = 685148537376856907L; - -// private static final transient Logger log = Logger.getLogger(SidIV.class); - - public static final String NAMESPACE = "ip:/"; - - public static final int NAMESPACE_LEN = NAMESPACE.length(); - - /** - * The inline IP address. - */ - private final Inet4Address value; - - /** - * The cached string representation of this IP. - */ - private transient String hostAddress; - - /** - * The cached byte[] key for the encoding of this IV. - */ - private transient byte[] key; - - /** - * The cached materialized BigdataValue for this InetAddress. 
- */ - private transient V uri; - - public IV<V, Inet4Address> clone(final boolean clearCache) { - - final IPAddrIV<V> tmp = new IPAddrIV<V>(value);//, prefix); - - // Propagate the cached byte[] key. - tmp.key = key; - - // Propagate the cached BigdataValue. - tmp.uri = uri; - - if (!clearCache) { - - tmp.setValue(getValueCache()); - - } - - return tmp; - - } - - /** - * Ctor with internal value specified. - */ - public IPAddrIV(final Inet4Address value) {//, final byte prefix) { - - /* - * TODO Using XSDBoolean so that we can know how to decode this thing - * as an IPAddrIV. We need to fix the Extension mechanism for URIs. - */ - super(VTE.URI, DTE.XSDBoolean); - - this.value = value; - - } - - /* - * Somebody please fix this for the love of god. - */ - public static final Pattern pattern = - Pattern.compile("((?:[0-9]{1,3}\\.){3}[0-9]{1,3})((\\/)(([0-9]{1,2})))?"); - - /** - * Ctor with host address specified. - */ - public IPAddrIV(final String hostAddress) throws UnknownHostException { - - /* - * Note: XSDBoolean happens to be assigned the code value of 0, which is - * the value we we want when the data type enumeration will be ignored. - */ - super(VTE.URI, DTE.XSDBoolean); - - this.hostAddress = hostAddress; - - final Matcher matcher = pattern.matcher(hostAddress); - - final boolean matches = matcher.matches(); - - if (matches) { - - final String ip = matcher.group(1); - -// log.debug(ip); - - final String suffix = matcher.group(4); - -// log.debug(suffix); - - final String[] s; - if (suffix != null) { - - s = new String[5]; - System.arraycopy(ip.split("\\.", -1), 0, s, 0, 4); - s[4] = suffix; - - } else { - - s = ip.split("\\.", -1); - - } - - this.value = Inet4Address.textToAddr(s); - - } else { - - throw new IllegalArgumentException("not an IP: " + hostAddress); - -// log.debug("no match"); - - } - - } - - /** - * Returns the inline value. - */ - public Inet4Address getInlineValue() throws UnsupportedOperationException { - return value; - } - - /** - * Returns the URI representation of this IV. - */ - public V asValue(final LexiconRelation lex) { - if (uri == null) { - uri = (V) lex.getValueFactory().createURI(getNamespace(), getLocalName()); - uri.setIV(this); - } - return uri; - } - - /** - * Return the byte length for the byte[] encoded representation of this - * internal value. Depends on the byte length of the encoded inline value. - */ - public int byteLength() { - return 1 + key().length; - } - - public String toString() { - return "IP("+getLocalName()+")"; - } - - public int hashCode() { - return value.hashCode(); - } - -// /** -// * Implements {@link BNode#getID()}. -// * <p> -// * This implementation uses the {@link BigInteger} class to create a unique -// * blank node ID based on the <code>unsigned byte[]</code> key of the inline -// * {@link SPO}. -// */ -// @Override -// public String getID() { -//// // just use the hash code. can result in collisions -//// return String.valueOf(hashCode()); -// -// // create a big integer using the spo key. should result in unique ids -// final byte[] key = key(); -// final int signum = key.length > 0 ? 1 : 0; -// final BigInteger bi = new BigInteger(signum, key); -// return 's' + bi.toString(); -// } - - @Override - public String getNamespace() { - return NAMESPACE; - } - - @Override - public String getLocalName() { - if (hostAddress == null) { - hostAddress = value.toString(); - } - return hostAddress; - } - - /** - * Two {@link IPAddrIV} are equal if their InetAddresses are equal. 
- */ - public boolean equals(final Object o) { - if (this == o) - return true; - if (o instanceof IPAddrIV) { - final Inet4Address value2 = ((IPAddrIV<?>) o).value; - return value.equals(value2); - } - return false; - } - - public int _compareTo(IV o) { - - /* - * Note: This works, but it might be more expensive. - */ - return UnsignedByteArrayComparator.INSTANCE.compare(key(), ((IPAddrIV)o).key()); - - } - - /** - * Encode this internal value into the supplied key builder. Emits the - * flags, following by the encoded byte[] representing the spo, in SPO - * key order. - * <p> - * {@inheritDoc} - */ - @Override - public IKeyBuilder encode(final IKeyBuilder keyBuilder) { - - // First emit the flags byte. - keyBuilder.appendSigned(flags()); - - // Then append the InetAddress byte[] and the prefix. - keyBuilder.append(key()); - - return keyBuilder; - - } - - private byte[] key() { - - if (key == null) { - key = value.getBytes(); - } - - return key; - - } - - /** - * Object provides serialization for {@link IPAddrIV} via the write-replace - * and read-replace pattern. - */ - private static class IPAddrIVState implements Externalizable { - - private static final long serialVersionUID = -1L; - -// private byte flags; - private byte[] key; - - /** - * De-serialization constructor. - */ - public IPAddrIVState() { - - } - - private IPAddrIVState(final IPAddrIV iv) { -// this.flags = flags; - this.key = iv.key(); - } - - public void readExternal(ObjectInput in) throws IOException, - ClassNotFoundException { -// flags = in.readByte(); - final int nbytes = LongPacker.unpackInt(in); - key = new byte[nbytes]; - in.readFully(key); - } - - public void writeExternal(ObjectOutput out) throws IOException { -// out.writeByte(flags); - LongPacker.packLong(out, key.length); - out.write(key); - } - - private Object readResolve() throws ObjectStreamException { - return new Inet4Address(key); - } - - } - - private Object writeReplace() throws ObjectStreamException { - - return new IPAddrIVState(this); - - } - - /** - * Implements {@link Value#stringValue()}. - */ - @Override - public String stringValue() { - - return getLocalName(); - - } - - /** - * Does not need materialization to answer URI interface methods. - */ - @Override - public boolean needsMaterialization() { - - return false; - - } - - -} \ No newline at end of file Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java (from rev 8600, branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPAddrIV.java) =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -0,0 +1,378 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.internal.impl.uri; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.ObjectStreamException; +import java.io.Serializable; +import java.net.UnknownHostException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; + +import com.bigdata.btree.BytesUtil; +import com.bigdata.btree.BytesUtil.UnsignedByteArrayComparator; +import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.io.LongPacker; +import com.bigdata.rdf.internal.DTE; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.internal.Inet4Address; +import com.bigdata.rdf.internal.VTE; +import com.bigdata.rdf.internal.XSD; +import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV; +import com.bigdata.rdf.lexicon.LexiconRelation; +import com.bigdata.rdf.model.BigdataLiteral; + +/** + * Internal value representing an inline IP address. Uses the InetAddress + * class to represent the IP address and perform the translation to and from + * byte[], which is then used directly in the IV key (after the flags). + * <p> + * This internal value has a {@link VTE} of {@link VTE#URI}. + * <p> + * {@inheritDoc} + */ +public class IPv4AddrIV<V extends BigdataLiteral> + extends AbstractLiteralIV<V, Inet4Address> + implements Serializable, Literal { + + /** + * + */ + private static final long serialVersionUID = 685148537376856907L; + + private static final transient Logger log = Logger.getLogger(IPv4AddrIV.class); + + /** + * The inline IP address. + */ + private final Inet4Address value; + + /** + * The cached string representation of this IP. + */ + private transient String hostAddress; + + /** + * The cached byte[] key for the encoding of this IV. + */ + private transient byte[] key; + + /** + * The cached materialized BigdataValue for this InetAddress. + */ + private transient V uri; + + public IV<V, Inet4Address> clone(final boolean clearCache) { + + final IPv4AddrIV<V> tmp = new IPv4AddrIV<V>(value);//, prefix); + + // Propagate the cached byte[] key. + tmp.key = key; + + // Propagate the cached BigdataValue. + tmp.uri = uri; + + if (!clearCache) { + + tmp.setValue(getValueCache()); + + } + + return tmp; + + } + + /** + * Ctor with internal value specified. + */ + public IPv4AddrIV(final Inet4Address value) {//, final byte prefix) { + + super(DTE.Extension); + + this.value = value; + + } + + /* + * Somebody please fix this for the love of god. + */ + public static final Pattern pattern = + Pattern.compile("((?:[0-9]{1,3}\\.){3}[0-9]{1,3})((\\/)(([0-9]{1,2})))?"); + + /** + * Ctor with host address specified. 
+ */ + public IPv4AddrIV(final String hostAddress) throws UnknownHostException { + + super(DTE.Extension); + + this.hostAddress = hostAddress; + + final Matcher matcher = pattern.matcher(hostAddress); + + final boolean matches = matcher.matches(); + + if (matches) { + + final String ip = matcher.group(1); + + if (log.isDebugEnabled()) + log.debug(ip); + + final String suffix = matcher.group(4); + + if (log.isDebugEnabled()) + log.debug(suffix); + + final String[] s; + if (suffix != null) { + + s = new String[5]; + System.arraycopy(ip.split("\\.", -1), 0, s, 0, 4); + s[4] = suffix; + + } else { + + s = ip.split("\\.", -1); + + } + + this.value = Inet4Address.textToAddr(s); + + if (value == null) { + throw new UnknownHostException("not a valid IP: " + hostAddress); + } + + if (log.isDebugEnabled()) { + log.debug(value); + log.debug(byteLength()); + log.debug(BytesUtil.toString(key())); + } + + } else { + + throw new UnknownHostException("not a valid IP: " + hostAddress); + + } + + } + + /** + * Returns the inline value. + */ + public Inet4Address getInlineValue() throws UnsupportedOperationException { + return value; + } + + /** + * Returns the Literal representation of this IV. + */ + @SuppressWarnings("unchecked") + public V asValue(final LexiconRelation lex) { + if (uri == null) { + uri = (V) lex.getValueFactory().createLiteral(getLabel(), XSD.IPV4); + uri.setIV(this); + } + return uri; + } + + /** + * Return the byte length for the byte[] encoded representation of this + * internal value. Depends on the byte length of the encoded inline value. + */ + public int byteLength() { + return 1 + key().length; + } + + public String toString() { + return "IPv4("+getLabel()+")"; + } + + public int hashCode() { + return value.hashCode(); + } + +// /** +// * Implements {@link BNode#getID()}. +// * <p> +// * This implementation uses the {@link BigInteger} class to create a unique +// * blank node ID based on the <code>unsigned byte[]</code> key of the inline +// * {@link SPO}. +// */ +// @Override +// public String getID() { +//// // just use the hash code. can result in collisions +//// return String.valueOf(hashCode()); +// +// // create a big integer using the spo key. should result in unique ids +// final byte[] key = key(); +// final int signum = key.length > 0 ? 1 : 0; +// final BigInteger bi = new BigInteger(signum, key); +// return 's' + bi.toString(); +// } + +// @Override +// public String getNamespace() { +// return NAMESPACE; +// } +// +// @Override +// public String getLocalName() { +// if (hostAddress == null) { +// hostAddress = value.toString(); +// } +// return hostAddress; +// } + + @Override + public String getLabel() { + if (hostAddress == null) { + hostAddress = value.toString(); + } + return hostAddress; + } + + /** + * Two {@link IPv4AddrIV} are equal if their InetAddresses are equal. + */ + public boolean equals(final Object o) { + if (this == o) + return true; + if (o instanceof IPv4AddrIV) { + final Inet4Address value2 = ((IPv4AddrIV<?>) o).value; + return value.equals(value2); + } + return false; + } + + public int _compareTo(IV o) { + + /* + * Note: This works, but it might be more expensive. + */ + return UnsignedByteArrayComparator.INSTANCE.compare(key(), ((IPv4AddrIV)o).key()); + + } + + /** + * Encode this internal value into the supplied key builder. Emits the + * flags, followed by the encoded byte[] representing the IPv4 address. + * <p> + * {@inheritDoc} + */ + @Override + public IKeyBuilder encode(final IKeyBuilder keyBuilder) { + + // First emit the flags byte.
+ keyBuilder.appendSigned(flags()); + + // Then append the InetAddress byte[] and the prefix. + keyBuilder.append(key()); + + return keyBuilder; + + } + + private byte[] key() { + + if (key == null) { + key = value.getBytes(); + } + + return key; + + } + + /** + * Object provides serialization for {@link IPv4AddrIV} via the write-replace + * and read-replace pattern. + */ + private static class IPAddrIVState implements Externalizable { + + private static final long serialVersionUID = -1L; + +// private byte flags; + private byte[] key; + + /** + * De-serialization constructor. + */ + public IPAddrIVState() { + + } + + private IPAddrIVState(final IPv4AddrIV iv) { +// this.flags = flags; + this.key = iv.key(); + } + + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { +// flags = in.readByte(); + final int nbytes = LongPacker.unpackInt(in); + key = new byte[nbytes]; + in.readFully(key); + } + + public void writeExternal(ObjectOutput out) throws IOException { +// out.writeByte(flags); + LongPacker.packLong(out, key.length); + out.write(key); + } + + private Object readResolve() throws ObjectStreamException { + return new Inet4Address(key); + } + + } + + private Object writeReplace() throws ObjectStreamException { + return new IPAddrIVState(this); + } + +// /** +// * Implements {@link Value#stringValue()}. +// */ +// @Override +// public String stringValue() { +// +// return getLocalName(); +// +// } + + /** + * Does not need materialization to answer URI interface methods. + */ + @Override + public boolean needsMaterialization() { + return false; + } + +} \ No newline at end of file Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/IPv4AddrIV.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/URIExtensionIV.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/URIExtensionIV.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/impl/uri/URIExtensionIV.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -9,10 +9,10 @@ import com.bigdata.rdf.internal.impl.AbstractInlineExtensionIV; import com.bigdata.rdf.internal.impl.AbstractInlineIV; import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV; -import com.bigdata.rdf.internal.impl.literal.FullyInlineTypedLiteralIV; import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.model.BigdataLiteral; import com.bigdata.rdf.model.BigdataURI; +import com.bigdata.rdf.model.BigdataValueFactory; import com.bigdata.rdf.vocab.Vocabulary; /** @@ -44,7 +44,7 @@ /** * The localName. */ - private final FullyInlineTypedLiteralIV<BigdataLiteral> delegateIV; + private final AbstractLiteralIV<BigdataLiteral, ?> delegateIV; /** * {@inheritDoc} @@ -72,7 +72,7 @@ /** * - * @param localNameIV + * @param delegateIV * The {@link IV} which represents the localName. * @param namespaceIV * The {@link IV} which represents the namespace. 
This MUST be a @@ -80,10 +80,10 @@ */ @SuppressWarnings("unchecked") public URIExtensionIV( - final FullyInlineTypedLiteralIV<BigdataLiteral> localNameIV, + final AbstractLiteralIV<BigdataLiteral, ?> delegateIV, final IV<?,?> namespaceIV) { - super(VTE.URI, true/* extension */, localNameIV.getDTE()); + super(VTE.URI, true/* extension */, delegateIV.getDTE()); if (namespaceIV == null) throw new IllegalArgumentException(); @@ -91,32 +91,28 @@ if (!namespaceIV.isInline()) throw new IllegalArgumentException(); - this.delegateIV = localNameIV; + this.delegateIV = delegateIV; this.namespaceIV = (AbstractInlineIV<BigdataURI, ?>) namespaceIV; } /** - * Even though Literal extension IVs are fully inline (no data in the - * lexicon indices), we do need materialization to answer the openrdf - * Literal interface correctly. We cannot properly interpret what the - * delegate IV means without the materialized value. + * The namespace IV does need materialization, although it will not need + * to go to the index to get the value (it just needs access to the lexicon's + * vocabulary). */ public boolean needsMaterialization() { - return true; + return delegateIV.needsMaterialization() + || namespaceIV.needsMaterialization(); } public AbstractLiteralIV<BigdataLiteral, ?> getLocalNameIV() { - return delegateIV; - } public Object getInlineValue() { // TODO TEST - return new URIImpl(stringValue()); - } /** @@ -124,18 +120,14 @@ */ @Override public IV<BigdataURI, ?> getExtensionIV() { - return namespaceIV; - } /** * */ public int hashCode() {// TODO Inspect distribution. - return namespaceIV.hashCode() ^ delegateIV.hashCode(); - } public boolean equals(final Object o) { @@ -148,6 +140,10 @@ return false; } + public String toString() { + return this.namespaceIV.toString() + ":" + this.delegateIV.toString(); + } + @SuppressWarnings("rawtypes") public int _compareTo(final IV o) { @@ -183,12 +179,18 @@ if (v == null) { -// final BigdataValueFactory f = lex.getValueFactory(); + final BigdataValueFactory f = lex.getValueFactory(); - final ILexiconConfiguration config = lex.getLexiconConfiguration(); +// final ILexiconConfiguration config = lex.getLexiconConfiguration(); +// +// v = setValue((V) config.asValueFromVocab(this)); - v = setValue((V) config.asValueFromVocab(this)); - + final URI namespace = namespaceIV.asValue(lex); + + final String localName = delegateIV.getInlineValue().toString(); + + v = setValue((V) f.createURI(namespace.stringValue(), localName)); + v.setIV(this); } @@ -197,25 +199,23 @@ } + //////////////////////// + // OpenRDF URI methods + //////////////////////// + @Override public String stringValue() { - return getNamespace() + getLocalName(); - } @Override public String getNamespace() { - - return ((URI) namespaceIV.getInlineValue()).stringValue(); - + return namespaceIV.getValue().stringValue(); } @Override public String getLocalName() { - - return delegateIV.getLabel(); - + return delegateIV.getInlineValue().toString(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-08-04 10:06:47 UTC (rev 8603) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java 2014-08-07 08:50:25 UTC (rev 8604) @@ -82,13 +82,16 @@ import com.bigdata.rawstore.Bytes; import com.bigdata.rdf.internal.IDatatypeURIResolver; import 
com.bigdata.rdf.internal.IExtensionFactory; +import com.bigdata.rdf.internal.IInlineURIFactory; import com.bigdata.rdf.internal.ILexiconConfiguration; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.IVUtility; import com.bigdata.rdf.internal.LexiconConfiguration; import com.bigdata.rdf.internal.NoExtensionFactory; +import com.bigdata.rdf.internal.NoInlineURIFactory; import com.bigdata.rdf.internal.NoSuchVocabularyItem; import com.bigdata.rdf.internal.VTE; +import com.bigdata.rdf.internal.XSD; import com.bigdata.rdf.internal.impl.BlobIV; import com.bigdata.rdf.internal.impl.TermId; import com.bigdata.rdf.internal.impl.bnode.SidIV; @@ -271,6 +274,47 @@ } + @SuppressWarnings("unchecked") + protected Class<IInlineURIFactory> determineInlineURIFactoryClass() ... [truncated message content] |
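To see how the new extension point is meant to be used, here is a minimal sketch of an application-specific factory. It is illustrative only: the urn:widget: namespace, the com.example package, and the WidgetInlineURIFactory name are hypothetical, while the types and the addHandler/createInlineIV signatures come from the diffs above. It assumes the urn:widget: URI has been declared in the KB's Vocabulary, since InlineURIHandler.init() resolves the namespace IV from the vocabulary and declines to inline anything otherwise.

package com.example; // hypothetical package

import java.util.UUID;

import com.bigdata.rdf.internal.InlineURIFactory;
import com.bigdata.rdf.internal.InlineURIHandler;
import com.bigdata.rdf.internal.impl.literal.AbstractLiteralIV;
import com.bigdata.rdf.internal.impl.literal.UUIDLiteralIV;

/**
 * Hypothetical factory that inlines urn:widget:<uuid> URIs in addition to
 * the UUID and IPv4 handlers registered by the superclass constructor.
 */
public class WidgetInlineURIFactory extends InlineURIFactory {

    public WidgetInlineURIFactory() {
        addHandler(new InlineURIHandler("urn:widget:") {
            @SuppressWarnings("rawtypes")
            @Override
            protected AbstractLiteralIV createInlineIV(final String localName) {
                try {
                    // Pack the widget id directly into the statement
                    // indices as an inline UUID literal IV.
                    return new UUIDLiteralIV(UUID.fromString(localName));
                } catch (IllegalArgumentException ex) {
                    // Not a UUID: decline, so the URI falls through to
                    // the normal TermIV path.
                    return null;
                }
            }
        });
    }

}

The custom factory would then be named via AbstractTripleStore.Options.INLINE_URI_FACTORY_CLASS (see the LexiconConfiguration and LexiconRelation changes above), and the urn:widget: URI added to the KB's vocabulary so that the handler can resolve its namespace IV at init() time.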
From: <tob...@us...> - 2014-08-08 00:33:52
|
Revision: 8609 http://sourceforge.net/p/bigdata/code/8609 Author: tobycraig Date: 2014-08-08 00:33:42 +0000 (Fri, 08 Aug 2014) Log Message: ----------- Added classes and IDs to elements on status HTML output Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-08-07 13:42:47 UTC (rev 8608) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-08-08 00:33:42 UTC (rev 8609) @@ -204,52 +204,84 @@ // The quorum state if (quorumService == null) { - p.text("The local quorum service is not running.").node("br") - .close(); + p.text("The local quorum service is ") + .node("span").attr("id", "quorum-state").text("not running") + .close().text(".") + .node("br").close(); } else { - p.text("The quorum is " + (quorum.isQuorumMet() ? "" : "not") - + " met.").node("br").close(); + p.text("The quorum is ") + .node("span").attr("id", "quorum-state") + .text((quorum.isQuorumMet() ? "" : "not ") + "met") + .close().text(".") + .node("br").close(); - p.text("" + njoined + " out of " + quorum.replicationFactor() - + " services are joined.").node("br").close(); + p.node("span").attr("id", "njoined").text("" + njoined).close() + .text(" out of ") + .node("span").attr("id", "replication-factor") + .text("" + quorum.replicationFactor()).close() + .text(" services are joined.") + .node("br").close(); - p.text("quorumToken=" + quorumToken + ", lastValidToken=" - + lastValidToken).node("br").close(); + p.text("quorumToken=") + .node("span").attr("id", "quorum-token") + .text("" + quorumToken).close() + .text(", lastValidToken=") + .node("span").attr("id", "last-valid-token") + .text("" + lastValidToken).close() + .node("br").close(); - p.text("logicalServiceZPath=" - + quorumService.getLogicalServiceZPath()).node("br") - .close(); + p.text("logicalServiceZPath=") + .node("span").attr("id", "logical-service-z-path") + .text(quorumService.getLogicalServiceZPath()).close() + .node("br").close(); - p.text("PlatformStatsPlugIn=" - + (journal.getPlatformStatisticsCollector() == null ? "N/A" - : "Running")).node("br").close(); + p.text("PlatformStatsPlugIn=") + .node("span").attr("id", "platform-stats-plugin") + .text(journal.getPlatformStatisticsCollector() == null ? + "N/A" : "Running").close() + .node("br").close(); - p.text("GangliaPlugIn=" - + (journal.getGangliaService() == null ? "N/A" - : "Running")).node("br").close(); + p.text("GangliaPlugIn=") + .node("span").attr("id", "ganglia-plugin") + .text(journal.getGangliaService() == null ? "N/A" : + "Running").close() + .node("br").close(); // Note: This is the *local* value of getHAStatus(). // Note: The HAReady token reflects whether or not the service // is // joined. 
- p.text("HAStatus: " + quorumService.getService().getHAStatus() - + ", HAReadyToken=" + haReadyToken).node("br").close(); + p.text("HAStatus: ") + .node("span").attr("id", "ha-status") + .text("" + quorumService.getService().getHAStatus()).close() + .text(", HAReadyToken=") + .node("span").attr("id", "ha-ready-token") + .text("" + haReadyToken).close() + .node("br").close(); /* * Report on the Service. */ { - p.text("Service: serviceId=" + quorumService.getServiceId()) - .node("br").close(); - p.text("Service: pid=" + quorumService.getPID()).node("br") - .close(); - p.text("Service: path=" + quorumService.getServiceDir()) - .node("br").close(); - p.text("Service: proxy=" - + journal.getHAJournalServer().getProxy()) - .node("br").close(); + p.text("Service: serviceId=") + .node("span").attr("id", "service-id") + .text("" + quorumService.getServiceId()).close() + .node("br").close(); + p.text("Service: pid=") + .node("span").attr("id", "service-pid") + .text("" + quorumService.getPID()).close() + .node("br").close(); + p.text("Service: path=") + .node("span").attr("id", "service-path") + .text("" + quorumService.getServiceDir()).close() + .node("br").close(); + p.text("Service: proxy=") + .node("span").attr("id", "service-proxy") + .text("" + journal.getHAJournalServer().getProxy()) + .close() + .node("br").close(); } @@ -277,28 +309,33 @@ final boolean takeSnapshot = mgr .isReadyToSnapshot(snapshotPolicy .newSnapshotRequest()); - p.text("Service"// - + ": snapshotPolicy=" - + snapshotPolicy// - + ", shouldSnapshot=" - + takeSnapshot// + p.text("Service: snapshotPolicy=") + .node("span").attr("id", "snapshot-policy") + .text("" + snapshotPolicy).close() + .text(", shouldSnapshot=") + .node("span").attr("id", "take-snapshot") + .text("" + takeSnapshot).close() // + ", lastSnapshotCommitCounter=" // + sinceCommitCounter// // + ", HALogFileBytesOnDiskSinceLastSnapshot=" // + haLogBytesOnDiskSinceLastSnapshot// - ).node("br").close(); + .node("br").close(); } // restore policy. - p.text("Service: restorePolicy=" - + journal.getSnapshotManager().getRestorePolicy()) - .node("br").close(); + p.text("Service: restorePolicy=") + .node("span").attr("id", "restore-policy") + .text("" + journal.getSnapshotManager().getRestorePolicy()) + .close() + .node("br").close(); // HA Load Balancer. { - p.text("Service: LBSPolicy=" - + HALoadBalancerServlet.toString(req - .getServletContext())).node("br").close(); + p.text("Service: LBSPolicy=") + .node("span").attr("id", "lbs-policy") + .text(HALoadBalancerServlet.toString(req + .getServletContext())).close() + .node("br").close(); } // if(true) { // /* @@ -346,17 +383,30 @@ // // Ignore. // } final long fileSize = file == null ? 0L : file.length(); - p.text("HAJournal: file=" + file // - + ", commitCounter=" + commitCounter // - + ", nbytes=" + fileSize// - + (digestStr == null ? "" : ", md5=" + digestStr)// + p.text("HAJournal: file=") + .node("span").attr("id", "ha-journal-file") + .text("" + file).close() + .text(", commitCounter=") + .node("span").attr("id", "ha-journal-commit-counter") + .text("" + commitCounter).close() + .text(", nbytes=") + .node("span").attr("id", "ha-journal-nbytes") + .text("" + fileSize).close(); + if(digestStr != null) { + p.text(", md5=") + .node("span").attr("id", "ha-journal-md5") + .text(digestStr).close(); + } // + (releaseTime != -1L ? ", releaseTime=" // + RootBlockView.toString(releaseTime)// // : "")// - ).node("br").close(); + p.node("br").close(); + // Show the current root block. 
- if(debug) - current.node("pre", rb.toString()); + if(debug) { + current.node("span").attr("id", "root-block") + .text(rb.toString()).close(); + } } } @@ -396,20 +446,28 @@ .getProperty( com.bigdata.journal.Options.HALOG_COMPRESSOR, com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR); - p.text("HALogDir: nfiles=" - + nfiles - + ", nbytes=" - + nbytes - + ", path=" - + nexus.getHALogDir() - + ", compressorKey=" - + compressorKey - + ", lastHALogClosed=" - + (r == null ? "N/A" : CommitCounterUtility - .getCommitCounterStr(r.getCommitCounter())) - + ", liveLog=" - + (currentFile == null ? "N/A" : currentFile - .getName())).node("br").close(); + p.text("HALogDir: nfiles=") + .node("span").attr("id", "ha-log-dir-nfiles") + .text("" + nfiles).close() + .text(", nbytes=") + .node("span").attr("id", "ha-log-dir-nbytes") + .text("" + nbytes).close() + .text(", path=") + .node("span").attr("id", "ha-log-dir-path") + .text("" + nexus.getHALogDir()).close() + .text(", compressorKey=") + .node("span").attr("id", "ha-log-dir-compressor-key") + .text(compressorKey).close() + .text(", lastHALogClosed=") + .node("span").attr("id", "ha-log-dir-last-ha-log-closed") + .text((r == null ? "N/A" : CommitCounterUtility + .getCommitCounterStr(r.getCommitCounter()))) + .close() + .text(", liveLog=") + .node("span").attr("id", "ha-log-dir-live-log") + .text((currentFile == null ? "N/A" : + currentFile.getName())).close() + .node("br").close(); } if (digestEnum != null && (digestEnum == DigestEnum.All || digestEnum == DigestEnum.HALogs)) { @@ -446,14 +504,21 @@ } finally { r.close(); } - p.text("HALogFile: closingCommitCounter=" - + closingCommitCounter// - + ", file=" - + file// - + ", nbytes=" - + nbytes// - + (digestStr == null ? "" : ", md5=" - + digestStr)).node("br").close(); + p.text("HALogFile: closingCommitCounter=") + .node("span").attr("id", "ha-log-file-closing-commit-counter") + .text("" + closingCommitCounter).close() + .text(", file=") + .node("span").attr("id", "ha-log-file-file") + .text("" + file).close() + .text(", nbytes=") + .node("span").attr("id", "ha-log-file-nbytes") + .text("" + nbytes).close(); + if(digestStr != null) { + p.text(", md5=") + .node("span").attr("id", "ha-log-file-digest-str") + .text(digestStr).close(); + } + p.node("br").close(); } } } @@ -492,10 +557,16 @@ nbytes += sr.sizeOnDisk(); nfiles++; } - p.text("SnapshotDir: nfiles=" + nfiles + ", nbytes=" - + nbytes + ", path=" - + journal.getSnapshotManager().getSnapshotDir()) - .node("br").close(); + p.text("SnapshotDir: nfiles=") + .node("span").attr("id", "snapshot-dir-nfiles") + .text("" + nfiles).close() + .text(", nbytes=") + .node("span").attr("id", "snapshot-dir-nbytes") + .text("" + nbytes).close() + .text(", path=") + .node("span").attr("id", "snapshot-dir-path") + .text("" + journal.getSnapshotManager().getSnapshotDir()).close() + .node("br").close(); } if (true) { @@ -529,14 +600,22 @@ } } - p.text("SnapshotFile: commitTime=" - + RootBlockView.toString(rb.getLastCommitTime()) - + ", commitCounter=" - + rb.getCommitCounter() - + ", nbytes=" - + nbytes - + (digestStr == null ? 
"" : ", md5=" - + digestStr)).node("br").close(); + p.text("SnapshotFile: commitTime=") + .node("span").attr("id", "snapshot-file-commit-time") + .text(RootBlockView.toString(rb.getLastCommitTime())) + .close() + .text(", commitCounter=") + .node("span").attr("id", "snapshot-file-commit-counter") + .text("" + rb.getCommitCounter()).close() + .text(", nbytes=") + .node("span").attr("id", "snapshot-file-nbytes") + .text("" + nbytes).close(); + if(digestStr != null) { + p.text(", md5=") + .node("span").attr("id", "snapshot-file-md5") + .text(digestStr).close(); + } + p.node("br").close(); } @@ -588,8 +667,10 @@ p.close(); - if(debug) - current.node("pre", quorum.toString()); + if(debug) { + current.node("span").attr("id", "quorum").text(quorum.toString()) + .close(); + } } @@ -614,8 +695,8 @@ // Request RESTORE. if (haGlue.rebuildFromLeader(new HARemoteRebuildRequest()) != null) { - current.node("h2", - "Running Disaster Recovery for this service (REBUILD)."); + current.node("h2").attr("id", "rebuild") + .text("Running Disaster Recovery for this service (REBUILD)."); } @@ -650,7 +731,8 @@ { - final XMLBuilder.Node p = current.node("p"); + final XMLBuilder.Node ul = current.node("ul") + .attr("id", "quorum-services"); final UUID[] joined = quorum.getJoined(); @@ -677,6 +759,8 @@ } + final XMLBuilder.Node li = ul.node("li"); + /* * Do all RMIs to the remote service in a try/catch. This * allows us to catch problems with communications to the @@ -710,8 +794,10 @@ * Note error and continue with the next service. */ - p.text("Unable to reach service: " + remoteService) - .close(); + li.text("Unable to reach service: ") + .node("span").attr("class", "unreachable") + .text("" + remoteService).close() + .close(); log.error(ex, ex); @@ -738,24 +824,36 @@ + BigdataStatics.getContextPath(); // hyper link to NSS service. - p.node("a").attr("href", nssUrl).text(nssUrl).close(); + li.node("a").attr("class", "nss-url").attr("href", nssUrl) + .text(nssUrl).close(); // plus the other metadata. - p.text(" : "// - + (isLeader ? "leader" : (isFollower ? "follower" - : " is not joined"))// - + ", pipelineOrder=" - + (pipelineIndex == -1 ? "N/A" : pipelineIndex)// - + ", writePipelineAddr=" + writePipelineAddr// - + ", service=" + (isSelf ? "self" : "other")// - + ", extendedRunState=" + extendedRunState// - ).node("br").close(); + li.text(" : ") + .node("span").attr("class", "service-status") + .text((isLeader ? "leader" : (isFollower ? "follower" + : " is not joined"))).close() + .text(", pipelineOrder=") + .node("span").attr("class", "service-pipeline-order") + .text("" + (pipelineIndex == -1 ? "N/A" : pipelineIndex)) + .close() + .text(", writePipelineAddr=") + .node("span").attr("class", "service-write-pipeline-addr") + .text("" + writePipelineAddr).close() + .text(", service=") + .node("span").attr("class", "service-service") + .text((isSelf ? "self" : "other")).close() + .text(", extendedRunState=") + .node("span").attr("class", "service-extended-run-state") + .text(extendedRunState).close() + .node("br").close(); + + li.close(); } - p.close(); + ul.close(); } - + // DumpZookeeper { @@ -773,7 +871,8 @@ final XMLBuilder.Node p = current.node("p"); - p.text("ZooKeeper is not available.").close(); + p.text("ZooKeeper is not available.") + .attr("id", "zookeeper-unavailable").close(); } else { @@ -785,7 +884,7 @@ final PrintWriter out = new PrintWriter( resp.getOutputStream(), true/* autoFlush */); - out.print("<pre>\n"); + out.print("<span id=\"zookeeper\">\n"); try { @@ -808,7 +907,7 @@ } // close section. 
- out.print("\n</pre>"); + out.print("\n</span>"); // flush PrintWriter before resuming writes on Writer. out.flush(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-08-07 13:42:47 UTC (rev 8608) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-08-08 00:33:42 UTC (rev 8609) @@ -467,7 +467,7 @@ final PrintWriter out = new PrintWriter(resp.getOutputStream(), true/* autoFlush */); - out.print("<pre>\n"); + out.print("<pre id=\"journal-dump\">\n"); final DumpJournal dump = new DumpJournal( (Journal) getIndexManager()); @@ -508,12 +508,13 @@ dump.dumpJournal(out, namespaces, dumpHistory, dumpPages, dumpIndices, dumpTuples); + out.print("\n</pre>"); + // flush PrintStream before resuming writes on Writer. out.flush(); // close section. // section.close(); - out.print("\n</pre>"); } @@ -533,11 +534,16 @@ } - current.node("br", "Accepted query count=" - + getBigdataRDFContext().getQueryIdFactory().get()); + current.node("p").text("Accepted query count=") + .node("span").attr("id", "accepted-query-count") + .text("" +getBigdataRDFContext().getQueryIdFactory().get()) + .close() + .close(); - current.node("br", "Running query count=" - + getBigdataRDFContext().getQueries().size()); + current.node("p").text("Running query count=") + .node("span").attr("id", "running-query-count") + .text("" + getBigdataRDFContext().getQueries().size()).close() + .close(); // Offer a link to the "showQueries" page. { @@ -551,11 +557,12 @@ current.node("p").text("Show ") // - .node("a").attr("href", showQueriesURL).text("queries") + .node("a").attr("href", showQueriesURL) + .attr("id", "show-queries").text("queries").close() + .text(", ")// + .node("a").attr("href", showQueriesDetailsURL) + .attr("id", "show-query-details").text("query details") .close()// - .text(", ")// - .node("a").attr("href", showQueriesDetailsURL)// - .text("query details").close()// .text(".").close(); } @@ -566,12 +573,16 @@ .getNamespaces(getTimestamp(req)); current.node("h3", "Namespaces: "); + + XMLBuilder.Node ul = current.node("ul").attr("id", "namespaces"); for (String s : namespaces) { - current.node("p", s); + ul.node("li", s); } + + ul.close(); } @@ -621,7 +632,8 @@ // // } - current.node("pre", counterSet.toString()); + current.node("p").attr("id", "counter-set") + .text(counterSet.toString()).close(); } @@ -910,6 +922,7 @@ // Open <p>. 
current.node("p") + .attr("class", "update") // // .text("solutions=" + solutionsOut) // // @@ -917,9 +930,12 @@ // // // .text(", children=" + children.length) // - .text("elapsed=" + elapsedMillis + "ms") + .text("elapsed=").node("span") + .attr("class", "elapsed").text("" + elapsedMillis).close() + .text("ms") // .text(", ").node("a").attr("href", detailsURL) + .attr("class", "details-url") .text("details").close()// .close(); @@ -952,7 +968,8 @@ current.node("h2", "SPARQL"); - current.node("pre", queryString); + current.node("p").attr("class", "query-string") + .text(queryString).close(); } @@ -965,7 +982,8 @@ current.node("h2", "Parse Tree"); - current.node("pre", parseTree.dump("")); + current.node("p").attr("class", "parse-tree") + .text(parseTree.dump("")).close(); } @@ -976,7 +994,8 @@ current.node("h2", "Original AST"); - current.node("pre", originalAST.toString()); + current.node("p").attr("class", "original-ast") + .text(originalAST.toString()).close(); } @@ -1078,15 +1097,21 @@ current.node("p") // - .text("solutions=" + solutionsOut) + .text("solutions=").node("span").attr("class", "solutions") + .text(""+ solutionsOut).close() // - .text(", chunks=" + chunksOut) + .text(", chunks=").node("span").attr("class", "chunks") + .text(""+ chunksOut).close() // - .text(", children=" + children.length) + .text(", children=").node("span").attr("class", "children") + .text("" + children.length).close() // - .text(", elapsed=" + elapsedMillis + "ms") + .text(", elapsed=").node("span").attr("class", "elapsed") + .text("" + elapsedMillis).close() + .text("ms, ") // - .text(", ").node("a").attr("href", detailsURL) + .node("a").attr("href", detailsURL) + .attr("class", "details-url") .text("details").close()// .close(); @@ -1121,7 +1146,8 @@ current.node("h2", "SPARQL"); - current.node("pre", queryString); + current.node("p").attr("class", "query-string").text(queryString) + .close(); } @@ -1134,7 +1160,8 @@ current.node("h2", "Parse Tree"); - current.node("pre", parseTree.dump("")); + current.node("p").attr("class", "parse-tree") + .text(parseTree.dump("")).close(); } @@ -1145,7 +1172,8 @@ current.node("h2", "Original AST"); - current.node("pre", originalAST.toString()); + current.node("p").attr("class", "original-ast") + .text(originalAST.toString()).close(); } @@ -1156,7 +1184,8 @@ current.node("h2", "Optimized AST"); - current.node("pre", optimizedAST.toString()); + current.node("p").attr("class", "optimized-ast") + .text(optimizedAST.toString()).close(); } @@ -1167,8 +1196,8 @@ current.node("h2", "Query Plan"); - current.node("pre", BOpUtility - .toString(queryPlan)); + current.node("p").attr("class", "query-plan") + .text(BOpUtility.toString(queryPlan)).close(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-08-07 13:42:47 UTC (rev 8608) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-08-08 00:33:42 UTC (rev 8609) @@ -347,6 +347,14 @@ box-sizing: border-box; } +#status-text { + word-break: break-all; +} + +#zookeeper, #counter-set { + white-space: pre; +} + /* workbench checks if we're in HA mode and shows health tab if we are */ #tab-selector a[data-target=health] { display: none; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-09-19 12:27:04
|
Revision: 8655 http://sourceforge.net/p/bigdata/code/8655 Author: thompsonbry Date: 2014-09-19 12:26:56 +0000 (Fri, 19 Sep 2014) Log Message: ----------- This addresses a security vulnerability with the commons-fileupload component. I have run the NSS and BigdataSailWithQuads test suites locally. Committing to CI for broader validation. See #1010 (Update apache http commons-fileupload). Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/.classpath branches/BIGDATA_RELEASE_1_3_0/build.properties branches/BIGDATA_RELEASE_1_3_0/pom.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar Modified: branches/BIGDATA_RELEASE_1_3_0/.classpath =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/.classpath 2014-09-19 12:26:56 UTC (rev 8655) @@ -73,7 +73,6 @@ <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpclient-cache-4.1.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpcore-4.1.4.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/httpmime-4.1.3.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/apache/log4j-1.2.17.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.10"/> @@ -99,5 +98,6 @@ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar =================================================================== (Binary files differ) Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar =================================================================== (Binary files differ) Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar 2014-09-19 12:26:56 UTC (rev 8655) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/lib/httpcomponents/commons-fileupload-1.3.1.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== ---
branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-19 12:26:56 UTC (rev 8655) @@ -55,7 +55,7 @@ servlet.version=3.1.0 lucene.version=3.0.0 apache.commons_codec.version=1.4 -apache.commons_fileupload.version=1.2.2 +apache.commons_fileupload.version=1.3.1 apache.commons_io.version=2.1 apache.commons_logging.version=1.1.1 apache.httpclient.version=4.1.3 Modified: branches/BIGDATA_RELEASE_1_3_0/pom.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-09-18 10:34:41 UTC (rev 8654) +++ branches/BIGDATA_RELEASE_1_3_0/pom.xml 2014-09-19 12:26:56 UTC (rev 8655) @@ -83,7 +83,7 @@ <servlet.version>3.1.0</servlet.version> <lucene.version>3.0.0</lucene.version> <apache.commons_codec.version>1.4</apache.commons_codec.version> - <apache.commons_fileupload.version>1.2.2</apache.commons_fileupload.version> + <apache.commons_fileupload.version>1.3.1</apache.commons_fileupload.version> <apache.commons_io.version>2.1</apache.commons_io.version> <apache.commons_logging.version>1.1.1</apache.commons_logging.version> <apache.httpclient.version>4.1.3</apache.httpclient.version> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
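For reference, commons-fileupload 1.2.2 predates the fixes shipped in 1.3/1.3.1; the upgrade most likely targets the multipart-boundary denial-of-service issue tracked as CVE-2014-0050 (an inference on my part; the log message cites only #1010). A quick sketch, assuming the Apache release jar carries an Implementation-Version entry in its manifest, to confirm which version actually wins on the classpath after the jar swap:

    import org.apache.commons.fileupload.servlet.ServletFileUpload;

    public class CheckFileUploadVersion {

        public static void main(final String[] args) {

            // Resolve the package of a class known to live in the jar.
            final Package pkg = ServletFileUpload.class.getPackage();

            // Prints e.g. "1.3.1" when the upgraded jar is first on the
            // classpath. May print null under classloaders that do not
            // expose manifest attributes.
            System.out.println("commons-fileupload version: "
                    + pkg.getImplementationVersion());
        }
    }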
From: <tho...@us...> - 2014-09-22 14:54:42
|
Revision: 8656 http://sourceforge.net/p/bigdata/code/8656 Author: thompsonbry Date: 2014-09-22 14:54:35 +0000 (Mon, 22 Sep 2014) Log Message: ----------- I have integrated a high concurrency LRU into the QueryEngine. This is used to track the UUID of an operation that has been CANCELed through the REST API but which is neither (a) currently running; nor (b) recently done. In this case, the UUID is entered into the new "pendingCancelLRU". The pendingCancelLRU is checked before we start a new query (all code paths) and before we start a SPARQL UPDATE operation (REST API). If the operation is found in the pendingCancelLRU, then the Future of that operation is cancelled. This occurs before the Future is submitted for evaluation to avoid a race in which the operation might complete before it was cancelled. Changes are to: - QueryEngine: added pendingCancelLRU and associated access methods. The pendingCancelLRU is checked for QUERY in startEval(). - BigdataRDFContext: Handle pending CANCEL of UPDATE requests. - StatusServlet: Queue UUID in the pendingCancelLRU iff not found when CANCEL request is processed. The AST, SPARQL, and NSS test suites are good locally (but see #1015 for memory leaks in the test suite). Committed to CI. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -64,6 +64,7 @@ import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; +import com.bigdata.cache.ConcurrentWeakValueCache; import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.counters.CounterSet; import com.bigdata.counters.ICounterSetAccess; @@ -71,6 +72,7 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.journal.Journal; import com.bigdata.rawstore.IRawStore; +import com.bigdata.rdf.internal.constraints.TrueBOp; import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory; import com.bigdata.resources.IndexManager; import com.bigdata.service.IBigdataFederation; @@ -535,7 +537,7 @@ /** * The currently executing queries. */ - final private ConcurrentHashMap<UUID/* queryId */, AbstractRunningQuery> runningQueries = new ConcurrentHashMap<UUID, AbstractRunningQuery>(); + private final ConcurrentHashMap<UUID/* queryId */, AbstractRunningQuery> runningQueries = new ConcurrentHashMap<UUID, AbstractRunningQuery>(); /** * LRU cache used to handle problems with asynchronous termination of @@ -554,7 +556,7 @@ * enough that we can not have a false cache miss on a system which is * heavily loaded by a bunch of light queries. 
*/ - private LinkedHashMap<UUID, IHaltable<Void>> doneQueries = new LinkedHashMap<UUID,IHaltable<Void>>( + private final LinkedHashMap<UUID, IHaltable<Void>> doneQueries = new LinkedHashMap<UUID,IHaltable<Void>>( 16/* initialCapacity */, .75f/* loadFactor */, true/* accessOrder */) { private static final long serialVersionUID = 1L; @@ -568,6 +570,92 @@ }; /** + * A high concurrency cache operating as an LRU designed to close a data + * race between the asynchronous start of a submitted query or update + * operation and the explicit asynchronous CANCEL of that operation using + * its pre-assigned {@link UUID}. + * <p> + * When a CANCEL request is received, we probe both the + * {@link #runningQueries} and the {@link #doneQueries}. If no operation is + * associated with that request, then we probe the running UPDATE + * operations. Finally, if no such operation was discovered, then the + * {@link UUID} of the operation to be cancelled is entered into this + * collection. + * <p> + * Before a query starts, we consult the {@link #pendingCancelLRU}. If the + * {@link UUID} of the query is discovered, then the query is cancelled + * rather than run. + * <p> + * Note: The capacity of the backing hard reference queue is quite small. + * {@link UUID}s are only entered into this collection if a CANCEL request + * is asynchronously received either (a) before; or (b) long enough after a + * query or update is executed that it is not found in either the running + * queries map or the recently done queries map. + * + * TODO There are some cases that are not covered by this. First, we do not + * have {@link UUID}s for all REST API methods and thus they can not all be + * cancelled. If we allowed an HTTP header to specify the UUID of the + * request, then we could associate a UUID with all requests. The ongoing + * refactor to support clean interrupt of NSS requests (#753) and the + * ongoing refactor to support concurrent unisolated operations against the + * same journal (#566) will provide us with the mechanisms to identify all + * such operations so we can check their assigned UUIDs and cancel them when + * requested. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + * @see <a href="http://trac.bigdata.com/ticket/753"> HA doLocalAbort() + * should interrupt NSS requests and AbstractTasks </a> + * @see <a href="http://trac.bigdata.com/ticket/566"> Concurrent unisolated + * operations against multiple KBs on the same Journal </a> + * @see #startEval(UUID, PipelineOp, Map, IChunkMessage) + */ + private final ConcurrentWeakValueCache<UUID, UUID> pendingCancelLRU = new ConcurrentWeakValueCache<>( + 50/* queueCapacity (SWAG, but see above) */); + + /** + * Add a query {@link UUID} to the LRU of query identifiers for which we + * have received a CANCEL request, but were unable to find a running QUERY, + * recently done query, or running UPDATE request. + * + * @param queryId + * The UUID of the operation to be cancelled. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + */ + public void addPendingCancel(final UUID queryId) { + + if (queryId == null) + throw new IllegalArgumentException(); + + pendingCancelLRU.putIfAbsent(queryId, queryId); + + } + + /** + * Return <code>true</code> iff the {@link UUID} is in the collection of + * {@link UUID}s for which we have already received a CANCEL request. + * <p> + * Note: The {@link UUID} is removed from the pending cancel collection as a + * side-effect.
+ * + * @param queryId + * The {@link UUID} of the operation. + * + * @return <code>true</code> if that operation has already been marked for + * cancellation. + */ + public boolean pendingCancel(final UUID queryId) { + + if (queryId == null) + throw new IllegalArgumentException(); + + return pendingCancelLRU.remove(queryId) != null; + + } + + /** * A queue of {@link ChunkedRunningQuery}s having binding set chunks available for * consumption. * @@ -1695,6 +1783,22 @@ // if (c != null) // c.startCount.increment(); + if (pendingCancelLRU.containsKey(runningQuery.getQueryId())) { + /* + * The query was asynchronously scheduled for cancellation. + */ + + // Cancel the query. + runningQuery.cancel(true/* mayInterruptIfRunning */); + + // Remove from the CANCEL LRU. + pendingCancelLRU.remove(runningQuery.getQueryId()); + + // Return the query. It has already been cancelled. + return runningQuery; + + } + // notify query start runningQuery.startQuery(msg); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -75,6 +75,7 @@ import com.bigdata.BigdataStatics; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.counters.CAT; import com.bigdata.io.NullOutputStream; import com.bigdata.journal.IIndexManager; @@ -1015,6 +1016,30 @@ m_queries.put(queryId, r); m_queries2.put(queryId2, r); + /** + * Handle data races in CANCEL of an UPDATE operation whose + * cancellation was requested before it began to execute. + * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + */ + { + + final QueryEngine queryEngine = QueryEngineFactory + .getQueryController(getIndexManager()); + + if (queryEngine.pendingCancel(queryId2)) { + + /* + * There is a pending CANCEL for this UPDATE request, so + * cancel it now. + */ + updateFuture.cancel(true/* mayInterruptIfRunning */); + + } + + } + return update; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-09-19 12:26:56 UTC (rev 8655) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java 2014-09-22 14:54:35 UTC (rev 8656) @@ -234,17 +234,11 @@ * * @throws IOException * + * @see <a href="http://trac.bigdata.com/ticket/899"> REST API Query + * Cancellation </a> + * * FIXME GROUP COMMIT: Review cancellation and leader fail * scenarios. - * - * FIXME CANCEL: A remote client can not act to cancel a request - * that is in the queue until it begins to execute. This is - * because the UUID is not assigned until the request begins to - * execute. This is true for both SPARQL QUERY and SPARQL UPDATE - * requests. We need to track the CANCEL requests on a LRU and - * apply them if we observe the query / update arriving after - * the CANCEL. 
See <a href="http://trac.bigdata.com/ticket/988" - * > REST API cancellation of queries</a> */ static void doCancelQuery(final HttpServletRequest req, final HttpServletResponse resp, final IIndexManager indexManager, @@ -276,6 +270,7 @@ if (!tryCancelQuery(queryEngine, queryId)) { if (!tryCancelUpdate(context, queryId)) { + queryEngine.addPendingCancel(queryId); if (log.isInfoEnabled()) { log.info("No such QUERY or UPDATE: " + queryId); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
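Stripped of the bigdata-specific cache types, the protocol described in the log message above is small. Here is a minimal sketch, assuming a plain ConcurrentHashMap in place of the bounded ConcurrentWeakValueCache that the commit actually uses (PendingCancelRegistry is a hypothetical name for illustration only):

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Future;

    public class PendingCancelRegistry {

        // UUIDs of operations for which a CANCEL arrived before any matching
        // running or recently-done operation could be found.
        private final ConcurrentHashMap<UUID, UUID> pendingCancel =
                new ConcurrentHashMap<UUID, UUID>();

        /** Record a CANCEL whose target operation was not found anywhere. */
        public void addPendingCancel(final UUID queryId) {
            if (queryId == null)
                throw new IllegalArgumentException();
            pendingCancel.putIfAbsent(queryId, queryId);
        }

        /**
         * Invoked before an operation starts running. If a CANCEL was already
         * received, cancel the Future before it begins to execute, closing
         * the data race between submission and cancellation.
         */
        public boolean cancelIfPending(final UUID queryId, final Future<?> f) {
            if (pendingCancel.remove(queryId) != null) {
                f.cancel(true /* mayInterruptIfRunning */);
                return true;
            }
            return false;
        }
    }

The essential ordering guarantee is that the cancelIfPending() probe runs after the Future exists but before the operation is allowed to proceed (in the commit, before startEval() notifies query start and before the UPDATE future is returned), so a CANCEL that raced ahead of its target still wins.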
From: <tho...@us...> - 2014-09-25 19:00:52
|
Revision: 8659 http://sourceforge.net/p/bigdata/code/8659 Author: thompsonbry Date: 2014-09-25 19:00:48 +0000 (Thu, 25 Sep 2014) Log Message: ----------- Unwinding some dependencies that were dragging in the HALoadBalancerServlet, and hence the jetty ProxyServlet, when the WAR is deployed to a tomcat container. There is still an issue with the workbench: it specifies a servlet URL that includes the LBS prefix (/bigdata/LBS/). When the servlet container is not jetty, this prefix does not resolve to anything and everything falls over. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-09-25 16:40:16 UTC (rev 8658) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-09-25 19:00:48 UTC (rev 8659) @@ -102,9 +102,7 @@ import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.CreateKBTask; import com.bigdata.rdf.sail.webapp.ConfigParams; -import com.bigdata.rdf.sail.webapp.HALoadBalancerServlet; import com.bigdata.rdf.sail.webapp.NanoSparqlServer; -import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractHATransactionService; import com.bigdata.service.jini.FakeLifeCycle; @@ -570,6 +568,22 @@ private volatile Server jettyServer; /** + * Exposed to the test suite. + */ + WebAppContext getWebAppContext() { + + final Server server = jettyServer; + + if (server == null) + throw new IllegalStateException(); + + final WebAppContext wac = NanoSparqlServer.getWebApp(server); + + return wac; + + } + + /** * Enum of the run states. The states are labeled by the goal of the run * state. */ @@ -4616,47 +4630,6 @@ } /** - * Change the {@link IHALoadBalancerPolicy}. - * <p> - * TODO There are some intrinsic problems with this method that should be - * resolved before exposing it as an administrative API on the - * {@link HAGlue} interface. - * <p> - * (1) This only applies to running instances of the - * {@link HALoadBalancerServlet}. If an instance is started after this - * method is called, it will run with the as-configured - * {@link IHALoadBalancerPolicy} instance of the one specified in the last - * invocation of this method. - * <p> - * (2) There are various race conditions that exist with respect to: (a) the - * atomic change over of the {@link IHALoadBalancerPolicy} during an - * in-flight request; and (b) the atomic destroy of the old policy once - * there are no more in-flight requests using that old policy. - * - * TODO Either the {@link IHALoadBalancerPolicy} needs to be serializable or - * we need to pass along the class name and the configuration parameters.
- * For this case, the configuration should be set from the caller specified - * values rather than those potentially associated with <code>web.xml</code> - * , especially since <code>web.xml</code> might not even have the necessary - * configuration parameters defined for the caller specified policy. - */ - public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) { - - final Server server = this.jettyServer; - - if (server == null) - throw new IllegalStateException(); - - final WebAppContext wac = NanoSparqlServer.getWebApp(server); - - if (log.isInfoEnabled()) - log.info("Will set LBS: wac=" + wac + ", policy: " + policy); - - HALoadBalancerServlet.setLBSPolicy(wac.getServletContext(), policy); - - } - - /** * Conditionally create the default KB instance as identified in * <code>web.xml</code>. * Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-09-25 16:40:16 UTC (rev 8658) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2014-09-25 19:00:48 UTC (rev 8659) @@ -49,6 +49,7 @@ import net.jini.config.ConfigurationException; import org.apache.log4j.Logger; +import org.eclipse.jetty.webapp.WebAppContext; import com.bigdata.concurrent.FutureTaskMon; import com.bigdata.counters.PIDUtil; @@ -1444,6 +1445,36 @@ */ private AtomicReference<Throwable> lastRootCause = new AtomicReference<Throwable>(); + /** + * Change the {@link IHALoadBalancerPolicy}. + * <p> + * TODO There are some intrinsic problems with this method that should + * be resolved before exposing it as an administrative API on the + * {@link HAGlue} interface. + * <p> + * (1) This only applies to running instances of the + * {@link HALoadBalancerServlet}. If an instance is started after this + * method is called, it will run with the as-configured + * {@link IHALoadBalancerPolicy} instance of the one specified in the + * last invocation of this method. + * <p> + * (2) There are various race conditions that exist with respect to: (a) + * the atomic change over of the {@link IHALoadBalancerPolicy} during an + * in-flight request; and (b) the atomic destroy of the old policy once + * there are no more in-flight requests using that old policy. + * <p> + * (3) Exposing this method is just begging for trouble with the WAR + * artifact when deployed under a non-jetty container since it will drag + * in the jetty ProxyServlet. + * + * TODO Either the {@link IHALoadBalancerPolicy} needs to be + * serializable or we need to pass along the class name and the + * configuration parameters. For this case, the configuration should be + * set from the caller specified values rather than those potentially + * associated with <code>web.xml</code> , especially since + * <code>web.xml</code> might not even have the necessary configuration + * parameters defined for the caller specified policy. 
+ */ @Override public void setHALoadBalancerPolicy(final IHALoadBalancerPolicy policy) throws IOException { @@ -1454,8 +1485,15 @@ if (log.isInfoEnabled()) log.info("Will set LBS policy: " + policy); - getHAJournalServer().setHALoadBalancerPolicy(policy); + final HAJournalServer haJournalServer = getHAJournalServer(); + + final WebAppContext wac = haJournalServer.getWebAppContext(); + if (log.isInfoEnabled()) + log.info("Will set LBS: wac=" + wac + ", policy: " + policy); + + HALoadBalancerServlet.setLBSPolicy(wac.getServletContext(), policy); + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-09-25 16:40:16 UTC (rev 8658) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataServlet.java 2014-09-25 19:00:48 UTC (rev 8659) @@ -45,7 +45,6 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.quorum.AbstractQuorum; import com.bigdata.rdf.sail.webapp.client.IMimeTypes; -import com.bigdata.rdf.sail.webapp.lbs.IHALoadBalancerPolicy; import com.bigdata.rdf.task.AbstractApiTask; /** @@ -77,20 +76,24 @@ /** * The {@link ServletContext} attribute whose value is the prefix for the - * {@link HALoadBalancerServlet} iff it is running. + * HALoadBalancerServlet (DO NOT LINK JAVADOC) iff it is running. * <p> - * Note: Do NOT reference the <code>HALoadBalancerServlet</code> here. It - * will drag in the jetty dependencies and that breaks the tomcat WAR - * deployment. + * Note: Do NOT reference the <code>HALoadBalancerServlet</code> or anything + * in the <code>com.bigdata.rdf.sail.webapp.lbs</code> package here. It will + * drag in the jetty dependencies and that breaks the tomcat WAR deployment. */ static final String ATTRIBUTE_LBS_PREFIX = "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.prefix"; /** * The {@link ServletContext} attribute that is managed by the - * {@link HALoadBalancerServlet} and which maintains a collection of the - * active instances of that servlet. This is used to administer the - * {@link IHALoadBalancerPolicy} associated with the load balancer servlet - * instances. + * HALoadBalancerServlet (DO NOT LINK JAVADOC) and which maintains a + * collection of the active instances of that servlet. This is used to + * administer the IHALoadBalancerPolicy associated with the load balancer + * servlet instances. + * <p> + * Note: Do NOT reference the <code>HALoadBalancerServlet</code> or anything + * in the <code>com.bigdata.rdf.sail.webapp.lbs</code> package here. It will + * drag in the jetty dependencies and that breaks the tomcat WAR deployment. 
*/ static final String ATTRIBUTE_LBS_INSTANCES = "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.instances"; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-09-25 16:40:16 UTC (rev 8658) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2014-09-25 19:00:48 UTC (rev 8659) @@ -26,6 +26,8 @@ import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.math.BigInteger; import java.net.InetSocketAddress; import java.security.DigestException; @@ -36,6 +38,7 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; +import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -330,12 +333,35 @@ // HA Load Balancer. { - - p.text("Service: LBSPolicy=") - .node("span").attr("id", "lbs-policy") - .text(HALoadBalancerServlet.toString(req - .getServletContext())).close() - .node("br").close(); + /* + * Note: MUST NOT HAVE A DIRECT REFERENCE TO THIS CLASS OR + * IT WILL BREAK THE WAR ARTIFACT WHEN DEPLOYED TO A + * NON-JETTY CONTAINER SINCE THE JETTY ProxyServlet WILL NOT + * BE FOUND. + */ + try { + final Class<?> cls = Class + .forName("com.bigdata.rdf.sail.webapp.HALoadBalancerServlet"); + final Method m = cls.getMethod("toString", + new Class[] { ServletContext.class }); + final String rep = (String) m.invoke(null/* static */, + new Object[] { req.getServletContext() }); + p.text("Service: LBSPolicy=").node("span") + .attr("id", "lbs-policy").text(rep).close() + .node("br").close(); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } catch (NoSuchMethodException e) { + throw new RuntimeException(e); + } catch (SecurityException e) { + throw new RuntimeException(e); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } catch (IllegalArgumentException e) { + throw new RuntimeException(e); + } catch (InvocationTargetException e) { + throw new RuntimeException(e); + } } // if(true) { // /* This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
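The HAStatusServletUtil change above is the key trick: the jetty-specific HALoadBalancerServlet is now reached only via Class.forName, so merely loading HAStatusServletUtil under tomcat no longer pulls the jetty ProxyServlet onto the classpath. On a Java 7+ source level the six catch clauses in the diff collapse into two; a condensed sketch of the same reflective call (LbsPolicyProbe is a hypothetical name, and the committed code deliberately keeps the clauses separate):

    import java.lang.reflect.Method;
    import javax.servlet.ServletContext;

    final class LbsPolicyProbe {

        static String lbsPolicy(final ServletContext ctx) {
            try {
                // Resolve the optional, jetty-only class lazily so the class
                // file is never referenced from this compilation unit.
                final Class<?> cls = Class.forName(
                        "com.bigdata.rdf.sail.webapp.HALoadBalancerServlet");
                final Method m = cls.getMethod("toString", ServletContext.class);
                // toString(ServletContext) is static, so the receiver is null.
                return (String) m.invoke(null, ctx);
            } catch (ReflectiveOperationException | RuntimeException e) {
                // ReflectiveOperationException covers ClassNotFoundException,
                // NoSuchMethodException, IllegalAccessException and
                // InvocationTargetException.
                throw new RuntimeException(e);
            }
        }
    }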
From: <tho...@us...> - 2014-09-26 12:05:38
|
Revision: 8661 http://sourceforge.net/p/bigdata/code/8661 Author: thompsonbry Date: 2014-09-26 12:05:31 +0000 (Fri, 26 Sep 2014) Log Message: ----------- Bug fix for #1016 (LBS disabled by default in the workbench). Updated release notes. Bumped version and set snapshot=false for a release. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js branches/BIGDATA_RELEASE_1_3_0/build.properties Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt 2014-09-25 21:52:55 UTC (rev 8660) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_2.txt 2014-09-26 12:05:31 UTC (rev 8661) @@ -16,7 +16,7 @@ You can checkout this release from: -https://svn.code.sf.net/p/bigdata/code/tags/BIGDATA_RELEASE_1_3_1 +https://svn.code.sf.net/p/bigdata/code/tags/BIGDATA_RELEASE_1_3_2 Critical or otherwise of note in this minor release: @@ -68,6 +68,12 @@ 1.3.2: +- http://trac.bigdata.com/ticket/1016 (Jetty/LBS issues when deployed as WAR under tomcat) +- http://trac.bigdata.com/ticket/1010 (Upgrade apache http components to 1.3.1 (security)) +- http://trac.bigdata.com/ticket/1005 (Invalidate BTree objects if error occurs during eviction) +- http://trac.bigdata.com/ticket/1004 (Concurrent binding problem) +- http://trac.bigdata.com/ticket/1002 (Concurrency issues in JVMHashJoinUtility caused by MAX_PARALLEL query hint override) +- http://trac.bigdata.com/ticket/1000 (Add configuration option to turn off bottom-up evaluation) - http://trac.bigdata.com/ticket/999 (Extend BigdataSailFactory to take arbitrary properties) - http://trac.bigdata.com/ticket/998 (SPARQL Update through BigdataGraph) - http://trac.bigdata.com/ticket/996 (Add custom prefix support for query results) @@ -120,11 +126,6 @@ - http://trac.bigdata.com/ticket/765 (order by expr skips invalid expressions) - http://trac.bigdata.com/ticket/587 (JSP page to configure KBs) - http://trac.bigdata.com/ticket/343 (Stochastic assert in AbstractBTree#writeNodeOrLeaf() in CI) -- http://trac.bigdata.com/ticket/1010 (Upgrade apache http components to 1.3.1 (security)) -- http://trac.bigdata.com/ticket/1005 (Invalidate BTree objects if error occurs during eviction) -- http://trac.bigdata.com/ticket/1004 (Concurrent binding problem) -- http://trac.bigdata.com/ticket/1002 (Concurrency issues in JVMHashJoinUtility caused by MAX_PARALLEL query hint override) -- http://trac.bigdata.com/ticket/1000 (Add configuration option to turn off bottom-up evaluation) 1.3.1: Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-09-25 21:52:55 UTC (rev 8660) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-09-26 12:05:31 UTC (rev 8661) @@ -1888,7 +1888,7 @@ function startup() { // load namespaces, default namespace, HA status - useLBS(true); + useLBS(false); // Note: default to false. Otherwise workbench breaks when not deployed into jetty container. 
getNamespaces(true); getDefaultNamespace(); showHealthTab(); Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-25 21:52:55 UTC (rev 8660) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-09-26 12:05:31 UTC (rev 8661) @@ -75,7 +75,6 @@ blueprints.version=2.5.0 jettison.version=1.3.3 rexster.version=2.5.0 - # Set to false to NOT start services (zookeeper, lookup server, class server, etc). # When false, tests which depend on those services will not run. (This can also be # set by CI if you leave if undefined here.) For example: @@ -91,19 +90,19 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.3.1 +build.ver=1.3.2 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to # include the date. -snapshot=true +snapshot=false # Javadoc build may be disabled using this property. The javadoc target will # not be executed unless this property is defined (its value does not matter). # Note: The javadoc goes quite if you have enough memory, but can take forever # and then runs out of memory if the JVM is starved for RAM. The heap for the # javadoc JVM is explicitly set in the javadoc target in the build.xml file. -#javadoc= +javadoc= # packaging property set (rpm, deb). package.release=1 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-09-26 14:26:10
|
Revision: 8665 http://sourceforge.net/p/bigdata/code/8665 Author: mrpersonick Date: 2014-09-26 14:26:04 +0000 (Fri, 26 Sep 2014) Log Message: ----------- Ticket #1018: cancelAll() functionality on remote sail connection Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedBooleanQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedGraphQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedSparqlUpdate.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedTupleQuery.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQueryListener.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -172,7 +172,7 @@ // //// queryResult = parseResults(checkResponseCode(doSparqlQuery(opts))); - queryResult = repo.tupleResults(o, queryId); + queryResult = repo.tupleResults(o, queryId, null); } finally { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -32,8 +32,16 @@ import java.io.InputStream; import java.io.Reader; import java.net.URL; +import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.log4j.Logger; import org.openrdf.model.Graph; @@ -71,6 +79,7 @@ import com.bigdata.rdf.sail.webapp.client.IPreparedBooleanQuery; import com.bigdata.rdf.sail.webapp.client.IPreparedGraphQuery; +import com.bigdata.rdf.sail.webapp.client.IPreparedQueryListener; import com.bigdata.rdf.sail.webapp.client.IPreparedSparqlUpdate; import com.bigdata.rdf.sail.webapp.client.IPreparedTupleQuery; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; @@ -94,7 +103,8 @@ * setting a binding. 
* TODO Support baseURIs */ -public class BigdataSailRemoteRepositoryConnection implements RepositoryConnection { +public class BigdataSailRemoteRepositoryConnection + implements RepositoryConnection, IPreparedQueryListener { private static final transient Logger log = Logger .getLogger(BigdataSailRemoteRepositoryConnection.class); @@ -107,7 +117,140 @@ this.repo = repo; } + + /** + * A concurrency-managed list of running query ids. + */ + private final Map<UUID, UUID> queryIds = new ConcurrentHashMap<UUID, UUID>(); + + /** + * Manage access to the query ids. + */ + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + /** + * Cancel the specified query. + * + * @param queryId + * The query id. + * @throws Exception + */ + public void cancel(final UUID queryId) throws Exception { + + lock.readLock().lock(); + + try { + + repo.getRemoteRepository().cancel(queryId); + queryIds.remove(queryId); + + if (log.isDebugEnabled()) { + log.debug("Query cancelled: " + queryId); + log.debug("Queries running: " + Arrays.toString(queryIds.keySet().toArray())); + } + + } finally { + lock.readLock().unlock(); + } + + } + + /** + * Cancel all queries started by this connection that have not completed + * yet at the time of this request. + * + * @param queryId + * The query id. + * @throws Exception + */ + public void cancelAll() throws Exception { + + lock.writeLock().lock(); + + try { + + final RemoteRepository repo = this.repo.getRemoteRepository(); + + for (UUID queryId : queryIds.keySet()) { + repo.cancel(queryId); + } + queryIds.clear(); + + if (log.isDebugEnabled()) { + log.debug("All queries cancelled."); + log.debug("Queries running: " + Arrays.toString(queryIds.keySet().toArray())); + } + + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Return a list of all queries initiated by this connection that have + * not completed. + * @return + */ + public Set<UUID> getQueryIds() { + + lock.readLock().lock(); + + try { + return Collections.unmodifiableSet(queryIds.keySet()); + } finally { + lock.readLock().unlock(); + } + } + + /** + * Callback from the query evaluation object that the query result has been + * closed (the query either completed or was already cancelled). + * + * @param queryId + * The query id. + */ + public void closed(final UUID queryId) { + + lock.readLock().lock(); + + try { + + queryIds.remove(queryId); + + if (log.isDebugEnabled()) { + log.debug("Query completed normally: " + queryId); + log.debug("Queries running: " + Arrays.toString(queryIds.keySet().toArray())); + } + + } finally { + lock.readLock().unlock(); + } + } + + /** + * Add a newly launched query id. + * + * @param queryId + * The query id. + */ + public void addQueryId(final UUID queryId) { + + lock.readLock().lock(); + + try { + + queryIds.put(queryId, queryId); + + if (log.isDebugEnabled()) { + log.debug("Query started: " + queryId); Thread.dumpStack(); + log.debug("Queries running: " + Arrays.toString(queryIds.keySet().toArray())); + } + + } finally { + lock.readLock().unlock(); + } + } + public long count(final Resource s, final URI p, final Value o, final Resource... c) throws RepositoryException { @@ -135,9 +278,17 @@ final RemoteRepository remote = repo.getRemoteRepository(); - final GraphQueryResult src = - remote.getStatements(s, p, o, includeInferred, c); + final IPreparedGraphQuery query = + remote.getStatements2(s, p, o, includeInferred, c); + + /* + * Add to the list of running queries. Will be later removed + * via the IPreparedQueryListener callback. 
+ */ + addQueryId(query.getQueryId()); + final GraphQueryResult src = query.evaluate(this); + /* * Well this was certainly annoying. is there a better way? */ @@ -209,7 +360,7 @@ } @Override - public BooleanQuery prepareBooleanQuery(final QueryLanguage ql, + public RemoteBooleanQuery prepareBooleanQuery(final QueryLanguage ql, final String query) throws RepositoryException, MalformedQueryException { @@ -225,90 +376,14 @@ final IPreparedBooleanQuery q = remote.prepareBooleanQuery(query); - /* - * Only supports evaluate() right now. - */ - return new BooleanQuery() { - - @Override - public boolean evaluate() throws QueryEvaluationException { - try { - return q.evaluate(); - } catch (Exception ex) { - throw new QueryEvaluationException(ex); - } - } - - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) - */ - @Override - public int getMaxQueryTime() { - - final long millis = q.getMaxQueryMillis(); - - if (millis == -1) { - // Note: -1L is returned if the http header is not specified. - return -1; - - } - - return (int) TimeUnit.MILLISECONDS.toSeconds(millis); - - } - - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) - */ - @Override - public void setMaxQueryTime(final int seconds) { - - q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); - - } - - @Override - public void clearBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public BindingSet getBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public Dataset getDataset() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean getIncludeInferred() { - throw new UnsupportedOperationException(); - } - - @Override - public void removeBinding(String arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setBinding(String arg0, Value arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public void setDataset(Dataset arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setIncludeInferred(boolean arg0) { - throw new UnsupportedOperationException(); - } - - }; - + /* + * Add to the list of running queries. Will be later removed + * via the IPreparedQueryListener callback. + */ + addQueryId(q.getQueryId()); + + return new RemoteBooleanQuery(q); + } catch (Exception ex) { throw new RepositoryException(ex); @@ -345,98 +420,14 @@ final RemoteRepository remote = repo.getRemoteRepository(); final IPreparedGraphQuery q = remote.prepareGraphQuery(query); - - /* - * Only supports evaluate() right now. - */ - return new GraphQuery() { - - @Override - public GraphQueryResult evaluate() throws QueryEvaluationException { - try { - return q.evaluate(); - } catch (Exception ex) { - throw new QueryEvaluationException(ex); - } - } - - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on - * remote query) - */ - @Override - public int getMaxQueryTime() { - final long millis = q.getMaxQueryMillis(); - - if (millis == -1) { - // Note: -1L is returned if the http header is not specified. 
- return -1; - - } - - return (int) TimeUnit.MILLISECONDS.toSeconds(millis); - - } - - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on - * remote query) - */ - @Override - public void setMaxQueryTime(final int seconds) { - - q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); - - } - - @Override - public void clearBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public BindingSet getBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public Dataset getDataset() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean getIncludeInferred() { - throw new UnsupportedOperationException(); - } - - @Override - public void removeBinding(String arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setBinding(String arg0, Value arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public void setDataset(Dataset arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setIncludeInferred(boolean arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void evaluate(RDFHandler arg0) - throws QueryEvaluationException, RDFHandlerException { - throw new UnsupportedOperationException(); - } - - }; + /* + * Add to the list of running queries. Will be later removed + * via the IPreparedQueryListener callback. + */ + addQueryId(q.getQueryId()); + + return new RemoteGraphQuery(q); } catch (Exception ex) { @@ -494,98 +485,14 @@ final RemoteRepository remote = repo.getRemoteRepository(); final IPreparedTupleQuery q = remote.prepareTupleQuery(query); - - /* - * Only supports evaluate() right now. - */ - return new TupleQuery() { - - @Override - public TupleQueryResult evaluate() throws QueryEvaluationException { - try { - return q.evaluate(); - } catch (Exception ex) { - throw new QueryEvaluationException(ex); - } - } - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on - * remote query) - */ - @Override - public int getMaxQueryTime() { - - final long millis = q.getMaxQueryMillis(); - - if (millis == -1) { - // Note: -1L is returned if the http header is not specified. - return -1; - - } - - return (int) TimeUnit.MILLISECONDS.toSeconds(millis); - - } - - /** - * @see http://trac.bigdata.com/ticket/914 (Set timeout on - * remote query) - */ - @Override - public void setMaxQueryTime(final int seconds) { - - q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); - - } - - @Override - public void clearBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public BindingSet getBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public Dataset getDataset() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean getIncludeInferred() { - throw new UnsupportedOperationException(); - } - - @Override - public void removeBinding(String arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setBinding(String arg0, Value arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public void setDataset(Dataset arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setIncludeInferred(boolean arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void evaluate(TupleQueryResultHandler arg0) - throws QueryEvaluationException { - throw new UnsupportedOperationException(); - } - - }; + /* + * Add to the list of running queries. 
Will be later removed + * via the IPreparedQueryListener callback. + */ + addQueryId(q.getQueryId()); + + return new RemoteTupleQuery(q); } catch (Exception ex) { @@ -982,58 +889,7 @@ /* * Only execute() is currently supported. */ - return new Update() { - - @Override - public void execute() throws UpdateExecutionException { - try { - update.evaluate(); - } catch (Exception ex) { - throw new UpdateExecutionException(ex); - } - } - - @Override - public void clearBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public BindingSet getBindings() { - throw new UnsupportedOperationException(); - } - - @Override - public Dataset getDataset() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean getIncludeInferred() { - throw new UnsupportedOperationException(); - } - - @Override - public void removeBinding(String arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setBinding(String arg0, Value arg1) { - throw new UnsupportedOperationException(); - } - - @Override - public void setDataset(Dataset arg0) { - throw new UnsupportedOperationException(); - } - - @Override - public void setIncludeInferred(boolean arg0) { - throw new UnsupportedOperationException(); - } - - }; + return new RemoteUpdate(update); } catch (Exception ex) { @@ -1096,5 +952,351 @@ public ValueFactory getValueFactory() { throw new UnsupportedOperationException(); } + + public class RemoteTupleQuery implements TupleQuery { + + private final IPreparedTupleQuery q; + + public RemoteTupleQuery(final IPreparedTupleQuery q) { + this.q = q; + } + + public UUID getQueryId() { + return q.getQueryId(); + } + + @Override + public TupleQueryResult evaluate() throws QueryEvaluationException { + try { + return q.evaluate(BigdataSailRemoteRepositoryConnection.this); + } catch (Exception ex) { + throw new QueryEvaluationException(ex); + } + } + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public int getMaxQueryTime() { + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. 
+ return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public void setMaxQueryTime(final int seconds) { + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + } + + @Override + public void clearBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public BindingSet getBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public Dataset getDataset() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getIncludeInferred() { + throw new UnsupportedOperationException(); + } + + @Override + public void removeBinding(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setBinding(String arg0, Value arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDataset(Dataset arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setIncludeInferred(boolean arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void evaluate(TupleQueryResultHandler arg0) + throws QueryEvaluationException { + throw new UnsupportedOperationException(); + } + + } + + public class RemoteGraphQuery implements GraphQuery { + + private final IPreparedGraphQuery q; + + public RemoteGraphQuery(final IPreparedGraphQuery q) { + this.q = q; + } + + public UUID getQueryId() { + return q.getQueryId(); + } + + @Override + public GraphQueryResult evaluate() throws QueryEvaluationException { + try { + return q.evaluate(BigdataSailRemoteRepositoryConnection.this); + } catch (Exception ex) { + throw new QueryEvaluationException(ex); + } + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public int getMaxQueryTime() { + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. 
+ return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on + * remote query) + */ + @Override + public void setMaxQueryTime(final int seconds) { + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + } + + @Override + public void clearBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public BindingSet getBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public Dataset getDataset() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getIncludeInferred() { + throw new UnsupportedOperationException(); + } + + @Override + public void removeBinding(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setBinding(String arg0, Value arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDataset(Dataset arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setIncludeInferred(boolean arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void evaluate(RDFHandler arg0) + throws QueryEvaluationException, RDFHandlerException { + throw new UnsupportedOperationException(); + } + + } + + public class RemoteBooleanQuery implements BooleanQuery { + + private final IPreparedBooleanQuery q; + + public RemoteBooleanQuery(final IPreparedBooleanQuery q) { + this.q = q; + } + + public UUID getQueryId() { + return q.getQueryId(); + } + + @Override + public boolean evaluate() throws QueryEvaluationException { + try { + return q.evaluate(BigdataSailRemoteRepositoryConnection.this); + } catch (Exception ex) { + throw new QueryEvaluationException(ex); + } + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + @Override + public int getMaxQueryTime() { + + final long millis = q.getMaxQueryMillis(); + + if (millis == -1) { + // Note: -1L is returned if the http header is not specified. 
+ return -1; + + } + + return (int) TimeUnit.MILLISECONDS.toSeconds(millis); + + } + + /** + * @see http://trac.bigdata.com/ticket/914 (Set timeout on remote query) + */ + @Override + public void setMaxQueryTime(final int seconds) { + q.setMaxQueryMillis(TimeUnit.SECONDS.toMillis(seconds)); + } + + @Override + public void clearBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public BindingSet getBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public Dataset getDataset() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getIncludeInferred() { + throw new UnsupportedOperationException(); + } + + @Override + public void removeBinding(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setBinding(String arg0, Value arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDataset(Dataset arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setIncludeInferred(boolean arg0) { + throw new UnsupportedOperationException(); + } + + } + + public class RemoteUpdate implements Update { + + private final IPreparedSparqlUpdate q; + + public RemoteUpdate(final IPreparedSparqlUpdate q) { + this.q = q; + } + + public UUID getQueryId() { + return q.getQueryId(); + } + + @Override + public void execute() throws UpdateExecutionException { + try { + q.evaluate(BigdataSailRemoteRepositoryConnection.this); + } catch (Exception ex) { + throw new UpdateExecutionException(ex); + } + } + + @Override + public void clearBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public BindingSet getBindings() { + throw new UnsupportedOperationException(); + } + + @Override + public Dataset getDataset() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getIncludeInferred() { + throw new UnsupportedOperationException(); + } + + @Override + public void removeBinding(String arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setBinding(String arg0, Value arg1) { + throw new UnsupportedOperationException(); + } + + @Override + public void setDataset(Dataset arg0) { + throw new UnsupportedOperationException(); + } + + @Override + public void setIncludeInferred(boolean arg0) { + throw new UnsupportedOperationException(); + } + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedBooleanQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedBooleanQuery.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedBooleanQuery.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -35,6 +35,25 @@ */ public interface IPreparedBooleanQuery extends IPreparedQuery { + /** + * Evaluate the boolean query. + * + * @param listener + * The query listener. + * @return The result. + * + * @throws Exception + */ boolean evaluate() throws Exception; + /** + * Evaluate the boolean query, notify the specified listener when complete. + * + * @param listener + * The query listener. + * @return The result. 
+ * + * @throws Exception + */ + boolean evaluate(IPreparedQueryListener listener) throws Exception; } \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedGraphQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedGraphQuery.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedGraphQuery.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -37,6 +37,24 @@ */ public interface IPreparedGraphQuery extends IPreparedQuery { + /** + * Evaluate the graph query. + * + * @return The result. + * + * @throws Exception + */ GraphQueryResult evaluate() throws Exception; + /** + * Evaluate the graph query, notify the specified listener when complete. + * + * @param listener + * The query listener. + * @return The result. + * + * @throws Exception + */ + GraphQueryResult evaluate(IPreparedQueryListener listener) + throws Exception; } \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQueryListener.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQueryListener.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQueryListener.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -0,0 +1,45 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-Infinity. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rdf.sail.webapp.client; + +import java.util.UUID; + +/** + * A listener for IPreparedQuery evaluate objects. + */ +public interface IPreparedQueryListener { + + /** + * Callback method from the query evaluation object (GraphQueryResult, + * TupleQueryResult, BooleanQueryResult) notifying that the result object + * has been closed and the query has either completed or been + * cancelled. + * + * @param uuid + * The query id. 
+ */ + void closed(final UUID queryId); + +} Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQueryListener.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedSparqlUpdate.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedSparqlUpdate.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedSparqlUpdate.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -37,6 +37,16 @@ void evaluate() throws Exception; + /** + * Evaluate and notify the specified listener when complete. + * + * @param listener + * The query listener. + * + * @throws Exception + */ + void evaluate(IPreparedQueryListener listener) throws Exception; + UUID getQueryId(); } \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedTupleQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedTupleQuery.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedTupleQuery.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -46,4 +46,16 @@ */ public TupleQueryResult evaluate() throws Exception; + /** + * Evaluate the tuple query, notify the specified listener when complete. + * + * @param listener + * The query listener. + * @return The result. + * + * @throws Exception + */ + public TupleQueryResult evaluate(IPreparedQueryListener listener) + throws Exception; + } \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -521,7 +521,7 @@ // checkResponseCode(response = doConnect(opts)); - return graphResults(opts, null); + return graphResults(opts, null, null); } @@ -600,7 +600,7 @@ * * TODO includeInferred is currently ignored. */ - public GraphQueryResult getStatements(final Resource subj, final URI pred, + public IPreparedGraphQuery getStatements2(final Resource subj, final URI pred, final Value obj, final boolean includeInferred, final Resource... contexts) throws Exception { @@ -673,7 +673,28 @@ final IPreparedGraphQuery query = prepareGraphQuery(queryStr); - return query.evaluate(); + return query; + + } + + /** + * Return all matching statements. + * + * @param subj + * @param pred + * @param obj + * @param includeInferred + * @param contexts + * @return + * @throws Exception + * + * TODO includeInferred is currently ignored. + */ + public GraphQueryResult getStatements(final Resource subj, final URI pred, + final Value obj, final boolean includeInferred, + final Resource... 
contexts) throws Exception { + + return getStatements2(subj, pred, obj, includeInferred, contexts).evaluate(); } @@ -1238,9 +1259,17 @@ @Override public TupleQueryResult evaluate() throws Exception { + return evaluate(null); + + } + + @Override + public TupleQueryResult evaluate(final IPreparedQueryListener listener) + throws Exception { + setupConnectOptions(); - return tupleResults(opts, getQueryId()); + return tupleResults(opts, getQueryId(), listener); } @@ -1268,9 +1297,17 @@ @Override public GraphQueryResult evaluate() throws Exception { + return evaluate(null); + + } + + @Override + public GraphQueryResult evaluate(final IPreparedQueryListener listener) + throws Exception { + setupConnectOptions(); - return graphResults(opts, getQueryId()); + return graphResults(opts, getQueryId(), listener); } @@ -1300,9 +1337,17 @@ @Override public boolean evaluate() throws Exception { + return evaluate(null); + + } + + @Override + public boolean evaluate(final IPreparedQueryListener listener) + throws Exception { + setupConnectOptions(); - return booleanResults(opts, getQueryId()); + return booleanResults(opts, getQueryId(), listener); // HttpResponse response = null; // try { @@ -1344,7 +1389,15 @@ @Override public void evaluate() throws Exception { + + evaluate(null); + + } + @Override + public void evaluate(final IPreparedQueryListener listener) + throws Exception { + HttpResponse response = null; try { @@ -1367,6 +1420,10 @@ } + if (listener != null) { + listener.closed(getQueryId()); + } + } } @@ -1797,14 +1854,18 @@ * * @param response * The connection from which to read the results. + * @param listener + * The listener to notify when the query result has been + * closed (optional). * * @return The results. * * @throws Exception * If anything goes wrong. */ - public TupleQueryResult tupleResults(final ConnectOptions opts, final UUID queryId) - throws Exception { + public TupleQueryResult tupleResults(final ConnectOptions opts, + final UUID queryId, final IPreparedQueryListener listener) + throws Exception { HttpResponse response = null; HttpEntity entity = null; @@ -1893,6 +1954,13 @@ } + /* + * Notify the listener. + */ + if (listener != null) { + listener.closed(queryId); + } + } }; @@ -1935,6 +2003,9 @@ * * @param response * The connection from which to read the results. + * @param listener + * The listener to notify when the query result has been + * closed (optional). * * @return The graph * @@ -1942,7 +2013,8 @@ * If anything goes wrong. */ public GraphQueryResult graphResults(final ConnectOptions opts, - final UUID queryId) throws Exception { + final UUID queryId, final IPreparedQueryListener listener) + throws Exception { HttpResponse response = null; HttpEntity entity = null; @@ -2047,6 +2119,10 @@ } + if (listener != null) { + listener.closed(queryId); + } + } }; @@ -2099,7 +2175,9 @@ * If anything goes wrong, including if the result set does not * encode a single boolean value. 
*/ - protected boolean booleanResults(final ConnectOptions opts, final UUID queryId) throws Exception { + protected boolean booleanResults(final ConnectOptions opts, + final UUID queryId, final IPreparedQueryListener listener) + throws Exception { HttpResponse response = null; HttpEntity entity = null; @@ -2150,6 +2228,10 @@ cancel(queryId); } catch (Exception ex) {log.warn(ex); } } + + if (listener != null) { + listener.closed(queryId); + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-09-26 13:03:10 UTC (rev 8664) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2014-09-26 14:26:04 UTC (rev 8665) @@ -214,7 +214,7 @@ opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER); - return graphResults(opts, null/* queryId */); + return graphResults(opts, null/* queryId */, null); // try { // // check response in try. |
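To see how the new listener hooks fit together from the client side, here is a minimal sketch. IPreparedTupleQuery, IPreparedQueryListener, and the evaluate(listener) overload are taken from the diffs above; the class name, the query string, and the assumption of an already-connected RemoteRepository exposing prepareTupleQuery(String) are illustrative only, not part of the commit.

import java.util.UUID;

import org.openrdf.query.TupleQueryResult;

import com.bigdata.rdf.sail.webapp.client.IPreparedQueryListener;
import com.bigdata.rdf.sail.webapp.client.IPreparedTupleQuery;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

public class QueryListenerSketch {

    /** Run a query and report when its result object has been closed. */
    public static void dump(final RemoteRepository repo) throws Exception {

        final IPreparedTupleQuery query = repo
                .prepareTupleQuery("SELECT * { ?s ?p ?o } LIMIT 10");

        // Callback runs once the result object is closed, i.e. the query
        // has either completed or been cancelled.
        final IPreparedQueryListener listener = new IPreparedQueryListener() {
            @Override
            public void closed(final UUID queryId) {
                System.out.println("query closed: " + queryId);
            }
        };

        final TupleQueryResult result = query.evaluate(listener);
        try {
            while (result.hasNext()) {
                System.out.println(result.next());
            }
        } finally {
            result.close(); // triggers listener.closed(queryId)
        }
    }
}

The listener fires exactly when the result object is closed, so the try/finally both releases the HTTP resources and drives the callback.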
From: <jer...@us...> - 2014-09-29 18:53:25
|
Revision: 8670 http://sourceforge.net/p/bigdata/code/8670 Author: jeremy_carroll Date: 2014-09-29 18:53:17 +0000 (Mon, 29 Sep 2014) Log Message: ----------- Addressing memory leaks in ProtocolTests, trac 1015. Adding a tearDownSuite method to close each mode of the protocol test, and explicitly clearing the standaloneQECache, which appeared in testing to be the source of some of the leaks. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -71,8 +71,15 @@ private static ConcurrentWeakValueCache<IBTreeManager, QueryEngine> standaloneQECache = new ConcurrentWeakValueCache<IBTreeManager, QueryEngine>( 0/* queueCapacity */ ); - /** + * During testing the standaloneQECache can be a source of memory leaks, this method clears it. 
+ */ + public static void clearStandAloneQECacheDuringTesting() { + standaloneQECache = new ConcurrentWeakValueCache<IBTreeManager, QueryEngine>( + 0/* queueCapacity */ + ); + } + /** * Weak value cache to enforce the singleton pattern for * {@link IBigdataClient}s (the data services are query engine peers rather * than controllers and handle their own query engine initialization so as Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractIndexManagerTestCase.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -144,5 +144,8 @@ } } + + public void tearDownAfterSuite() { + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -86,7 +86,7 @@ */ final String update = update(); - final HttpServlet servlet; + HttpServlet servlet; HttpClient client; private String responseContentType = null; private String accept = null; @@ -133,6 +133,7 @@ public void tearDown() throws Exception { client.getConnectionManager().shutdown(); client = null; + servlet = null; super.tearDown(); } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -29,6 +29,9 @@ import java.util.Enumeration; import java.util.regex.Pattern; +import com.bigdata.bop.fed.QueryEngineFactory; + +import junit.extensions.TestSetup; import junit.extensions.proxy.ProxyTestSuite; import junit.framework.Test; import junit.framework.TestCase; @@ -64,14 +67,21 @@ private static class MultiModeTestSuite extends TestSuite { private final ProxyTestSuite subs[]; - + public MultiModeTestSuite(String name, TestMode ...modes ) { super(name); subs = new ProxyTestSuite[modes.length]; int i = 0; for (final TestMode mode: modes) { final ProxyTestSuite suite2 = TestNanoSparqlServerWithProxyIndexManager.createProxyTestSuite(TestNanoSparqlServerWithProxyIndexManager.getTemporaryJournal(),mode); - super.addTest(suite2); + super.addTest(new TestSetup(suite2) { + protected void setUp() throws Exception { + } + protected void tearDown() throws Exception { + suite2.tearDownSuite(); + QueryEngineFactory.clearStandAloneQECacheDuringTesting(); + } + }); suite2.setName(mode.name()); subs[i++] = suite2; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java =================================================================== --- 
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -533,4 +533,10 @@ } + @Override + public void tearDownAfterSuite() { + this.m_indexManager.destroy(); + this.m_indexManager = null; + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java 2014-09-29 18:06:28 UTC (rev 8669) +++ branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java 2014-09-29 18:53:17 UTC (rev 8670) @@ -25,6 +25,8 @@ import org.apache.log4j.Logger; +import com.bigdata.rdf.sail.webapp.AbstractIndexManagerTestCase; + /** * <p> * A simple wrapper around {@link TestSuite} that permits the caller to specify * {@link ProxyTestSuite}. */ - private final Test m_delegate; + private Test m_delegate; /** * <p> @@ -367,5 +369,13 @@ } } + + public void tearDownSuite() { + if (m_delegate instanceof AbstractIndexManagerTestCase) { + ((AbstractIndexManagerTestCase)m_delegate).tearDownAfterSuite(); + } + m_delegate = null; + + } } |
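The TestSetup wrapper introduced in ProxySuiteHelper above is the standard JUnit 3 idiom for one-time, suite-scoped setup and teardown. A self-contained sketch of the pattern follows, with hypothetical resource and test names, assuming nothing beyond the JUnit 3 API:

import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;

public class SuiteTeardownSketch {

    /** Hypothetical suite-scoped resource, standing in for an index manager. */
    static final class ExpensiveResource {
        void destroy() { /* close files, caches, thread pools, ... */ }
    }

    static ExpensiveResource shared;

    /** A trivial test that uses the shared resource. */
    public static class MyTest extends TestCase {
        public void testSomething() {
            assertNotNull(shared);
        }
    }

    public static Test suite() {
        final TestSuite suite = new TestSuite(MyTest.class);
        // setUp() runs once before the first test of the wrapped suite and
        // tearDown() once after the last, so suite-scoped state is created
        // and, critically for leak prevention, released and unreferenced.
        return new TestSetup(suite) {
            @Override
            protected void setUp() {
                shared = new ExpensiveResource();
            }
            @Override
            protected void tearDown() {
                shared.destroy();
                shared = null; // drop the strong reference
            }
        };
    }
}

Because tearDown() on the wrapper runs once after the last test in the suite, it is the natural place to destroy shared state and null out strong references, which is exactly what the memory-leak fix relies on.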
From: <jer...@us...> - 2014-09-29 23:30:19
|
Revision: 8672 http://sourceforge.net/p/bigdata/code/8672 Author: jeremy_carroll Date: 2014-09-29 23:29:59 +0000 (Mon, 29 Sep 2014) Log Message: ----------- Commit r8670 did not compile correctly from ant because of layering issues. This commit backs out the problematic code and replaces it. Revision Links: -------------- http://sourceforge.net/p/bigdata/code/8670 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-29 22:40:46 UTC (rev 8671) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-29 23:29:59 UTC (rev 8672) @@ -76,7 +76,7 @@ protected void setUp() throws Exception { } protected void tearDown() throws Exception { - suite2.tearDownSuite(); + ((TestNanoSparqlServerWithProxyIndexManager)suite2.getDelegate()).tearDownAfterSuite(); /* * Note: Do not clear. Will not leak unless the * QueryEngine objects are pinned. They will not be Modified: branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java 2014-09-29 22:40:46 UTC (rev 8671) +++ branches/BIGDATA_RELEASE_1_3_0/junit-ext/src/java/junit/extensions/proxy/ProxyTestSuite.java 2014-09-29 23:29:59 UTC (rev 8672) @@ -77,7 +77,7 @@ * {@link ProxyTestSuite}. */ - private Test m_delegate; + private final Test m_delegate; /** * <p> @@ -370,12 +370,6 @@ } - public void tearDownSuite() { - if (m_delegate instanceof AbstractIndexManagerTestCase) { - ((AbstractIndexManagerTestCase)m_delegate).tearDownAfterSuite(); - } - m_delegate = null; - - } + + } |
From: <jer...@us...> - 2014-09-30 15:57:06
|
Revision: 8673 http://sourceforge.net/p/bigdata/code/8673 Author: jeremy_carroll Date: 2014-09-30 15:57:00 +0000 (Tue, 30 Sep 2014) Log Message: ----------- Completing excision of clearStandAloneQECacheDuringTesting Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java 2014-09-29 23:29:59 UTC (rev 8672) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/fed/QueryEngineFactory.java 2014-09-30 15:57:00 UTC (rev 8673) @@ -71,15 +71,8 @@ private static ConcurrentWeakValueCache<IBTreeManager, QueryEngine> standaloneQECache = new ConcurrentWeakValueCache<IBTreeManager, QueryEngine>( 0/* queueCapacity */ ); + /** - * During testing the standaloneQECache can be a source of memory leaks, this method clears it. - */ - public static void clearStandAloneQECacheDuringTesting() { - standaloneQECache = new ConcurrentWeakValueCache<IBTreeManager, QueryEngine>( - 0/* queueCapacity */ - ); - } - /** * Weak value cache to enforce the singleton pattern for * {@link IBigdataClient}s (the data services are query engine peers rather * than controllers and handle their own query engine initialization so as Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-29 23:29:59 UTC (rev 8672) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java 2014-09-30 15:57:00 UTC (rev 8673) @@ -81,7 +81,8 @@ * Note: Do not clear. Will not leak unless the * QueryEngine objects are pinned. They will not be * pinned if you shutdown the Journal correctly for each - * test. + * test; the call to tearDownAfterSuite above calls the destroy() method + * on temporary journals, which appears to do the necessary thing. */ // QueryEngineFactory.clearStandAloneQECacheDuringTesting(); } |
From: <tho...@us...> - 2014-10-28 13:17:45
|
Revision: 8692 http://sourceforge.net/p/bigdata/code/8692 Author: thompsonbry Date: 2014-10-28 13:17:38 +0000 (Tue, 28 Oct 2014) Log Message: ----------- Pushing critical bug fixes for 1.3.3 RELEASE. #1021 Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback() (critical bug fix) #1026 SPARQL UPDATE with runtime errors causes problems with lexicon indices (critical bug fix) #1029 RWStore commit state not correctly rolled back if abort fails on empty journal (minor issue) #1030 RWStorage stats cleanup (minor issue) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestJournalAbort.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-10-28 13:12:49 UTC (rev 8691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -553,6 +553,16 @@ * @see #getName2Addr() */ private volatile Name2Addr _name2Addr; + + /** + * An atomic state specifying whether a clean abort is required. This is set + * to true by critical section code in the _abort if it does not complete cleanly. + * <p> + * It is checked in the commit() method ensure updates are protected. + * + * @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + */ + private final AtomicBoolean abortRequired = new AtomicBoolean(false); /** * Return the "live" BTree mapping index names to the last metadata record @@ -2745,6 +2755,8 @@ final WriteLock lock = _fieldReadWriteLock.writeLock(); lock.lock(); + // @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + boolean success = false; try { @@ -2757,6 +2769,8 @@ if (_bufferStrategy == null) { // Nothing to do. + success = true; + return; } @@ -2896,8 +2910,12 @@ if (log.isInfoEnabled()) log.info("done"); + + success = true; // mark successful abort. } finally { + // @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + abortRequired.set(!success); lock.unlock(); @@ -3049,6 +3067,10 @@ @Override public long commit() { + + // Critical Section Check. @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + if (abortRequired.get()) // FIXME Move this into commitNow() after tagging hot fix.(mark to maek sure this gets done). + throw new IllegalStateException("Commit cannot be called, a call to abort must be made before further updates"); // The timestamp to be assigned to this commit point. 
final long commitTime = nextCommitTimestamp(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-10-28 13:12:49 UTC (rev 8691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -3007,6 +3007,29 @@ * (RWStore does not discard deferred deletes on reset) */ m_deferredFreeOut.reset(); + + /* + * Reset any storage stats + * FIXME: Change StorageStats internals to be able to efficiently commit/reset and avoid disk read + */ + if (m_storageStatsAddr != 0) { + final long statsAddr = m_storageStatsAddr >> 16; + final int statsLen = ((int) m_storageStatsAddr) & 0xFFFF; + final byte[] stats = new byte[statsLen + 4]; // allow for checksum + getData(statsAddr, stats); + final DataInputStream instr = new DataInputStream(new ByteArrayInputStream(stats)); + try { + m_storageStats = new StorageStats(instr); + for (FixedAllocator fa: m_allocs) { + m_storageStats.register(fa); + } + } catch (IOException e) { + throw new RuntimeException("Unable to reset storage stats", e); + } + } else { + m_storageStats = new StorageStats(m_allocSizes); + } + } catch (Exception e) { throw new IllegalStateException("Unable to reset the store", e); } finally { @@ -3156,7 +3179,7 @@ /** Reset pre-commit state to support reset/abort/rollback. */ void reset() { - if (!m_allocationWriteLock.isHeldByCurrentThread()) + if (!m_allocationWriteLock.isHeldByCurrentThread()) throw new IllegalMonitorStateException(); RWStore.this.m_storageStatsAddr = m_storageStatsAddr; RWStore.this.m_committedNextAllocation = m_lastCommittedNextAllocation; Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestJournalAbort.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestJournalAbort.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestJournalAbort.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -0,0 +1,240 @@ +package com.bigdata.journal; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; + +import junit.framework.TestCase2; + +import com.bigdata.btree.BTree; +import com.bigdata.btree.IndexMetadata; +import com.bigdata.journal.Journal.Options; +import com.bigdata.rawstore.Bytes; +import com.bigdata.rwstore.RWStore; +import com.bigdata.util.InnerCause; + +/** + * Test suite for a failure to handle errors inside of abort() by marking the + * journal as requiring abort(). + * + * @see #1021 (Add critical section protection to AbstractJournal.abort() and + * BigdataSailConnection.rollback()) + * + * @author martyncutcher + * + * TODO Thia should be a proxied test suite. It is RWStore specific. 
+ */ +public class TestJournalAbort extends TestCase2 { + + /** + * + */ + public TestJournalAbort() { + } + + /** + * @param name + */ + public TestJournalAbort(String name) { + super(name); + } + + @Override + public void setUp() throws Exception { + + super.setUp(); + + } + + @Override + public void tearDown() throws Exception { + + TestHelper.checkJournalsClosed(this); + + super.tearDown(); + } + + @Override + public Properties getProperties() { + File file; + try { + file = File.createTempFile(getName(), Options.JNL); + file.deleteOnExit(); + } catch (IOException e) { + throw new RuntimeException(e); + } + + final Properties properties = new Properties(); + + properties.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString()); + + properties.setProperty(Options.FILE, file.toString()); + + properties.setProperty(Journal.Options.INITIAL_EXTENT, "" + + Bytes.megabyte * 10); + + return properties; + + } + + static private class AbortException extends RuntimeException { + private static final long serialVersionUID = 1L; + + AbortException(String msg) { + super(msg); + } + } + + /** + * In this test we want to run through some data inserts, commits and aborts. + * + * The overridden Journal will fail to abort correctly by overriding + * the discardcommitters method that AbstractJournal calls after calling bufferStragey.reset(). + * + * @throws InterruptedException + */ + public void test_simpleAbortFailure() throws InterruptedException { + + // Define atomic to control whether abort should succeed or fail + final AtomicBoolean succeed = new AtomicBoolean(true); + + final Journal jnl = new Journal(getProperties()) { + @Override + protected void discardCommitters() { + + if (succeed.get()) { + super.discardCommitters(); + } else { + throw new AbortException("Something wrong"); + } + + } + }; + + final RWStrategy strategy = (RWStrategy) jnl.getBufferStrategy(); + final RWStore store = strategy.getStore(); + + final String btreeName = "TestBTreeAbort"; + + // 1) Create and commit some data + // 2) Create more data and Abort success + // 4) Create and commit more data (should work) + // 3) Create more data and Abort fail + // 4) Create and commit more data (should fail) + + BTree btree = createBTree(jnl); + + jnl.registerIndex(btreeName, btree); + + btree.writeCheckpoint(); + jnl.commit(); + + System.out.println("Start Commit Counter: " + jnl.getCommitRecord().getCommitCounter()); + // 1) Add some data and commit + addSomeData(btree); + btree.writeCheckpoint(); + jnl.commit(); + System.out.println("After Data Commit Counter: " + jnl.getCommitRecord().getCommitCounter()); + + btree.close(); // force re-open + + btree = jnl.getIndex(btreeName); + + addSomeData(btree); + btree.writeCheckpoint(); + jnl.commit(); + + + // Show Allocators + final StringBuilder sb1 = new StringBuilder(); + store.showAllocators(sb1); + + if(log.isInfoEnabled()) log.info(sb1.toString()); + + // 2) Add more data and abort + if(log.isInfoEnabled()) log.info("Pre Abort Commit Counter: " + jnl.getCommitRecord().getCommitCounter()); + btree.close(); // force re-open + addSomeData(btree); + btree.writeCheckpoint(); + jnl.abort(); + if(log.isInfoEnabled()) log.info("Post Abort Commit Counter: " + jnl.getCommitRecord().getCommitCounter()); + + btree.close(); // force re-open after abort + btree = jnl.getIndex(btreeName); + + // Show Allocators again (should be the same visually) + final StringBuilder sb2 = new StringBuilder(); + store.showAllocators(sb2); + + if(log.isInfoEnabled()) log.info("After Abort\n" + 
sb2.toString()); + + // 3) More data and commit + addSomeData(btree); + btree.writeCheckpoint(); + jnl.commit(); + + // Show Allocators + final StringBuilder sb3 = new StringBuilder(); + store.showAllocators(sb3); + if(log.isInfoEnabled()) log.info("After More Data\n" + sb3.toString()); + + // 4) More data and bad abort + addSomeData(btree); + btree.writeCheckpoint(); + succeed.set(false); + try { + jnl.abort(); + fail(); + } catch (Exception e) { + // Check the Abort was Aborted + assertTrue(InnerCause.isInnerCause(e, AbortException.class)); + // good, let's see what state it is in now + } + + btree.close(); + + // 5) More data and bad commit (after bad abort) + try { + addSomeData(btree); + btree.writeCheckpoint(); + jnl.commit(); + fail(); + } catch (Exception e) { + + if(log.isInfoEnabled()) log.info("Expected exception", e); + + succeed.set(true); + jnl.abort(); // successful abort! + } + + btree = jnl.getIndex(btreeName); + + // 6) More data and good commit (after good abort) + addSomeData(btree); + btree.writeCheckpoint(); + jnl.commit(); + } + + private void addSomeData(final BTree btree) { + + final Random r = new Random(); + + for (int n = 0; n < 2000; n++) { + final byte[] key = new byte[64]; + final byte[] value = new byte[256]; + r.nextBytes(key); + r.nextBytes(value); + btree.insert(key, value); + } + } + + private BTree createBTree(final Journal store) { + final IndexMetadata metadata = new IndexMetadata(UUID.randomUUID()); + + return BTree.create(store, metadata); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-10-28 13:12:49 UTC (rev 8691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -62,6 +62,7 @@ import com.bigdata.journal.Journal; import com.bigdata.journal.Journal.Options; import com.bigdata.journal.RWStrategy; +import com.bigdata.journal.TestJournalAbort; import com.bigdata.journal.TestJournalBasics; import com.bigdata.journal.VerifyCommitRecordIndex; import com.bigdata.rawstore.AbstractRawStoreTestCase; @@ -126,10 +127,20 @@ */ suite.addTest(TestJournalBasics.suite()); + /* + * TODO This should be a proxied test suite. It is RWStore specific + * right now. 
+ * + * @see #1021 (Add critical section protection to + * AbstractJournal.abort() and BigdataSailConnection.rollback()) + */ + suite.addTestSuite(TestJournalAbort.class); + return suite; } + @Override public Properties getProperties() { final Properties properties = super.getProperties(); @@ -2178,12 +2189,12 @@ * not robust to internal failure.</a> */ public void test_commitStateError() { - Journal store = (Journal) getStore(); + final Journal store = (Journal) getStore(); try { - RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final RWStrategy bs = (RWStrategy) store.getBufferStrategy(); - RWStore rws = bs.getStore(); + final RWStore rws = bs.getStore(); final long addr = bs.write(randomData(78)); @@ -2260,10 +2271,10 @@ } public void test_allocCommitFreeWithHistory() { - Journal store = (Journal) getStore(4); + final Journal store = (Journal) getStore(4); try { - RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final RWStrategy bs = (RWStrategy) store.getBufferStrategy(); final long addr = bs.write(randomData(78)); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-10-28 13:12:49 UTC (rev 8691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -70,6 +70,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -1652,7 +1653,14 @@ * those at a time). */ private final boolean unisolated; + + /** + * Critical section support in case rollback is not completed cleanly, in which + * case calls to commit() will fail until a clean rollback() is made. @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + */ + private final AtomicBoolean rollbackRequired = new AtomicBoolean(false); + public String toString() { return getClass().getName() + "{timestamp=" @@ -3150,26 +3158,33 @@ */ @Override public synchronized void rollback() throws SailException { - - assertWritableConn(); - - if (txLog.isInfoEnabled()) - txLog.info("SAIL-ROLLBACK-CONN: " + this); - - // discard buffered assertions and/or retractions. - clearBuffers(); - - // discard the write set. - database.abort(); + // @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + boolean success = false; + try { + assertWritableConn(); + + if (txLog.isInfoEnabled()) + txLog.info("SAIL-ROLLBACK-CONN: " + this); + + // discard buffered assertions and/or retractions. + clearBuffers(); + + // discard the write set. + database.abort(); + + if (changeLog != null) { + + changeLog.transactionAborted(); + + } + + dirty = false; + + success = true; // mark successful rollback + } finally { // @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + rollbackRequired.set(!success); + } - if (changeLog != null) { - - changeLog.transactionAborted(); - - } - - dirty = false; - } /** @@ -3197,6 +3212,13 @@ * was committed. 
*/ public synchronized long commit2() throws SailException { + + /** + * If a call to rollback does not complete cleanly, then rollbackRequired will be set and no updates will be allowed. + * @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) + */ + if (rollbackRequired.get()) + throw new IllegalStateException("Rollback required"); assertWritableConn(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-10-28 13:12:49 UTC (rev 8691) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-10-28 13:17:38 UTC (rev 8692) @@ -350,8 +350,10 @@ * * Note: If the client closes the connection, then the response's * InputStream will be closed and the task will terminate rather than - * running on in the background with a disconnected client. + * running on in the background with a disconnected client. @see #1026 (SPARQL UPDATE with runtime errors causes problems with lexicon indices) */ + final long tx = getBigdataRDFContext().newTx(timestamp); + boolean ok = false; try { final BigdataRDFContext context = getBigdataRDFContext(); @@ -401,11 +403,21 @@ // Wait for the Future. ft.get(); + ok = true; + } catch (Throwable e) { throw BigdataRDFServlet.launderThrowable(e, resp, updateStr); - } + } finally { + + if (!ok) { + // @see #1026 (SPARQL UPDATE with runtime errors causes problems with lexicon indices) + getBigdataRDFContext().abortTx(tx); + + } + + } } |
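The #1021 changes to AbstractJournal.abort()/commit() and BigdataSailConnection.rollback()/commit2() both apply the same poison-on-failed-abort pattern. A distilled sketch under that reading, with illustrative class and helper names rather than the actual implementation:

import java.util.concurrent.atomic.AtomicBoolean;

public class PoisonOnFailedAbortSketch {

    /** Set when an abort fails partway; checked before any commit. */
    private final AtomicBoolean abortRequired = new AtomicBoolean(false);

    public void abort() {
        boolean success = false;
        try {
            discardWriteSet(); // may throw partway through
            success = true;
        } finally {
            // Poison the store on a partial failure; clear on a clean abort.
            abortRequired.set(!success);
        }
    }

    public void commit() {
        if (abortRequired.get())
            throw new IllegalStateException(
                    "Commit cannot be called, a call to abort must be made before further updates");
        writeCommitPoint();
    }

    private void discardWriteSet() { /* roll back in-memory state */ }

    private void writeCommitPoint() { /* flush buffers, write root block */ }
}

The try/finally guarantees the flag is set after any partial failure and cleared only by a subsequent clean abort, so a commit can never build on a half-discarded write set.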
From: <tho...@us...> - 2014-11-05 18:28:44
|
Revision: 8704 http://sourceforge.net/p/bigdata/code/8704 Author: thompsonbry Date: 2014-11-05 18:28:32 +0000 (Wed, 05 Nov 2014) Log Message: ----------- Merging from git to SVN. Merge includes: - #946 (Empty PROJECTION causes IllegalArgumentException) - #1008 (remote service queries should put parameters in the request body when using POST) - #1036 (Journal file growth reported with 1.3.3) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/UpdateServlet.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/README.md branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket946.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.trig branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestRWStoreTxBehaviors.java Added: branches/BIGDATA_RELEASE_1_3_0/README.md =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/README.md (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/README.md 2014-11-05 18:28:32 UTC (rev 8704) @@ -0,0 +1,7 @@ +## Welcome to Bigdata + +Please see the release notes in [bigdata/src/releases](bigdata/src/releases) for getting started links. This will point you to the installation instructions for the different deployment modes, the online documentation, the wiki, etc. It will also point you to resources for support, subscriptions, and licensing. + +Please also visit us at [bigdata.com](http://www.bigdata.com). 
+ + Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -123,8 +123,9 @@ if (vars == null) throw new IllegalArgumentException(); - if (vars.length == 0) - throw new IllegalArgumentException(); + // @see #946 (Empty PROJECTION causes IllegalArgumentException) +// if (vars.length == 0) +// throw new IllegalArgumentException(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -3069,7 +3069,7 @@ public long commit() { // Critical Section Check. @see #1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) - if (abortRequired.get()) // FIXME Move this into commitNow() after tagging hot fix.(mark to maek sure this gets done). + if (abortRequired.get()) // FIXME Move this into commitNow() after tagging hot fix. throw new IllegalStateException("Commit cannot be called, a call to abort must be made before further updates"); // The timestamp to be assigned to this commit point. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -6345,6 +6345,25 @@ } /** + * Debug ONLY method added to permit unit tests to be written that the + * native transaction counter is correctly decremented to zero. The returned + * value is ONLY valid while holding the {@link #m_allocationLock}. + * Therefore this method MAY NOT be used reliably outside of code that can + * guarantee that there are no concurrent committers on the {@link RWStore}. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/1036"> Journal file growth + * reported with 1.3.3 </a> + */ + public int getActiveTxCount() { + m_allocationWriteLock.lock(); + try { + return m_activeTxCount; + } finally { + m_allocationWriteLock.unlock(); + } + } + + /** * Returns the slot size associated with this address */ public int getAssociatedSlotSize(int addr) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties 2014-11-05 18:28:32 UTC (rev 8704) @@ -6,6 +6,7 @@ log4j.rootCategory=WARN, dest2 log4j.logger.com.bigdata=WARN +#log4j.logger.com.bigdata.txLog=INFO log4j.logger.com.bigdata.btree=WARN log4j.logger.com.bigdata.counters.History=ERROR log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -1146,6 +1146,7 @@ for (HAGlue service : services) { final HAGlue haGlue = service; assertCondition(new Runnable() { + @Override public void run() { try { assertEquals(expected, haGlue.getRootBlock(req) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/task/AbstractApiTask.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -151,6 +151,9 @@ * a read-only connection. When it is associated with the * {@link ITx#UNISOLATED} view or a read-write transaction, this will be a * mutable connection. + * <p> + * This version uses the namespace and timestamp associated with the HTTP + * request. * * @throws RepositoryException */ @@ -163,6 +166,22 @@ * (unless the query explicitly overrides the timestamp of the view on * which it will operate). */ + return getQueryConnection(namespace, timestamp); + + } + + /** + * This version uses the namespace and timestamp provided by the caller. + + * @param namespace + * @param timestamp + * @return + * @throws RepositoryException + */ + protected BigdataSailRepositoryConnection getQueryConnection( + final String namespace, final long timestamp) + throws RepositoryException { + final AbstractTripleStore tripleStore = getTripleStore(namespace, timestamp); @@ -198,13 +217,15 @@ } /** - * Return an UNISOLATED connection. - * - * @return The UNISOLATED connection. - * - * @throws SailException - * @throws RepositoryException - */ + * Return an UNISOLATED connection. + * + * @return The UNISOLATED connection. + * + * @throws SailException + * @throws RepositoryException + * @throws DatasetNotFoundException + * if the specified namespace does not exist. 
+ */ protected BigdataSailRepositoryConnection getUnisolatedConnection() throws SailException, RepositoryException { @@ -214,7 +235,8 @@ if (tripleStore == null) { - throw new RuntimeException("Not found: namespace=" + namespace); + throw new DatasetNotFoundException("Not found: namespace=" + + namespace); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -171,6 +171,9 @@ // test suite for BIND + GRAPH ticket. suite.addTestSuite(TestBindGraph1007.class); + // test suite for a sub-select with an empty PROJECTION. + suite.addTestSuite(TestTicket946.class); + /* * Runtime Query Optimizer (RTO). */ Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket946.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket946.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTicket946.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -0,0 +1,62 @@ +/** + +Copyright (C) SYSTAP, LLC 2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.eval; + + +/** + * Test suite for an issue where an empty projection causes an + * {@link IllegalArgumentException}. 
+ * + * @see <a href="http://trac.bigdata.com/ticket/946"> Empty PROJECTION causes + * IllegalArgumentException</a> + */ +public class TestTicket946 extends AbstractDataDrivenSPARQLTestCase { + + public TestTicket946() { + } + + public TestTicket946(String name) { + super(name); + } + + /** + * <pre> + * SELECT ?x + * { BIND(1 as ?x) + * { SELECT * { FILTER (true) } } + * } + * </pre> + */ + public void test_ticket_946_empty_projection() throws Exception { + + new TestHelper( + "ticket_946", // testURI, + "ticket_946.rq",// queryFileURL + "ticket_946.trig",// dataFileURL + "ticket_946.srx"// resultFileURL + ).runTest(); + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.rq 2014-11-05 18:28:32 UTC (rev 8704) @@ -0,0 +1,4 @@ +SELECT ?x +{ BIND(1 as ?x) + { SELECT * { FILTER (true) } } +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.srx 2014-11-05 18:28:32 UTC (rev 8704) @@ -0,0 +1,16 @@ +<?xml version="1.0"?> +<sparql + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" + xmlns="http://www.w3.org/2005/sparql-results#" > + <head> + <variable name="?x"/> + </head> + <results> + <result> + <binding name="x"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">1</literal> + </binding> + </result> + </results> +</sparql> \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.trig =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.trig (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_946.trig 2014-11-05 18:28:32 UTC (rev 8704) @@ -0,0 +1 @@ +# No data is required. \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -86,6 +86,7 @@ import com.bigdata.rdf.changesets.IChangeLog; import com.bigdata.rdf.changesets.IChangeRecord; import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSailBooleanQuery; import com.bigdata.rdf.sail.BigdataSailGraphQuery; import com.bigdata.rdf.sail.BigdataSailQuery; @@ -502,6 +503,9 @@ */ public abstract class AbstractQueryTask implements Callable<Void> { + /** The connection used to isolate the query or update request. 
*/ + private final BigdataSailRepositoryConnection cxn; + /** The namespace against which the query will be run. */ private final String namespace; @@ -691,37 +695,42 @@ return TimeUnit.NANOSECONDS.toMillis(elapsed); } - /** - * - * @param namespace - * The namespace against which the query will be run. - * @param timestamp - * The timestamp of the view for that namespace against which - * the query will be run. - * @param baseURI - * The base URI. - * @param astContainer - * The container with all the information about the submitted - * query, including the original SPARQL query, the parse - * tree, etc. - * @param queryType - * The {@link QueryType}. - * @param mimeType - * The MIME type to be used for the response. The caller must - * verify that the MIME Type is appropriate for the query - * type. - * @param charset - * The character encoding to use with the negotiated MIME - * type (this is <code>null</code> for binary encodings). - * @param fileExt - * The file extension (without the leading ".") to use with - * that MIME Type. - * @param req - * The request. - * @param os - * Where to write the data for the query result. - */ + /** + * Version for SPARQL QUERY. + * + * @param cxn + * The connection used to isolate the query or update + * request. + * @param namespace + * The namespace against which the query will be run. + * @param timestamp + * The timestamp of the view for that namespace against which + * the query will be run. + * @param baseURI + * The base URI. + * @param astContainer + * The container with all the information about the submitted + * query, including the original SPARQL query, the parse + * tree, etc. + * @param queryType + * The {@link QueryType}. + * @param mimeType + * The MIME type to be used for the response. The caller must + * verify that the MIME Type is appropriate for the query + * type. + * @param charset + * The character encoding to use with the negotiated MIME + * type (this is <code>null</code> for binary encodings). + * @param fileExt + * The file extension (without the leading ".") to use with + * that MIME Type. + * @param req + * The request. + * @param os + * Where to write the data for the query result. + */ protected AbstractQueryTask(// + final BigdataSailRepositoryConnection cxn,// final String namespace,// final long timestamp, // final String baseURI, @@ -735,6 +744,8 @@ final OutputStream os// ) { + if (cxn == null) + throw new IllegalArgumentException(); if (namespace == null) throw new IllegalArgumentException(); if (baseURI == null) @@ -754,6 +765,7 @@ if (os == null) throw new IllegalArgumentException(); + this.cxn = cxn; this.namespace = namespace; this.timestamp = timestamp; this.baseURI = baseURI; @@ -782,6 +794,7 @@ } /** + * Version for SPARQL UPDATE. * * @param namespace * The namespace against which the query will be run. @@ -802,6 +815,7 @@ * Where to write the data for the query result. 
*/ protected AbstractQueryTask(// + final BigdataSailRepositoryConnection cxn,// final String namespace,// final long timestamp, // final String baseURI, @@ -815,6 +829,8 @@ final OutputStream os// ) { + if (cxn == null) + throw new IllegalArgumentException(); if (namespace == null) throw new IllegalArgumentException(); if (baseURI == null) @@ -828,6 +844,7 @@ if (os == null) throw new IllegalArgumentException(); + this.cxn = cxn; this.namespace = namespace; this.timestamp = timestamp; this.baseURI = baseURI; @@ -1188,11 +1205,11 @@ @Override public Void call() throws Exception { - BigdataSailRepositoryConnection cxn = null; - boolean success = false; +// BigdataSailRepositoryConnection cxn = null; +// boolean success = false; try { // Note: Will be UPDATE connection if UPDATE request!!! - cxn = getQueryConnection();//namespace, timestamp); +// cxn = getQueryConnection();//namespace, timestamp); if(log.isTraceEnabled()) log.trace("Query running..."); beginNanos = System.nanoTime(); @@ -1216,10 +1233,10 @@ * those. */ doQuery(cxn, new NullOutputStream()); - success = true; +// success = true; } else { doQuery(cxn, os); - success = true; +// success = true; os.flush(); os.close(); } @@ -1237,55 +1254,55 @@ // log.error(t, t); // } // } - if (cxn != null) { - if (!success && !cxn.isReadOnly()) { - /* - * Force rollback of the connection. - * - * Note: It is possible that the commit has already been - * processed, in which case this rollback() will be a - * NOP. This can happen when there is an IO error when - * communicating with the client, but the database has - * already gone through a commit. - */ - try { - // Force rollback of the connection. - cxn.rollback(); - } catch (Throwable t) { - log.error(t, t); - } - } - try { - // Force close of the connection. - cxn.close(); - } catch (Throwable t) { - log.error(t, t); - } - } +// if (cxn != null) { +// if (!success && !cxn.isReadOnly()) { +// /* +// * Force rollback of the connection. +// * +// * Note: It is possible that the commit has already been +// * processed, in which case this rollback() will be a +// * NOP. This can happen when there is an IO error when +// * communicating with the client, but the database has +// * already gone through a commit. +// */ +// try { +// // Force rollback of the connection. +// cxn.rollback(); +// } catch (Throwable t) { +// log.error(t, t); +// } +// } +// try { +// // Force close of the connection. +// cxn.close(); +// } catch (Throwable t) { +// log.error(t, t); +// } +// } } } - } + } // class SparqlRestApiTask @Override final public Void call() throws Exception { - final String queryOrUpdateStr = astContainer.getQueryString(); +// final String queryOrUpdateStr = astContainer.getQueryString(); - try { +// try { return AbstractApiTask.submitApiTask(getIndexManager(), new SparqlRestApiTask(req, resp, namespace, timestamp)) .get(); - } catch (Throwable t) { +// } catch (Throwable t) { +// +// // FIXME GROUP_COMMIT: check calling stack for existing launderThrowable. +// throw BigdataRDFServlet.launderThrowable(t, resp, +// queryOrUpdateStr); +// +// } - // FIXME GROUP_COMMIT: check calling stack for existing launderThrowable. 
- throw BigdataRDFServlet.launderThrowable(t, resp, - queryOrUpdateStr); - - } - } // call() } // class AbstractQueryTask @@ -1295,14 +1312,15 @@ */ private class AskQueryTask extends AbstractQueryTask { - public AskQueryTask(final String namespace, final long timestamp, + public AskQueryTask(final BigdataSailRepositoryConnection cxn, + final String namespace, final long timestamp, final String baseURI, final ASTContainer astContainer, final QueryType queryType, final BooleanQueryResultFormat format, final HttpServletRequest req, final HttpServletResponse resp, final OutputStream os) { - super(namespace, timestamp, baseURI, astContainer, queryType, + super(cxn, namespace, timestamp, baseURI, astContainer, queryType, format.getDefaultMIMEType(), format.getCharset(), format .getDefaultFileExtension(), req, resp, os); @@ -1334,14 +1352,15 @@ */ private class TupleQueryTask extends AbstractQueryTask { - public TupleQueryTask(final String namespace, final long timestamp, + public TupleQueryTask(final BigdataSailRepositoryConnection cxn, + final String namespace, final long timestamp, final String baseURI, final ASTContainer astContainer, final QueryType queryType, final String mimeType, final Charset charset, final String fileExt, final HttpServletRequest req, final HttpServletResponse resp, final OutputStream os) { - super(namespace, timestamp, baseURI, astContainer, queryType, + super(cxn, namespace, timestamp, baseURI, astContainer, queryType, mimeType, charset, fileExt, req, resp, os); } @@ -1419,13 +1438,14 @@ */ private class GraphQueryTask extends AbstractQueryTask { - public GraphQueryTask(final String namespace, final long timestamp, + public GraphQueryTask(final BigdataSailRepositoryConnection cxn, + final String namespace, final long timestamp, final String baseURI, final ASTContainer astContainer, final QueryType queryType, final RDFFormat format, final HttpServletRequest req, final HttpServletResponse resp, final OutputStream os) { - super(namespace, timestamp, baseURI, astContainer, queryType, + super(cxn, namespace, timestamp, baseURI, astContainer, queryType, format.getDefaultMIMEType(), format.getCharset(), format .getDefaultFileExtension(), req, resp, os); @@ -1437,28 +1457,6 @@ final BigdataSailGraphQuery query = (BigdataSailGraphQuery) setupQuery(cxn); - /* - * FIXME An error thrown here (such as if format is null and we do - * not check it) will cause the response to hang, at least for the - * test suite. Look into this further and make the error handling - * bullet proof! - * - * This may be related to queryId2. That should be imposed on the - * IRunningQuery via QueryHints.QUERYID such that the QueryEngine - * assigns that UUID to the query. We can then correlate the queryId - * to the IRunningQuery, which is important for some of the status - * pages. This will also let us INTERRUPT the IRunningQuery if there - * is an error during evaluation, which might be necessary. For - * example, if the client dies while the query is running. Look at - * the old NSS code and see what it was doing and whether this was - * logic was lost of simply never implemented. - * - * However, I do not see how that would explain the failure of the - * ft.get() method to return. - */ -// if(true) -// throw new RuntimeException(); - // Note: getQueryTask() verifies that format will be non-null. 
final RDFFormat format = RDFWriterRegistry.getInstance() .getFileFormatForMIMEType(mimeType); @@ -1472,6 +1470,16 @@ } + UpdateTask getUpdateTask(final BigdataSailRepositoryConnection cxn, + final String namespace, final long timestamp, final String baseURI, + final ASTContainer astContainer, final HttpServletRequest req, + final HttpServletResponse resp, final OutputStream os) { + + return new UpdateTask(cxn, namespace, timestamp, baseURI, astContainer, + req, resp, os); + + } + /** * Executes a SPARQL UPDATE. */ @@ -1483,16 +1491,13 @@ */ public final AtomicLong commitTime = new AtomicLong(-1); - public UpdateTask(final String namespace, final long timestamp, + public UpdateTask(final BigdataSailRepositoryConnection cxn, + final String namespace, final long timestamp, final String baseURI, final ASTContainer astContainer, final HttpServletRequest req, final HttpServletResponse resp, final OutputStream os) { - super(namespace, timestamp, baseURI, astContainer, -// null,//queryType -// null,//format.getDefaultMIMEType() -// null,//format.getCharset(), -// null,//format.getDefaultFileExtension(), + super(cxn, namespace, timestamp, baseURI, astContainer, req,// resp,// os// @@ -1926,46 +1931,44 @@ } /** - * Return the task which will execute the SPARQL Query -or- SPARQL UPDATE. - * <p> - * Note: The {@link OutputStream} is passed in rather than the - * {@link HttpServletResponse} in order to permit operations such as - * "DELETE WITH QUERY" where this method is used in a context which writes - * onto an internal pipe rather than onto the {@link HttpServletResponse}. - * - * @param namespace - * The namespace associated with the {@link AbstractTripleStore} - * view. - * @param timestamp - * The timestamp associated with the {@link AbstractTripleStore} - * view. - * @param queryStr - * The query. - * @param acceptOverride - * Override the Accept header (optional). This is used by UPDATE - * and DELETE so they can control the {@link RDFFormat} of the - * materialized query results. - * @param req - * The request. - * @param os - * Where to write the results. - * @param update - * <code>true</code> iff this is a SPARQL UPDATE request. - * - * @return The task -or- <code>null</code> if the named data set was not - * found. When <code>null</code> is returned, the - * {@link HttpServletResponse} will also have been committed. - * @throws IOException - */ + * Return the task which will execute the SPARQL Query -or- SPARQL UPDATE. + * <p> + * Note: The {@link OutputStream} is passed in rather than the + * {@link HttpServletResponse} in order to permit operations such as + * "DELETE WITH QUERY" where this method is used in a context which writes + * onto an internal pipe rather than onto the {@link HttpServletResponse}. + * + * @param namespace + * The namespace associated with the {@link AbstractTripleStore} + * view. + * @param timestamp + * The timestamp associated with the {@link AbstractTripleStore} + * view. + * @param queryStr + * The query. + * @param acceptOverride + * Override the Accept header (optional). This is used by UPDATE + * and DELETE so they can control the {@link RDFFormat} of the + * materialized query results. + * @param req + * The request. + * @param os + * Where to write the results. + * + * @return The task. 
+ * + * @throws IOException + */ public AbstractQueryTask getQueryTask(// + final BigdataSailRepositoryConnection cxn,// final String namespace,// final long timestamp,// final String queryStr,// final String acceptOverride,// final HttpServletRequest req,// final HttpServletResponse resp,// - final OutputStream os,// - final boolean update// + final OutputStream os// +// final boolean update// ) throws MalformedQueryException, IOException { /* @@ -1973,38 +1976,6 @@ */ final String baseURI = req.getRequestURL().toString(); - final AbstractTripleStore tripleStore = getTripleStore(namespace, - timestamp); - - if (tripleStore == null) { - /* - * There is no such triple/quad store instance. - */ - BigdataServlet.buildResponse(resp, BigdataServlet.HTTP_NOTFOUND, - BigdataServlet.MIME_TEXT_PLAIN); - return null; - } - - if (update) { - - /* - * Parse the query so we can figure out how it will need to be executed. - * - * Note: This goes through some pains to make sure that we parse the - * query exactly once in order to minimize the resources associated with - * the query parser. - */ - final ASTContainer astContainer = new Bigdata2ASTSPARQLParser( - tripleStore).parseUpdate2(queryStr, baseURI); - - if (log.isDebugEnabled()) - log.debug(astContainer.toString()); - - return new UpdateTask(namespace, timestamp, baseURI, astContainer, - req, resp, os); - - } - /* * Parse the query so we can figure out how it will need to be executed. * @@ -2012,6 +1983,7 @@ * query exactly once in order to minimize the resources associated with * the query parser. */ + final AbstractTripleStore tripleStore = cxn.getTripleStore(); final ASTContainer astContainer = new Bigdata2ASTSPARQLParser( tripleStore).parseQuery2(queryStr, baseURI); @@ -2065,7 +2037,7 @@ case CONSTRUCT: /* Generate RDF/XML so we can apply XSLT transform. * - * FIXME This should be sending back RDFs or using a lens. + * TODO This should be sending back RDFs or using a lens. */ acceptStr = RDFFormat.RDFXML.getDefaultMIMEType(); break; @@ -2086,7 +2058,7 @@ final BooleanQueryResultFormat format = util .getBooleanQueryResultFormat(BooleanQueryResultFormat.SPARQL); - return new AskQueryTask(namespace, timestamp, baseURI, + return new AskQueryTask(cxn, namespace, timestamp, baseURI, astContainer, queryType, format, req, resp, os); } @@ -2095,7 +2067,7 @@ final RDFFormat format = util.getRDFFormat(RDFFormat.RDFXML); - return new GraphQueryTask(namespace, timestamp, baseURI, + return new GraphQueryTask(cxn, namespace, timestamp, baseURI, astContainer, queryType, format, req, resp, os); } @@ -2120,7 +2092,7 @@ charset = format.getCharset(); fileExt = format.getDefaultFileExtension(); } - return new TupleQueryTask(namespace, timestamp, baseURI, + return new TupleQueryTask(cxn, namespace, timestamp, baseURI, astContainer, queryType, mimeType, charset, fileExt, req, resp, os); @@ -2423,20 +2395,27 @@ } /** - * Obtain a new transaction to protect operations against the specified view - * of the database. - * - * @param timestamp - * The timestamp for the desired view. - * - * @return The transaction identifier -or- <code>timestamp</code> if the - * {@link IIndexManager} is not a {@link Journal}. - * - * @see ITransactionService#newTx(long) - * - * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency - * problem with list namespaces and create namespace </a> - */ + * Obtain a new transaction to protect operations against the specified view + * of the database. 
This uses the transaction mechanisms to prevent + * recycling during operations NOT OTHERWISE PROTECTED by a + * {@link BigdataSailConnection} for what would otherwise amount to dirty + * reads. This is especially critical for reads on the global row store + * since it can not be protected by the {@link BigdataSailConnection} for + * cases where the KB instance does not yet exist. The presence of such a tx + * does NOT prevent concurrent commits. It only prevents recycling during + * such commits (and even then only on the RWStore backend). + * + * @param timestamp + * The timestamp for the desired view. + * + * @return The transaction identifier -or- <code>timestamp</code> if the + * {@link IIndexManager} is not a {@link Journal}. + * + * @see ITransactionService#newTx(long) + * + * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency + * problem with list namespaces and create namespace </a> + */ public long newTx(final long timestamp) { long tx = timestamp; // use dirty reads unless Journal. @@ -2459,7 +2438,10 @@ } /** - * Abort a transaction obtained by {@link #newTx(long)}. + * Abort a transaction obtained by {@link #newTx(long)}. This decrements the + * native active transaction counter for the RWStore. Once that counter + * reaches zero, recycling will occur the next time an unisolated mutation + * goes through a commit on the journal. * * @param tx * The transaction identifier. @@ -2482,6 +2464,15 @@ } + /* + * + */ +// /** +// * Commit a transaction obtained by {@link #newTx(long)} +// * +// * @param tx +// * The transaction identifier. +// */ // public void commitTx(final long tx) { // // if (getIndexManager() instanceof Journal) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -47,6 +47,7 @@ import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.MalformedQueryException; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFWriter; @@ -185,18 +186,35 @@ } if (resp != null) { if (!resp.isCommitted()) { - if (InnerCause.isInnerCause(t, - ConstraintViolationException.class)) { - /* - * A constraint violation is a bad request (the data - * violates the rules) not a server error. - */ - resp.setStatus(HTTP_BADREQUEST); + if (InnerCause.isInnerCause(t, DatasetNotFoundException.class)) { + /* + * The addressed KB does not exist. + */ + resp.setStatus(HttpServletResponse.SC_NOT_FOUND); + resp.setContentType(MIME_TEXT_PLAIN); + } else if (InnerCause.isInnerCause(t, + ConstraintViolationException.class)) { + /* + * A constraint violation is a bad request (the data + * violates the rules) not a server error. + */ + resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); + resp.setContentType(MIME_TEXT_PLAIN); + } else if (InnerCause.isInnerCause(t, + MalformedQueryException.class)) { + /* + * Send back a BAD REQUEST (400) along with the text of the + * syntax error message. + * + * TODO Write unit test for 400 response for bad client + * request.
+ */ + resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); + resp.setContentType(MIME_TEXT_PLAIN); + } else { + // Internal server error. + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); resp.setContentType(MIME_TEXT_PLAIN); - } else { - // Internal server error. - resp.setStatus(HTTP_INTERNALERROR); - resp.setContentType(MIME_TEXT_PLAIN); } } OutputStream os = null; @@ -337,12 +355,12 @@ /** * Factory for the {@link PipedInputStream}. */ - protected PipedInputStream newPipedInputStream(final PipedOutputStream os) - throws IOException { + final static protected PipedInputStream newPipedInputStream( + final PipedOutputStream os) throws IOException { - return new PipedInputStream(os); + return new PipedInputStream(os); - } + } /** * Report a mutation count and elapsed time back to the user agent. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BlueprintsServlet.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -68,14 +68,14 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (getBigdataRDFContext().getTripleStore(getNamespace(req), - getTimestamp(req)) == null) { - /* - * There is no such triple/quad store instance. - */ - buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); - return; - } +// if (getBigdataRDFContext().getTripleStore(getNamespace(req), +// getTimestamp(req)) == null) { +// /* +// * There is no such triple/quad store instance. +// */ +// buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN); +// return; +// } final String contentType = req.getContentType(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/DeleteServlet.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -37,6 +37,7 @@ import org.openrdf.model.Statement; import org.openrdf.model.URI; import org.openrdf.model.Value; +import org.openrdf.query.MalformedQueryException; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParser; @@ -113,126 +114,199 @@ * operation would be broken by group commit since other tasks could have * updated the KB since the lastCommitTime and been checkpointed and hence * be visible to an unisolated operation without there being an intervening - * commit point. + * commit point. [I think that this is resolved by taking the unisolated + * connection first and then taking the read-only lastCommitTime connection + * view, which is what the code now does.] 
*/ private void doDeleteWithQuery(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { + + final String baseURI = req.getRequestURL().toString(); - final long begin = System.currentTimeMillis(); + final String namespace = getNamespace(req); + + final String queryStr = req.getParameter("query"); + + if (queryStr == null) + throw new UnsupportedOperationException(); + + if (log.isInfoEnabled()) + log.info("delete with query: " + queryStr); + + try { + + submitApiTask( + new DeleteWithQueryTask(req, resp, namespace, + ITx.UNISOLATED, // + queryStr,// + baseURI// + )).get(); + + } catch (Throwable t) { + + launderThrowable(t, resp, "UPDATE-WITH-QUERY" + ": queryStr=" + + queryStr + ", baseURI=" + baseURI); + + } + + } + + private static class DeleteWithQueryTask extends AbstractRestApiTask<Void> { + + private final String queryStr; + private final String baseURI; + + /** + * + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. + * @param baseURI + * The base URI for the operation. + */ + public DeleteWithQueryTask(final HttpServletRequest req, + final HttpServletResponse resp, + final String namespace, final long timestamp, + final String queryStr,// + final String baseURI + ) { + super(req, resp, namespace, timestamp); + this.queryStr = queryStr; + this.baseURI = baseURI; + } - final String baseURI = req.getRequestURL().toString(); + @Override + public boolean isReadOnly() { + return false; + } - final String namespace = getNamespace(req); + @Override + public Void call() throws Exception { - final String queryStr = req.getParameter("query"); + final long begin = System.currentTimeMillis(); + + final AtomicLong nmodified = new AtomicLong(0L); - if (queryStr == null) - throw new UnsupportedOperationException(); + BigdataSailRepositoryConnection conn = null; + boolean success = false; + try { - if (log.isInfoEnabled()) - log.info("delete with query: " + queryStr); + conn = getUnisolatedConnection(); - try { + { - /* - * Note: pipe is drained by this thread to consume the query - * results, which are the statements to be deleted. - */ - final PipedOutputStream os = new PipedOutputStream(); - final InputStream is = newPipedInputStream(os); + if (log.isInfoEnabled()) + log.info("delete with query: " + queryStr); - // Use this format for the query results. - final RDFFormat format = RDFFormat.NTRIPLES; - - final AbstractQueryTask queryTask = getBigdataRDFContext() - .getQueryTask(namespace, ITx.READ_COMMITTED, queryStr, - format.getDefaultMIMEType(), - req, resp, os, false/*update*/); + final BigdataRDFContext context = BigdataServlet + .getBigdataRDFContext(req.getServletContext()); - if(queryTask == null) { - // KB not found. Response already committed. - return; - } + /* + * Note: pipe is drained by this thread to consume the query + * results, which are the statements to be deleted. + */ + final PipedOutputStream os = new PipedOutputStream(); - switch (queryTask.queryType) { - case DESCRIBE: - case CONSTRUCT: - break; - default: - buildResponse(resp, HTTP_BADREQUEST, MIME_TEXT_PLAIN, - "Must be DESCRIBE or CONSTRUCT query."); - return; - } + // The read-only connection for the query. 
+ BigdataSailRepositoryConnection roconn = null; + try { - final AtomicLong nmodified = new AtomicLong(0L); + final long readOnlyTimestamp = ITx.READ_COMMITTED; - BigdataSailRepositoryConnection conn = null; - boolean success = false; - try { + roconn = getQueryConnection(namespace, + readOnlyTimestamp); - conn = getBigdataRDFContext().getUnisolatedConnection( - namespace); + // Use this format for the query results. + final RDFFormat format = RDFFormat.NTRIPLES; - final RDFParserFactory factory = RDFParserRegistry - .getInstance().get(format); + final AbstractQueryTask queryTask = context + .getQueryTask(roconn, namespace, + readOnlyTimestamp, queryStr, + format.getDefaultMIMEType(), req, resp, + os); - final RDFParser rdfParser = factory.getParser(); + switch (queryTask.queryType) { + case DESCRIBE: + case CONSTRUCT: + break; + default: + throw new MalformedQueryException( + "Must be DESCRIBE or CONSTRUCT query"); + } - rdfParser.setValueFactory(conn.getTripleStore() - .getValueFactory()); + final RDFParserFactory factory = RDFParserRegistry + .getInstance().get(format); - rdfParser.setVerifyData(false); + final RDFParser rdfParser = factory.getParser(); - rdfParser.setStopAtFirstError(true); + rdfParser.setValueFactory(conn.getTripleStore() + .getValueFactory()); - rdfParser - .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); + rdfParser.setVerifyData(false); - rdfParser.setRDFHandler(new RemoveStatementHandler(conn - .getSailConnection(), nmodified)); + rdfParser.setStopAtFirstError(true); - // Wrap as Future. - final FutureTask<Void> ft = new FutureTask<Void>(queryTask); - - // Submit query for evaluation. - getBigdataRDFContext().queryService.execute(ft); - - // Run parser : visited statements will be deleted. - rdfParser.parse(is, baseURI); + rdfParser + .setDatatypeHandling(RDFParser.DatatypeHandling.IGNORE); - // Await the Future (of the Query) - ft.get(); - - // Commit the mutation. - conn.commit(); + rdfParser.setRDFHandler(new RemoveStatementHandler(conn + .getSailConnection(), nmodified)); - success = true; - - final long elapsed = System.currentTimeMillis() - begin; - - reportModifiedCount(resp, nmodified.get(), elapsed); + // Wrap as Future. + final FutureTask<Void> ft = new FutureTask<Void>( + queryTask); - } finally { + // Submit query for evaluation. + context.queryService.execute(ft); - if (conn != null) { + // Reads on the statements produced by the query. + final InputStream is = newPipedInputStream(os); - if (!success) - conn.rollback(); + // Run parser : visited statements will be deleted. + rdfParser.parse(is, baseURI); - conn.close(); + // Await the Future (of the Query) + ft.get(); - } + } finally { - } + if (roconn != null) { + // close the read-only connection for the query. 
+ roconn.rollback(); + } - } catch (Throwable t) { + } - throw BigdataRDFServlet.launderThrowable(t, resp, queryStr); + } - } + conn.commit(); - } + success = true; + final long elapsed = System.currentTimeMillis() - begin; + + reportModifiedCount(nmodified.get(), elapsed); + + return null; + + } finally { + + if (conn != null) { + + if (!success) + conn.rollback(); + + conn.close(); + + } + + } + + } + + } // class DeleteWithQueryTask + @Override protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-11-05 15:13:07 UTC (rev 8703) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2014-11-05 18:28:32 UTC (rev 8704) @@ -48,7 +48,6 @@ import org.openrdf.model.URI; import org.openrdf.model.Value; import org.openrdf.model.impl.GraphImpl; -import org.openrdf.query.MalformedQueryException; import org.openrdf.repository.RepositoryResult; import com.bigdata.bop.BOpUtility; @@ -65,6 +64,7 @@ import com.bigdata.mdi.PartitionLocator; import com.bigdata.rdf.sail.BigdataSailQuery; import com.bigdata.rdf.sail.BigdataSailRepositoryConnection; +import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; import com.bigdata.rdf.sail.sparql.ast.SimpleNode; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask; import com.bigdata.rdf.sail.webapp.BigdataRDFContext.RunningQuery; @@ -327,10 +327,6 @@ return; } - final String namespace = getNamespace(req); - - final long timestamp = ITx.UNISOLATED;//getTimestamp(req); - // The SPARQL update final String updateStr = getUpdateString(req); @@ -343,84 +339,149 @@ } - /* - * Setup task to execute the request. The task is executed on a thread - * pool. This bounds the possible concurrency of query execution (as - * opposed to queries accepted for eventual execution). + try { + + final String namespace = getNamespace(req); + + final long timestamp = ITx.UNISOLATED;//getTimestamp(req); + + submitApiTask( + new SparqlUpdateTask(req, resp, namespace, timestamp, + updateStr, getBigdataRDFContext() // + )).get(); + + } catch (Throwable t) { + + launderThrowable(t, resp, "SPARQL-UPDATE: updateStr=" + updateStr); + + } + + } + + private static class SparqlUpdateTask extends AbstractRestApiTask<Void> { + + private final String updateStr; + private final BigdataRDFContext context; + + /** * - * Note: If the client closes the connection, then the response's - * InputStream will be closed and the task will terminate rather than - * running on in the background with a disconnected client. @see #1026 (SPARQL UPDATE with runtime errors causes problems with lexicon indices) + * @param namespace + * The namespace of the target KB instance. + * @param timestamp + * The timestamp used to obtain a mutable connection. 
*/ - final long tx = getBigdataRDFContext().newTx(timestamp); - boolean ok = false; - try { + public SparqlUpdateTask(// + final HttpServletRequest req,// + final HttpServletResponse resp,// + final String namespace, // + final long timestamp,// + final String updateStr,// + final BigdataRDFContext context// + ) { + super(req, resp, namespace, timestamp); + this.updateStr = updateStr; + this.context = context; + } + + @Override + final public boolean isReadOnly() { + return false; + } - final BigdataRDFContext context = getBigdataRDFContext(); + @Override + public Void call() throws Exception { - final UpdateTask updateTask; - try { + BigdataSailRepositoryConnection conn = null; + boolean success = false; + try { - /* - * Attempt to construct a task which we can use to evaluate the - * query. - */ - - updateTask = (UpdateTask) context.getQueryTask(namespace, - timestamp, updateStr, null/* acceptOverride */, req, - resp, resp.getOutputStream(), true/* update */); - - if (updateTask == null) { - // KB not found. Response already committed. - return; - } - - } catch (MalformedQueryException ex) { - /* - * Send back a BAD REQUEST (400) along with the text of the - * syntax error message. - */ - resp.sendError(HttpServletResponse.SC_BAD_REQUEST, - ex.getLocalizedMessage()); - return; - } + conn = getUnisolatedConnection(); - final FutureTask<Void> ft = new FutureTask<Void>(updateTask); + { - if (log.isTraceEnabled()) - log.trace("Will run update: " + updateStr); + /* + * Setup the baseURI for this request. It will be set to the + * requestURI. + */ + final String baseURI = req.getRequestURL().toString(); - updateTask.updateFuture = ft; - - /* - * Begin executing the query (asynchronous). - * - * Note: UPDATEs currently contend with QUERYs against the same - * thread pool. - */ - getBigdataRDFContext().queryService.execute(ft); + final AbstractTriple... [truncated message content] |
From: <tho...@us...> - 2014-11-06 14:21:20
|
Revision: 8705 http://sourceforge.net/p/bigdata/code/8705 Author: thompsonbry Date: 2014-11-06 14:21:08 +0000 (Thu, 06 Nov 2014) Log Message: ----------- Bumping version for 1.3.4 release. Disabling snapshot builds. Added 1.3.4 release notes. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_4.txt Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_4.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_4.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_3_4.txt 2014-11-06 14:21:08 UTC (rev 8705) @@ -0,0 +1,548 @@ +This is a critical fix release of bigdata(R). All users are encouraged to upgrade immediately. + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF, capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in a single machine mode (Journal), a highly available replication cluster mode (HAJournalServer), or a horizontally sharded cluster mode (BigdataFederation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The HAJournalServer adds replication, online backup, horizontal scaling of query, and high availability. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the HAJournalServer for high availability and linear scaling in query throughput. Choose the BigdataFederation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under Eclipse. You can also build the code using the Ant script. The cluster installer requires the use of the Ant script. + +Starting with the 1.3.0 release, we offer a tarball artifact [10] for easy installation of the HA replication cluster. + +You can download the WAR (standalone) or HA artifacts from: + +http://sourceforge.net/projects/bigdata/ + +You can check out this release from: + +https://svn.code.sf.net/p/bigdata/code/tags/BIGDATA_RELEASE_1_3_4 + +Critical fixes and other changes of note in this minor release: + +- #1036 (Journal leaks storage with SPARQL UPDATE and REST API) + +New features in 1.3.x: + +- Java 7 is now required. +- High availability [10]. +- High availability load balancer. +- New RDF/SPARQL workbench. +- Blueprints API. +- RDF Graph Mining Service (GASService) [12]. +- Reification Done Right (RDR) support [11]. +- Property Path performance enhancements.
+- Plus numerous other bug fixes and performance enhancements. + +Feature summary: + +- Highly Available Replication Clusters (HAJournalServer [10]) +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited (BigdataFederation); +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Fast 100% native SPARQL 1.1 evaluation; +- Integrated "analytic" query package; +- 100% Java memory manager leverages the JVM native heap (no GC); + +Road map [3]: + +- Column-wise indexing; +- Runtime Query Optimizer for quads; +- Performance optimization for scale-out clusters; and +- Simplified deployment, configuration, and administration for scale-out clusters. + +Change log: + + Note: Versions with (*) MAY require data migration. For details, see [9]. + +1.3.4: + +- http://trac.bigdata.com/ticket/946 (Empty PROJECTION causes IllegalArgumentException) +- http://trac.bigdata.com/ticket/1036 (Journal leaks storage with SPARQL UPDATE and REST API) +- http://trac.bigdata.com/ticket/1008 (remote service queries should put parameters in the request body when using POST) + +1.3.3: + +- http://trac.bigdata.com/ticket/980 (Object position of query hint is not a Literal (partial resolution - see #1028 as well)) +- http://trac.bigdata.com/ticket/1018 (Add the ability to track and cancel all queries issued through a BigdataSailRemoteRepositoryConnection) +- http://trac.bigdata.com/ticket/1021 (Add critical section protection to AbstractJournal.abort() and BigdataSailConnection.rollback()) +- http://trac.bigdata.com/ticket/1024 (GregorianCalendar does weird things before 1582) +- http://trac.bigdata.com/ticket/1026 (SPARQL UPDATE with runtime errors causes problems with lexicon indices) +- http://trac.bigdata.com/ticket/1028 (very rare NotMaterializedException: XSDBoolean(true)) +- http://trac.bigdata.com/ticket/1029 (RWStore commit state not correctly rolled back if abort fails on empty journal) +- http://trac.bigdata.com/ticket/1030 (RWStorage stats cleanup) + +1.3.2: + +- http://trac.bigdata.com/ticket/1016 (Jetty/LBS issues when deployed as WAR under tomcat) +- http://trac.bigdata.com/ticket/1010 (Upgrade apache http components to 1.3.1 (security)) +- http://trac.bigdata.com/ticket/1005 (Invalidate BTree objects if error occurs during eviction) +- http://trac.bigdata.com/ticket/1004 (Concurrent binding problem) +- http://trac.bigdata.com/ticket/1002 (Concurrency issues in JVMHashJoinUtility caused by MAX_PARALLEL query hint override) +- http://trac.bigdata.com/ticket/1000 (Add configuration option to turn off bottom-up evaluation) +- http://trac.bigdata.com/ticket/999 (Extend BigdataSailFactory to take arbitrary properties) +- http://trac.bigdata.com/ticket/998 (SPARQL Update through BigdataGraph) +- http://trac.bigdata.com/ticket/996 (Add custom prefix support for query results) +- http://trac.bigdata.com/ticket/995 (Allow general purpose SPARQL queries through BigdataGraph) +- http://trac.bigdata.com/ticket/992 (Deadlock between AbstractRunningQuery.cancel(), QueryLog.log(), and ArbitraryLengthPathTask) +- http://trac.bigdata.com/ticket/990 (Query hints not recognized in FILTERs) +- http://trac.bigdata.com/ticket/989 (Stored query service) +- http://trac.bigdata.com/ticket/988 (Bad performance for FILTER EXISTS) +- http://trac.bigdata.com/ticket/987 (maven build is broken) +- http://trac.bigdata.com/ticket/986 (Improve locality for small
allocation slots) +- http://trac.bigdata.com/ticket/985 (Deadlock in BigdataTriplePatternMaterializer) +- http://trac.bigdata.com/ticket/975 (HA Health Status Page) +- http://trac.bigdata.com/ticket/974 (Name2Addr.indexNameScan(prefix) uses scan + filter) +- http://trac.bigdata.com/ticket/973 (RWStore.commit() should be more defensive) +- http://trac.bigdata.com/ticket/971 (Clarify HTTP Status codes for CREATE NAMESPACE operation) +- http://trac.bigdata.com/ticket/968 (no link to wiki from workbench) +- http://trac.bigdata.com/ticket/966 (Failed to get namespace under concurrent update) +- http://trac.bigdata.com/ticket/965 (Can not run LBS mode with HA1 setup) +- http://trac.bigdata.com/ticket/961 (Clone/modify namespace to create a new one) +- http://trac.bigdata.com/ticket/960 (Export namespace properties in XML/Java properties text format) +- http://trac.bigdata.com/ticket/938 (HA Load Balancer) +- http://trac.bigdata.com/ticket/936 (Support larger metabits allocations) +- http://trac.bigdata.com/ticket/932 (Bigdata/Rexster integration) +- http://trac.bigdata.com/ticket/919 (Formatted Layout for Status pages) +- http://trac.bigdata.com/ticket/899 (REST API Query Cancellation) +- http://trac.bigdata.com/ticket/885 (Panels do not appear on startup in Firefox) +- http://trac.bigdata.com/ticket/884 (Executing a new query should clear the old query results from the console) +- http://trac.bigdata.com/ticket/882 (Abbreviate URIs that can be namespaced with one of the defined common namespaces) +- http://trac.bigdata.com/ticket/880 (Can't explore an absolute URI with < >) +- http://trac.bigdata.com/ticket/878 (Explore page looks weird when empty) +- http://trac.bigdata.com/ticket/873 (Allow user to use browser back & forward buttons to view explore history) +- http://trac.bigdata.com/ticket/865 (OutOfMemoryError instead of Timeout for SPARQL Property Paths) +- http://trac.bigdata.com/ticket/858 (Change explore URLs to include URI being clicked so user can see what they've clicked on before) +- http://trac.bigdata.com/ticket/855 (AssertionError: Child does not have persistent identity) +- http://trac.bigdata.com/ticket/850 (Search functionality in workbench) +- http://trac.bigdata.com/ticket/847 (Query results panel should recognize well known namespaces for easier reading) +- http://trac.bigdata.com/ticket/845 (Display the properties for a namespace) +- http://trac.bigdata.com/ticket/843 (Create new tabs for status & performance counters, and add per namespace service/VoID description links) +- http://trac.bigdata.com/ticket/837 (Configurator for new namespaces) +- http://trac.bigdata.com/ticket/836 (Allow user to create namespace in the workbench) +- http://trac.bigdata.com/ticket/830 (Output RDF data from queries in table format) +- http://trac.bigdata.com/ticket/829 (Export query results) +- http://trac.bigdata.com/ticket/828 (Save selected namespace in browser) +- http://trac.bigdata.com/ticket/827 (Explore tab in workbench) +- http://trac.bigdata.com/ticket/826 (Create shortcut to execute load/query) +- http://trac.bigdata.com/ticket/823 (Disable textarea when a large file is selected) +- http://trac.bigdata.com/ticket/820 (Allow non-file:// URLs to be loaded) +- http://trac.bigdata.com/ticket/819 (Retrieve default namespace on page load) +- http://trac.bigdata.com/ticket/772 (Query timeout only checked at operator start/stop) +- http://trac.bigdata.com/ticket/765 (order by expr skips invalid expressions) +- http://trac.bigdata.com/ticket/587 (JSP page to configure KBs) +- 
http://trac.bigdata.com/ticket/343 (Stochastic assert in AbstractBTree#writeNodeOrLeaf() in CI) + +1.3.1: + +- http://trac.bigdata.com/ticket/242 (Deadlines do not play well with GROUP_BY, ORDER_BY, etc.) +- http://trac.bigdata.com/ticket/256 (Amortize RTO cost) +- http://trac.bigdata.com/ticket/257 (Support BOP fragments in the RTO.) +- http://trac.bigdata.com/ticket/258 (Integrate RTO into SAIL) +- http://trac.bigdata.com/ticket/259 (Dynamically increase RTO sampling limit.) +- http://trac.bigdata.com/ticket/526 (Reification done right) +- http://trac.bigdata.com/ticket/580 (Problem with the bigdata RDF/XML parser with sids) +- http://trac.bigdata.com/ticket/622 (NSS using jetty+windows can lose connections (windows only; jdk 6/7 bug)) +- http://trac.bigdata.com/ticket/624 (HA Load Balancer) +- http://trac.bigdata.com/ticket/629 (Graph processing API) +- http://trac.bigdata.com/ticket/721 (Support HA1 configurations) +- http://trac.bigdata.com/ticket/730 (Allow configuration of embedded NSS jetty server using jetty-web.xml) +- http://trac.bigdata.com/ticket/759 (multiple filters interfere) +- http://trac.bigdata.com/ticket/763 (Stochastic results with Analytic Query Mode) +- http://trac.bigdata.com/ticket/774 (Converge on Java 7.) +- http://trac.bigdata.com/ticket/779 (Resynchronization of socket level write replication protocol (HA)) +- http://trac.bigdata.com/ticket/780 (Incremental or asynchronous purge of HALog files) +- http://trac.bigdata.com/ticket/782 (Wrong serialization version) +- http://trac.bigdata.com/ticket/784 (Describe Limit/offset don't work as expected) +- http://trac.bigdata.com/ticket/787 (Update documentations and samples, they are OUTDATED) +- http://trac.bigdata.com/ticket/788 (Name2Addr does not report all root causes if the commit fails.) +- http://trac.bigdata.com/ticket/789 (ant task to build sesame fails, docs for setting up bigdata for sesame are ancient) +- http://trac.bigdata.com/ticket/790 (should not be pruning any children) +- http://trac.bigdata.com/ticket/791 (Clean up query hints) +- http://trac.bigdata.com/ticket/793 (Explain reports incorrect value for opCount) +- http://trac.bigdata.com/ticket/796 (Filter assigned to sub-query by query generator is dropped from evaluation) +- http://trac.bigdata.com/ticket/797 (add sbt setup to getting started wiki) +- http://trac.bigdata.com/ticket/798 (Solution order not always preserved) +- http://trac.bigdata.com/ticket/799 (mis-optimization of quad pattern vs triple pattern) +- http://trac.bigdata.com/ticket/802 (Optimize DatatypeFactory instantiation in DateTimeExtension) +- http://trac.bigdata.com/ticket/803 (prefixMatch does not work in full text search) +- http://trac.bigdata.com/ticket/804 (update bug deleting quads) +- http://trac.bigdata.com/ticket/806 (Incorrect AST generated for OPTIONAL { SELECT }) +- http://trac.bigdata.com/ticket/808 (Wildcard search in bigdata for type suggestions) +- http://trac.bigdata.com/ticket/810 (Expose GAS API as SPARQL SERVICE) +- http://trac.bigdata.com/ticket/815 (RDR query does too much work) +- http://trac.bigdata.com/ticket/816 (Wildcard projection ignores variables inside a SERVICE call.)
+- http://trac.bigdata.com/ticket/817 (Unexplained increase in journal size) +- http://trac.bigdata.com/ticket/821 (Reject large files, rather than storing them in a hidden variable) +- http://trac.bigdata.com/ticket/831 (UNION with filter issue) +- http://trac.bigdata.com/ticket/841 (Using "VALUES" in a query returns lexical error) +- http://trac.bigdata.com/ticket/848 (Fix SPARQL Results JSON writer to write the RDR syntax) +- http://trac.bigdata.com/ticket/849 (Create writers that support the RDR syntax) +- http://trac.bigdata.com/ticket/851 (RDR GAS interface) +- http://trac.bigdata.com/ticket/852 (RemoteRepository.cancel() does not consume the HTTP response entity.) +- http://trac.bigdata.com/ticket/853 (Follower does not accept POST of idempotent operations (HA)) +- http://trac.bigdata.com/ticket/854 (Allow override of maximum length before converting an HTTP GET to an HTTP POST) +- http://trac.bigdata.com/ticket/855 (AssertionError: Child does not have persistent identity) +- http://trac.bigdata.com/ticket/862 (Create parser for JSON SPARQL Results) +- http://trac.bigdata.com/ticket/863 (HA1 commit failure) +- http://trac.bigdata.com/ticket/866 (Batch remove API for the SAIL) +- http://trac.bigdata.com/ticket/867 (NSS concurrency problem with list namespaces and create namespace) +- http://trac.bigdata.com/ticket/869 (HA5 test suite) +- http://trac.bigdata.com/ticket/872 (Full text index range count optimization) +- http://trac.bigdata.com/ticket/874 (FILTER not applied when there is UNION in the same join group) +- http://trac.bigdata.com/ticket/876 (When I upload a file I want to see the filename.) +- http://trac.bigdata.com/ticket/877 (RDF Format selector is invisible) +- http://trac.bigdata.com/ticket/883 (CANCEL Query fails on non-default kb namespace on HA follower.) +- http://trac.bigdata.com/ticket/886 (Provide workaround for bad reverse DNS setups.) +- http://trac.bigdata.com/ticket/887 (BIND is leaving a variable unbound) +- http://trac.bigdata.com/ticket/892 (HAJournalServer does not die if zookeeper is not running) +- http://trac.bigdata.com/ticket/893 (large sparql insert optimization slow?) +- http://trac.bigdata.com/ticket/894 (unnecessary synchronization) +- http://trac.bigdata.com/ticket/895 (stack overflow in populateStatsMap) +- http://trac.bigdata.com/ticket/902 (Update Basic Bigdata Chef Cookbook) +- http://trac.bigdata.com/ticket/904 (AssertionError: PropertyPathNode got to ASTJoinOrderByType.optimizeJoinGroup) +- http://trac.bigdata.com/ticket/905 (unsound combo query optimization: union + filter) +- http://trac.bigdata.com/ticket/906 (DC Prefix Button Appends "</li>") +- http://trac.bigdata.com/ticket/907 (Add a quick-start ant task for the BD Server "ant start") +- http://trac.bigdata.com/ticket/912 (Provide a configurable IAnalyzerFactory) +- http://trac.bigdata.com/ticket/913 (Blueprints API Implementation) +- http://trac.bigdata.com/ticket/914 (Settable timeout on SPARQL Query (REST API)) +- http://trac.bigdata.com/ticket/915 (DefaultAnalyzerFactory issues) +- http://trac.bigdata.com/ticket/920 (Content negotiation orders accept header scores in reverse) +- http://trac.bigdata.com/ticket/939 (NSS does not start from command line: bigdata-war/src not found.)
+- http://trac.bigdata.com/ticket/940 (ProxyServlet in web.xml breaks tomcat WAR (HA LBS)) + +1.3.0: + +- http://trac.bigdata.com/ticket/530 (Journal HA) +- http://trac.bigdata.com/ticket/621 (Coalesce write cache records and install reads in cache) +- http://trac.bigdata.com/ticket/623 (HA TXS) +- http://trac.bigdata.com/ticket/639 (Remove triple-buffering in RWStore) +- http://trac.bigdata.com/ticket/645 (HA backup) +- http://trac.bigdata.com/ticket/646 (River not compatible with newer 1.6.0 and 1.7.0 JVMs) +- http://trac.bigdata.com/ticket/648 (Add a custom function to use full text index for filtering.) +- http://trac.bigdata.com/ticket/651 (RWS test failure) +- http://trac.bigdata.com/ticket/652 (Compress write cache blocks for replication and in HALogs) +- http://trac.bigdata.com/ticket/662 (Latency on followers during commit on leader) +- http://trac.bigdata.com/ticket/663 (Issue with OPTIONAL blocks) +- http://trac.bigdata.com/ticket/664 (RWStore needs post-commit protocol) +- http://trac.bigdata.com/ticket/665 (HA3 LOAD non-responsive with node failure) +- http://trac.bigdata.com/ticket/666 (Occasional CI deadlock in HALogWriter testConcurrentRWWriterReader) +- http://trac.bigdata.com/ticket/670 (Accumulating HALog files cause latency for HA commit) +- http://trac.bigdata.com/ticket/671 (Query on follower fails during UPDATE on leader) +- http://trac.bigdata.com/ticket/673 (DGC in release time consensus protocol causes native thread leak in HAJournalServer at each commit) +- http://trac.bigdata.com/ticket/674 (WCS write cache compaction causes errors in RWS postHACommit()) +- http://trac.bigdata.com/ticket/676 (Bad patterns for timeout computations) +- http://trac.bigdata.com/ticket/677 (HA deadlock under UPDATE + QUERY) +- http://trac.bigdata.com/ticket/678 (DGC Thread and Open File Leaks: sendHALogForWriteSet()) +- http://trac.bigdata.com/ticket/679 (HAJournalServer can not restart due to logically empty log file) +- http://trac.bigdata.com/ticket/681 (HAJournalServer deadlock: pipelineRemove() and getLeaderId()) +- http://trac.bigdata.com/ticket/684 (Optimization with skos altLabel) +- http://trac.bigdata.com/ticket/686 (Consensus protocol does not detect clock skew correctly) +- http://trac.bigdata.com/ticket/687 (HAJournalServer Cache not populated) +- http://trac.bigdata.com/ticket/689 (Missing URL encoding in RemoteRepositoryManager) +- http://trac.bigdata.com/ticket/690 (Error when using the alias "a" instead of rdf:type for a multipart insert) +- http://trac.bigdata.com/ticket/691 (Failed to re-interrupt thread in HAJournalServer) +- http://trac.bigdata.com/ticket/692 (Failed to re-interrupt thread) +- http://trac.bigdata.com/ticket/693 (OneOrMorePath SPARQL property path expression ignored) +- http://trac.bigdata.com/ticket/694 (Transparently cancel update/query in RemoteRepository) +- http://trac.bigdata.com/ticket/695 (HAJournalServer reports "follower" but is in SeekConsensus and is not participating in commits.)
+- http://trac.bigdata.com/ticket/701 (Problems in BackgroundTupleResult) +- http://trac.bigdata.com/ticket/702 (InvocationTargetException on /namespace call) +- http://trac.bigdata.com/ticket/704 (ask does not return json) +- http://trac.bigdata.com/ticket/705 (Race between QueryEngine.putIfAbsent() and shutdownNow()) +- http://trac.bigdata.com/ticket/706 (MultiSourceSequentialCloseableIterator.nextSource() can throw NPE) +- http://trac.bigdata.com/ticket/707 (BlockingBuffer.close() does not unblock threads) +- http://trac.bigdata.com/ticket/708 (BIND heisenbug - race condition on select query with BIND) +- http://trac.bigdata.com/ticket/711 (sparql protocol: mime type application/sparql-query) +- http://trac.bigdata.com/ticket/712 (SELECT ?x { OPTIONAL { ?x eg:doesNotExist eg:doesNotExist } } incorrect) +- http://trac.bigdata.com/ticket/715 (Interrupt of thread submitting a query for evaluation does not always terminate the AbstractRunningQuery) +- http://trac.bigdata.com/ticket/716 (Verify that IRunningQuery instances (and nested queries) are correctly cancelled when interrupted) +- http://trac.bigdata.com/ticket/718 (HAJournalServer needs to handle ZK client connection loss) +- http://trac.bigdata.com/ticket/720 (HA3 simultaneous service start failure) +- http://trac.bigdata.com/ticket/723 (HA asynchronous tasks must be canceled when invariants are changed) +- http://trac.bigdata.com/ticket/725 (FILTER EXISTS in subselect) +- http://trac.bigdata.com/ticket/726 (Logically empty HALog for committed transaction) +- http://trac.bigdata.com/ticket/727 (DELETE/INSERT fails with OPTIONAL non-matching WHERE) +- http://trac.bigdata.com/ticket/728 (Refactor to create HAClient) +- http://trac.bigdata.com/ticket/729 (ant bundleJar not working) +- http://trac.bigdata.com/ticket/731 (CBD and Update leads to 500 status code) +- http://trac.bigdata.com/ticket/732 (describe statement limit does not work) +- http://trac.bigdata.com/ticket/733 (Range optimizer not optimizing Slice service) +- http://trac.bigdata.com/ticket/734 (two property paths interfere) +- http://trac.bigdata.com/ticket/736 (MIN() malfunction) +- http://trac.bigdata.com/ticket/737 (class cast exception) +- http://trac.bigdata.com/ticket/739 (Inconsistent treatment of bind and optional property path) +- http://trac.bigdata.com/ticket/741 (ctc-striterators should build as independent top-level project (Apache2)) +- http://trac.bigdata.com/ticket/743 (AbstractTripleStore.destroy() does not filter for correct prefix) +- http://trac.bigdata.com/ticket/746 (Assertion error) +- http://trac.bigdata.com/ticket/747 (BOUND bug) +- http://trac.bigdata.com/ticket/748 (incorrect join with subselect renaming vars) +- http://trac.bigdata.com/ticket/754 (Failure to setup SERVICE hook and changeLog for Unisolated and Read/Write connections) +- http://trac.bigdata.com/ticket/755 (Concurrent QuorumActors can interfere leading to failure to progress) +- http://trac.bigdata.com/ticket/756 (order by and group_concat) +- http://trac.bigdata.com/ticket/760 (Code review on 2-phase commit protocol) +- http://trac.bigdata.com/ticket/764 (RESYNC failure (HA)) +- http://trac.bigdata.com/ticket/770 (alpp ordering) +- http://trac.bigdata.com/ticket/772 (Query timeout only checked at operator start/stop.) 
+- http://trac.bigdata.com/ticket/776 (Closed as duplicate of #490) +- http://trac.bigdata.com/ticket/778 (HA Leader fail results in transient problem with allocations on other services) +- http://trac.bigdata.com/ticket/783 (Operator Alerts (HA)) + +1.2.4: + +- http://trac.bigdata.com/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) + +1.2.3: + +- http://trac.bigdata.com/ticket/168 (Maven Build) +- http://trac.bigdata.com/ticket/196 (Journal leaks memory). +- http://trac.bigdata.com/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll) +- http://trac.bigdata.com/ticket/312 (CI (mock) quorums deadlock) +- http://trac.bigdata.com/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.) +- http://trac.bigdata.com/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.) +- http://trac.bigdata.com/ticket/485 (RDFS Plus Profile) +- http://trac.bigdata.com/ticket/495 (SPARQL 1.1 Property Paths) +- http://trac.bigdata.com/ticket/519 (Negative parser tests) +- http://trac.bigdata.com/ticket/531 (SPARQL UPDATE for SOLUTION SETS) +- http://trac.bigdata.com/ticket/535 (Optimize JOIN VARS for Sub-Selects) +- http://trac.bigdata.com/ticket/555 (Support PSOutputStream/InputStream at IRawStore) +- http://trac.bigdata.com/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser) +- http://trac.bigdata.com/ticket/570 (MemoryManager Journal does not implement all methods). +- http://trac.bigdata.com/ticket/575 (NSS Admin API) +- http://trac.bigdata.com/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select) +- http://trac.bigdata.com/ticket/578 (Concise Bounded Description (CBD)) +- http://trac.bigdata.com/ticket/579 (CONSTRUCT should use distinct SPO filter) +- http://trac.bigdata.com/ticket/583 (VoID in ServiceDescription) +- http://trac.bigdata.com/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) +- http://trac.bigdata.com/ticket/590 (nxparser fails with uppercase language tag) +- http://trac.bigdata.com/ticket/592 (Optimize RWStore allocator sizes) +- http://trac.bigdata.com/ticket/593 (Upgrade to Sesame 2.6.10) +- http://trac.bigdata.com/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default) +- http://trac.bigdata.com/ticket/596 (Change web.xml parameter names to be consistent with Jini/River) +- http://trac.bigdata.com/ticket/597 (SPARQL UPDATE LISTENER) +- http://trac.bigdata.com/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations) +- http://trac.bigdata.com/ticket/599 (BlobIV for blank node : NotMaterializedException) +- http://trac.bigdata.com/ticket/600 (BlobIV collision counter hits false limit.)
+- http://trac.bigdata.com/ticket/601 (Log uncaught exceptions)
+- http://trac.bigdata.com/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://trac.bigdata.com/ticket/607 (History service / index)
+- http://trac.bigdata.com/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level)
+- http://trac.bigdata.com/ticket/609 (bigdata-ganglia is required dependency for Journal)
+- http://trac.bigdata.com/ticket/611 (The code that processes SPARQL Update has a typo)
+- http://trac.bigdata.com/ticket/612 (Bigdata scale-up depends on zookeeper)
+- http://trac.bigdata.com/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs)
+- http://trac.bigdata.com/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry)
+- http://trac.bigdata.com/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join)
+- http://trac.bigdata.com/ticket/616 (Row store read/update not isolated on Journal)
+- http://trac.bigdata.com/ticket/617 (Concurrent KB create fails with "No axioms defined?")
+- http://trac.bigdata.com/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB)
+- http://trac.bigdata.com/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests)
+- http://trac.bigdata.com/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.)
+- http://trac.bigdata.com/ticket/626 (Expose performance counters for read-only indices)
+- http://trac.bigdata.com/ticket/627 (Environment variable override for NSS properties file)
+- http://trac.bigdata.com/ticket/628 (Create a bigdata-client jar for the NSS REST API)
+- http://trac.bigdata.com/ticket/631 (ClassCastException in SIDs mode query)
+- http://trac.bigdata.com/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings)
+- http://trac.bigdata.com/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position)
+- http://trac.bigdata.com/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms)
+- http://trac.bigdata.com/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty)
+- http://trac.bigdata.com/ticket/642 (Property paths do not work inside of exists/not exists filters)
+- http://trac.bigdata.com/ticket/643 (Add web.xml parameters to lock down public NSS end points)
+- http://trac.bigdata.com/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close())
+- http://trac.bigdata.com/ticket/650 (Can not POST RDF to a graph using REST API)
+- http://trac.bigdata.com/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap())
+- http://trac.bigdata.com/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data)
+- http://trac.bigdata.com/ticket/656 (InFactory bug when IN args consist of a single literal)
+- http://trac.bigdata.com/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns)
+- http://trac.bigdata.com/ticket/667 (Provide NanoSparqlServer initialization hook)
+- http://trac.bigdata.com/ticket/669 (Doubly nested subqueries yield no results with LIMIT)
+- http://trac.bigdata.com/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency)
+- http://trac.bigdata.com/ticket/682 (AtomicRowFilter UnsupportedOperationException)
+
+1.2.2:
+
+- http://trac.bigdata.com/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.)
+- http://trac.bigdata.com/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://trac.bigdata.com/ticket/603 (Prepare critical maintenance release as branch of 1.2.1)
+
+1.2.1:
+
+- http://trac.bigdata.com/ticket/533 (Review materialization for inline IVs)
+- http://trac.bigdata.com/ticket/539 (NotMaterializedException with REGEX and Vocab)
+- http://trac.bigdata.com/ticket/540 (SPARQL UPDATE using NSS via index.html)
+- http://trac.bigdata.com/ticket/541 (MemoryManaged backed Journal mode)
+- http://trac.bigdata.com/ticket/546 (Index cache for Journal)
+- http://trac.bigdata.com/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler))
+- http://trac.bigdata.com/ticket/550 (NPE in Leaf.getKey() : root cause was user error)
+- http://trac.bigdata.com/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA)
+- http://trac.bigdata.com/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder)
+- http://trac.bigdata.com/ticket/563 (DISTINCT ORDER BY)
+- http://trac.bigdata.com/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation)
+- http://trac.bigdata.com/ticket/568 (DELETE WHERE fails with Java AssertionError)
+- http://trac.bigdata.com/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception)
+- http://trac.bigdata.com/ticket/571 (DELETE/INSERT WHERE handling of blank nodes)
+- http://trac.bigdata.com/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node)
+
+1.2.0: (*)
+
+- http://trac.bigdata.com/ticket/92 (Monitoring webapp)
+- http://trac.bigdata.com/ticket/267 (Support evaluation of 3rd party operators)
+- http://trac.bigdata.com/ticket/337 (Compact and efficient movement of binding sets between nodes.)
+- http://trac.bigdata.com/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak)
+- http://trac.bigdata.com/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers)
+- http://trac.bigdata.com/ticket/438 (KeyBeforePartitionException on cluster)
+- http://trac.bigdata.com/ticket/439 (Class loader problem)
+- http://trac.bigdata.com/ticket/441 (Ganglia integration)
+- http://trac.bigdata.com/ticket/443 (Logger for RWStore transaction service and recycler)
+- http://trac.bigdata.com/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster)
+- http://trac.bigdata.com/ticket/445 (RWStore does not track tx release correctly)
+- http://trac.bigdata.com/ticket/446 (HTTP Repository broken with bigdata 1.1.0)
+- http://trac.bigdata.com/ticket/448 (SPARQL 1.1 UPDATE)
+- http://trac.bigdata.com/ticket/449 (SPARQL 1.1 Federation extension)
+- http://trac.bigdata.com/ticket/451 (Serialization error in SIDs mode on cluster)
+- http://trac.bigdata.com/ticket/454 (Global Row Store Read on Cluster uses Tx)
+- http://trac.bigdata.com/ticket/456 (IExtension implementations do point lookups on lexicon)
+- http://trac.bigdata.com/ticket/457 ("No such index" on cluster under concurrent query workload)
+- http://trac.bigdata.com/ticket/458 (Java level deadlock in DS)
+- http://trac.bigdata.com/ticket/460 (Uncaught interrupt resolving RDF terms)
+- http://trac.bigdata.com/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster)
+- http://trac.bigdata.com/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension)
+- http://trac.bigdata.com/ticket/464 (Query statistics do not update correctly on cluster)
+- http://trac.bigdata.com/ticket/465 (Too many GRS reads on cluster)
+- http://trac.bigdata.com/ticket/469 (Sail does not flush assertion buffers before query)
+- http://trac.bigdata.com/ticket/472 (acceptTaskService pool size on cluster)
+- http://trac.bigdata.com/ticket/475 (Optimize serialization for query messages on cluster)
+- http://trac.bigdata.com/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree)
+- http://trac.bigdata.com/ticket/478 (Cluster does not map input solution(s) across shards)
+- http://trac.bigdata.com/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal)
+- http://trac.bigdata.com/ticket/481 (PhysicalAddressResolutionException against 1.0.6)
+- http://trac.bigdata.com/ticket/482 (RWStore reset() should be thread-safe for concurrent readers)
+- http://trac.bigdata.com/ticket/484 (Java API for NanoSparqlServer REST API)
+- http://trac.bigdata.com/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache)
+- http://trac.bigdata.com/ticket/492 (Empty chunk in ThickChunkMessage (cluster))
+- http://trac.bigdata.com/ticket/493 (Virtual Graphs)
+- http://trac.bigdata.com/ticket/496 (Sesame 2.6.3)
+- http://trac.bigdata.com/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE)
+- http://trac.bigdata.com/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.)
+- http://trac.bigdata.com/ticket/500 (SPARQL 1.1 Service Description)
+- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with a solution set as input should produce an empty solution as output)
+- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1)
+- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY)
+- http://trac.bigdata.com/ticket/501 (SPARQL 1.1 BINDINGS are ignored)
+- http://trac.bigdata.com/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException where it should throw NoSuchElementException)
+- http://trac.bigdata.com/ticket/504 (UNION with Empty Group Pattern)
+- http://trac.bigdata.com/ticket/505 (Exception when using SPARQL sort & statement identifiers)
+- http://trac.bigdata.com/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x)
+- http://trac.bigdata.com/ticket/508 (LIMIT causes hash join utility to log errors)
+- http://trac.bigdata.com/ticket/513 (Expose the LexiconConfiguration to Function BOPs)
+- http://trac.bigdata.com/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results)
+- http://trac.bigdata.com/ticket/516 (REGEXBOp should cache the Pattern when it is a constant)
+- http://trac.bigdata.com/ticket/517 (Java 7 Compiler Compatibility)
+- http://trac.bigdata.com/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.)
+- http://trac.bigdata.com/ticket/520 (CONSTRUCT WHERE shortcut)
+- http://trac.bigdata.com/ticket/521 (Incremental materialization of Tuple and Graph query results)
+- http://trac.bigdata.com/ticket/525 (Modify the IChangeLog interface to support multiple agents)
+- http://trac.bigdata.com/ticket/527 (Expose timestamp of LexiconRelation to function bops)
+- http://trac.bigdata.com/ticket/532 (ClassCastException during hash join (can not be cast to TermId))
+- http://trac.bigdata.com/ticket/533 (Review materialization for inline IVs)
+- http://trac.bigdata.com/ticket/534 (BSBM BI Q5 error using MERGE JOIN)
+
+1.1.0 (*)
+
+ - http://trac.bigdata.com/ticket/23 (Lexicon joins)
+ - http://trac.bigdata.com/ticket/109 (Store large literals as "blobs")
+ - http://trac.bigdata.com/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://trac.bigdata.com/ticket/203 (Implement a persistence capable hash table to support analytic query)
+ - http://trac.bigdata.com/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.)
+ - http://trac.bigdata.com/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without)
+ - http://trac.bigdata.com/ticket/232 (Bottom-up evaluation semantics)
+ - http://trac.bigdata.com/ticket/246 (Derived xsd numeric data types must be inlined as extension types.)
+ - http://trac.bigdata.com/ticket/254 (Revisit pruning of intermediate variable bindings during query execution)
+ - http://trac.bigdata.com/ticket/261 (Lift conditions out of subqueries.)
+ - http://trac.bigdata.com/ticket/300 (Native ORDER BY)
+ - http://trac.bigdata.com/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes)
+ - http://trac.bigdata.com/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar)
+ - http://trac.bigdata.com/ticket/334 (Support inlining of unicode data in the statement indices.)
+ - http://trac.bigdata.com/ticket/364 (Scalable default graph evaluation)
+ - http://trac.bigdata.com/ticket/368 (Prune variable bindings during query evaluation)
+ - http://trac.bigdata.com/ticket/370 (Direct translation of openrdf AST to bigdata AST)
+ - http://trac.bigdata.com/ticket/373 (Fix StrBOp and other IValueExpressions)
+ - http://trac.bigdata.com/ticket/377 (Optimize OPTIONALs with multiple statement patterns.)
+ - http://trac.bigdata.com/ticket/380 (Native SPARQL evaluation on cluster)
+ - http://trac.bigdata.com/ticket/387 (Cluster does not compute closure)
+ - http://trac.bigdata.com/ticket/395 (HTree hash join performance)
+ - http://trac.bigdata.com/ticket/401 (inline xsd:unsigned datatypes)
+ - http://trac.bigdata.com/ticket/408 (xsd:string cast fails for non-numeric data)
+ - http://trac.bigdata.com/ticket/421 (New query hints model.)
+ - http://trac.bigdata.com/ticket/431 (Use of read-only tx per query defeats cache on cluster)
+
+1.0.3
+
+ - http://trac.bigdata.com/ticket/217 (BTreeCounters does not track bytes released)
+ - http://trac.bigdata.com/ticket/269 (Refactor performance counters using accessor interface)
+ - http://trac.bigdata.com/ticket/329 (B+Tree should delete bloom filter when it is disabled.)
+ - http://trac.bigdata.com/ticket/372 (RWStore does not prune the CommitRecordIndex)
+ - http://trac.bigdata.com/ticket/375 (Persistent memory leaks (RWStore/DISK))
+ - http://trac.bigdata.com/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException)
+ - http://trac.bigdata.com/ticket/391 (Release age advanced on WORM mode journal)
+ - http://trac.bigdata.com/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer)
+ - http://trac.bigdata.com/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API)
+ - http://trac.bigdata.com/ticket/394 (log4j configuration error message in WAR deployment)
+ - http://trac.bigdata.com/ticket/399 (Add a fast range count method to the REST API)
+ - http://trac.bigdata.com/ticket/422 (Support temp triple store wrapped by a BigdataSail)
+ - http://trac.bigdata.com/ticket/424 (NQuads support for NanoSparqlServer)
+ - http://trac.bigdata.com/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out)
+ - http://trac.bigdata.com/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out)
+ - http://trac.bigdata.com/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit)
+ - http://trac.bigdata.com/ticket/435 (Address is 0L)
+ - http://trac.bigdata.com/ticket/436 (TestMROWTransactions failure in CI)
+
+1.0.2
+
+ - http://trac.bigdata.com/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.)
+ - http://trac.bigdata.com/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://trac.bigdata.com/ticket/356 (Query not terminated by error.)
+ - http://trac.bigdata.com/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://trac.bigdata.com/ticket/361 (IRunningQuery not closed promptly.)
+ - http://trac.bigdata.com/ticket/371 (DataLoader fails to load resources available from the classpath.)
+ - http://trac.bigdata.com/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.)
+ - http://trac.bigdata.com/ticket/378 (ClosedByInterruptException during heavy query mix.)
+ - http://trac.bigdata.com/ticket/379 (NotSerializableException for SPOAccessPath.)
+ - http://trac.bigdata.com/ticket/382 (Change dependencies to Apache River 2.2.0)
+
+1.0.1 (*)
+
+ - http://trac.bigdata.com/ticket/107 (Unicode clean schema names in the sparse row store)
+ - http://trac.bigdata.com/ticket/124 (TermIdEncoder should use more bits for scale-out)
+ - http://trac.bigdata.com/ticket/225 (OSX requires specialized performance counter collection classes)
+ - http://trac.bigdata.com/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used)
+ - http://trac.bigdata.com/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance)
+ - http://trac.bigdata.com/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out))
+ - http://trac.bigdata.com/ticket/352 (ClassCastException when querying with binding-values that are not known to the database)
+ - http://trac.bigdata.com/ticket/353 (UnsupportedOperatorException for some SPARQL queries)
+ - http://trac.bigdata.com/ticket/355 (Query failure when comparing with non materialized value)
+ - http://trac.bigdata.com/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".)
+ - http://trac.bigdata.com/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://trac.bigdata.com/ticket/362 (log4j - slf4j bridge.)
+
+For more information about bigdata(R), please see the following links:
+
+[1] http://wiki.bigdata.com/wiki/index.php/Main_Page
+[2] http://wiki.bigdata.com/wiki/index.php/GettingStarted
+[3] http://wiki.bigdata.com/wiki/index.php/Roadmap
+[4] http://www.bigdata.com/bigdata/docs/api/
+[5] http://sourceforge.net/projects/bigdata/
+[6] http://www.bigdata.com/blog
+[7] http://www.systap.com/bigdata.htm
+[8] http://sourceforge.net/projects/bigdata/files/bigdata/
+[9] http://wiki.bigdata.com/wiki/index.php/DataMigration
+[10] http://wiki.bigdata.com/wiki/index.php/HAJournalServer
+[11] http://www.bigdata.com/whitepapers/reifSPARQL.pdf
+[12] http://wiki.bigdata.com/wiki/index.php/RDF_GAS_API
+
+About bigdata:
+
+Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance.

Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.properties	2014-11-05 18:28:32 UTC (rev 8704)
+++ branches/BIGDATA_RELEASE_1_3_0/build.properties	2014-11-06 14:21:08 UTC (rev 8705)
@@ -90,12 +90,12 @@
 release.dir=ant-release
 
 # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0
-build.ver=1.3.3
+build.ver=1.3.4
 build.ver.osgi=1.0
 
 # Set true to do a snapshot build. This changes the value of ${version} to
 # include the date.
-snapshot=true
+snapshot=false
 
 # Javadoc build may be disabled using this property. The javadoc target will
 # not be executed unless this property is defined (its value does not matter).
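As the build.properties comments above note, the snapshot flag controls whether ${version} embeds the build date. A minimal Ant sketch of how such a flag might drive the version string (the target and property names here are hypothetical, for illustration only; the project's actual build.xml may compute ${version} differently):

<project name="version-demo" default="echo-version">
  <!-- Values normally loaded from build.properties. -->
  <property name="build.ver" value="1.3.4"/>
  <property name="snapshot" value="false"/>

  <target name="echo-version">
    <!-- Date stamp, used only when snapshot=true. -->
    <tstamp>
      <format property="build.date" pattern="yyyyMMdd"/>
    </tstamp>
    <!-- snapshot=true appends the date; snapshot=false yields the bare release version. -->
    <condition property="version"
               value="bigdata-${build.ver}-${build.date}"
               else="bigdata-${build.ver}">
      <istrue value="${snapshot}"/>
    </condition>
    <echo message="version=${version}"/>
  </target>
</project>

Under these assumptions, the snapshot=false change in this revision would produce a dateless artifact name such as bigdata-1.3.4, as expected for a release build.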