From: <tho...@us...> - 2014-03-26 01:31:04
Revision: 8016
          http://sourceforge.net/p/bigdata/code/8016
Author:   thompsonbry
Date:     2014-03-26 01:30:59 +0000 (Wed, 26 Mar 2014)

Log Message:
-----------
Bug fix for the concurrent create/drop and list of namespaces. See #867 (NSS concurrency problem with list namespaces and create namespace)

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java	2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -82,6 +82,7 @@
 import com.bigdata.rdf.sail.webapp.client.DefaultClientConnectionManagerFactory;
 import com.bigdata.rdf.sail.webapp.client.HttpException;
 import com.bigdata.rdf.sail.webapp.client.RemoteRepository;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;
 import com.bigdata.util.InnerCause;
 import com.bigdata.util.concurrent.DaemonThreadFactory;
 
@@ -551,6 +552,21 @@
 
     }
 
+    protected RemoteRepositoryManager getRemoteRepositoryManager(final HAGlue haGlue)
+            throws IOException {
+
+        final String endpointURL = getNanoSparqlServerURL(haGlue);
+
+        // Client for talking to the NSS.
+        final HttpClient httpClient = new DefaultHttpClient(ccm);
+
+        final RemoteRepositoryManager repo = new RemoteRepositoryManager(endpointURL,
+                httpClient, executorService);
+
+        return repo;
+
+    }
+
     /**
      * Counts the #of results in a SPARQL result set.
      *

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -0,0 +1,211 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2014.  All rights reserved.
+
+Contact:
+     SYSTAP, LLC
+     4501 Tower Road
+     Greensboro, NC 27410
+     lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+package com.bigdata.journal.jini.ha;
+
+import java.util.Properties;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.openrdf.model.Statement;
+import org.openrdf.query.GraphQueryResult;
+
+import com.bigdata.ha.HAGlue;
+import com.bigdata.ha.HAStatusEnum;
+import com.bigdata.rdf.sail.BigdataSail;
+import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager;
+
+/**
+ * Test case for concurrent list namespace and create namespace operations.
+ * <p>
+ * Note: The underlying issue is NOT HA specific. This test SHOULD be ported
+ * to the standard NSS test suite.
+ * 
+ * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+ *      problem with list namespaces and create namespace </a>
+ */
+public class TestHANamespace extends AbstractHA3JournalServerTestCase {
+
+    public TestHANamespace() {
+    }
+
+    public TestHANamespace(String name) {
+        super(name);
+    }
+
+    /**
+     * Test case for concurrent list namespace and create namespace operations.
+     * <p>
+     * Note: The underlying issue is NOT HA specific. This test SHOULD be ported
+     * to the standard NSS test suite.
+     * 
+     * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+     *      problem with list namespaces and create namespace </a>
+     */
+    public void test_ticket_867() throws Throwable {
+
+        /*
+         * Controls the #of create/drop namespace operations. This many permits
+         * are obtained, and a permit is released each time we do a create
+         * namespace or drop namespace operation.
+         */
+        final int NPERMITS = 50;
+
+        /*
+         * Controls the #of queries that are executed in the main thread
+         * concurrent with those create/drop namespace operations.
+         */
+        final int NQUERIES = 10;
+
+        final String NAMESPACE_PREFIX = getName() + "-";
+
+        final ABC abc = new ABC(false/* simultaneous */);
+
+        // Await quorum meet.
+        final long token = quorum.awaitQuorum(awaitQuorumTimeout,
+                TimeUnit.MILLISECONDS);
+
+        // Figure out which service is the leader.
+        final HAGlue leader = quorum.getClient().getLeader(token);
+
+        // Wait until up and running as the leader.
+        awaitHAStatus(leader, HAStatusEnum.Leader);
+
+        final RemoteRepositoryManager repositoryManager = getRemoteRepositoryManager(leader);
+
+        final Semaphore awaitDone = new Semaphore(0);
+
+        final AtomicReference<Exception> failure = new AtomicReference<Exception>(null);
+
+        try {
+
+            final Thread getNamespacesThread = new Thread(new Runnable() {
+
+                @Override
+                public void run() {
+
+                    try {
+
+                        /*
+                         * Create-delete namespaces with incrementing number in
+                         * name.
+                         */
+                        int n = 0;
+                        while (true) {
+
+                            final String namespace = NAMESPACE_PREFIX + n;
+
+                            final Properties props = new Properties();
+
+                            props.put(BigdataSail.Options.NAMESPACE,
+                                    namespace);
+
+                            if (log.isInfoEnabled())
+                                log.info("Creating namespace " + namespace);
+
+                            repositoryManager
+                                    .createRepository(namespace, props);
+
+                            awaitDone.release(); // release a permit.
+
+                            if (n % 2 == 0) {
+
+                                if (log.isInfoEnabled())
+                                    log.info("Removing namespace " + namespace);
+
+                                repositoryManager.deleteRepository(namespace);
+
+                            }
+
+                            n++;
+
+                        }
+
+                    } catch (Exception e) {
+                        failure.set(e);
+                    } finally {
+                        // release all permits.
+                        awaitDone.release(NPERMITS);
+                    }
+
+                }
+
+            });
+
+            // Start running the create/drop namespace thread.
+            getNamespacesThread.start();
+
+            try {
+                /*
+                 * Run list namespace requests concurrent with the create/drop
+                 * namespace requests.
+                 *
+                 * FIXME Martyn: The list namespace requests should be running
+                 * fully asynchronously with respect to the create/drop
+                 * namespace requests, not getting a new set of permits and then
+                 * just running the list namespace once for those NPERMITS
+                 * create/drop requests. The way this is setup is missing too
+                 * many opportunities for a concurrency issue with only one list
+                 * namespace request per 50 create/drop requests.
+                 */
+                for (int n = 0; n < NQUERIES; n++) {
+
+                    awaitDone.acquire(NPERMITS);
+
+                    if (failure.get() != null)
+                        fail("Thread failure", failure.get());
+
+                    if (log.isInfoEnabled())
+                        log.info("Get namespace list...");
+
+                    try {
+
+                        final GraphQueryResult gqres = repositoryManager
+                                .getRepositoryDescriptions();
+                        int count = 0;
+                        while (gqres.hasNext()) {
+                            final Statement st = gqres.next();
+                            if (log.isInfoEnabled())
+                                log.info("Statement: " + st);
+                            count++;
+                        }
+                        log.warn("Processed " + count + " statements");
+                        assertTrue(count > 0);
+                    } catch (Exception e) {
+                        fail("Unable to retrieve namespaces", e);
+                    }
+
+                }
+            } finally {
+                getNamespacesThread.interrupt();
+            }
+
+        } finally {
+
+            // repositoryManager.shutdown();
+
+        }
+
+    }
+}

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -72,19 +72,14 @@
 import org.openrdf.rio.RDFWriterRegistry;
 import org.openrdf.sail.SailException;
 
-import com.bigdata.bop.BufferAnnotations;
-import com.bigdata.bop.IPredicate;
 import com.bigdata.bop.engine.IRunningQuery;
 import com.bigdata.bop.engine.QueryEngine;
-import com.bigdata.bop.join.PipelineJoin;
-import com.bigdata.btree.IndexMetadata;
 import com.bigdata.counters.CAT;
 import com.bigdata.io.NullOutputStream;
-import com.bigdata.journal.IBufferStrategy;
 import com.bigdata.journal.IIndexManager;
+import com.bigdata.journal.ITransactionService;
 import com.bigdata.journal.ITx;
 import com.bigdata.journal.Journal;
-import com.bigdata.journal.RWStrategy;
 import com.bigdata.journal.TimestampUtility;
 import com.bigdata.rdf.changesets.IChangeLog;
 import com.bigdata.rdf.changesets.IChangeRecord;
@@ -106,9 +101,7 @@
 import com.bigdata.rdf.sparql.ast.QueryType;
 import com.bigdata.rdf.sparql.ast.Update;
 import com.bigdata.rdf.store.AbstractTripleStore;
-import com.bigdata.relation.AbstractResource;
 import com.bigdata.relation.RelationSchema;
-import com.bigdata.rwstore.RWStore;
 import com.bigdata.sparse.ITPS;
 import com.bigdata.sparse.SparseRowStore;
 import com.bigdata.util.concurrent.DaemonThreadFactory;
@@ -2208,273 +2201,163 @@
 
     }
 
     /**
-     * Return various interesting metadata about the KB state.
+     * Return a list of the namespaces for the {@link AbstractTripleStore}s
+     * registered against the bigdata instance.
      *
-     * @todo The range counts can take some time if the cluster is heavily
-     *       loaded since they must query each shard for the primary statement
-     *       index and the TERM2ID index.
+     * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+     *      problem with list namespaces and create namespace </a>
      */
-    protected StringBuilder getKBInfo(final String namespace,
-            final long timestamp) {
+    /*package*/ List<String> getNamespaces(final long timestamp) {
+
+        final long tx = newTx(timestamp);
+
+        try {
+
+            return getNamespaces(timestamp, tx);
+
+        } finally {
+
+            abortTx(tx);
+
+        }
 
-        final StringBuilder sb = new StringBuilder();
+    }
 
-        BigdataSailRepositoryConnection conn = null;
+    private List<String> getNamespaces(long timestamp, final long tx) {
 
-        try {
+        if (timestamp == ITx.READ_COMMITTED) {
 
-            conn = getQueryConnection(namespace, timestamp);
-
-            final AbstractTripleStore tripleStore = conn.getTripleStore();
+            // Use the last commit point.
+            timestamp = getIndexManager().getLastCommitTime();
 
-            sb.append("class\t = " + tripleStore.getClass().getName() + "\n");
+        }
 
-            sb
-                    .append("indexManager\t = "
-                            + tripleStore.getIndexManager().getClass()
-                                    .getName() + "\n");
+        // the triple store namespaces.
+        final List<String> namespaces = new LinkedList<String>();
 
-            sb.append("namespace\t = " + tripleStore.getNamespace() + "\n");
+        if (log.isInfoEnabled())
+            log.info("getNamespaces for " + timestamp);
 
-            sb.append("timestamp\t = "
-                    + TimestampUtility.toString(tripleStore.getTimestamp())
-                    + "\n");
+        final SparseRowStore grs = getIndexManager().getGlobalRowStore(
+                timestamp);
 
-            sb.append("statementCount\t = " + tripleStore.getStatementCount()
-                    + "\n");
+        if (grs == null) {
 
-            sb.append("termCount\t = " + tripleStore.getTermCount() + "\n");
+            log.warn("No GRS @ timestamp="
+                    + TimestampUtility.toString(timestamp));
 
-            sb.append("uriCount\t = " + tripleStore.getURICount() + "\n");
+            // Empty.
+            return namespaces;
 
-            sb.append("literalCount\t = " + tripleStore.getLiteralCount() + "\n");
+        }
 
-            /*
-             * Note: The blank node count is only available when using the told
-             * bnodes mode.
-             */
-            sb
-                    .append("bnodeCount\t = "
-                            + (tripleStore.getLexiconRelation()
-                                    .isStoreBlankNodes() ? ""
-                                    + tripleStore.getBNodeCount() : "N/A")
-                            + "\n");
+        // scan the relation schema in the global row store.
+        @SuppressWarnings("unchecked")
+        final Iterator<ITPS> itr = (Iterator<ITPS>) grs
+                .rangeIterator(RelationSchema.INSTANCE);
 
-            sb.append(IndexMetadata.Options.BTREE_BRANCHING_FACTOR
-                    + "="
-                    + tripleStore.getSPORelation().getPrimaryIndex()
-                            .getIndexMetadata().getBranchingFactor() + "\n");
+        while (itr.hasNext()) {
 
-            sb.append(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY
-                    + "="
-                    + tripleStore.getSPORelation().getPrimaryIndex()
-                            .getIndexMetadata()
-                            .getWriteRetentionQueueCapacity() + "\n");
+            // A timestamped property value set is a logical row with
+            // timestamped property values.
+            final ITPS tps = itr.next();
 
-            sb.append("-- All properties.--\n");
-
-            // get the triple store's properties from the global row store.
-            final Map<String, Object> properties = getIndexManager()
-                    .getGlobalRowStore(timestamp).read(RelationSchema.INSTANCE,
-                            namespace);
+            // If you want to see what is in the TPS, uncomment this.
+            // System.err.println(tps.toString());
 
-            // write them out,
-            for (String key : properties.keySet()) {
-                sb.append(key + "=" + properties.get(key)+"\n");
-            }
+            // The namespace is the primary key of the logical row for the
+            // relation schema.
+            final String namespace = (String) tps.getPrimaryKey();
 
-            /*
-             * And show some properties which can be inherited from
-             * AbstractResource. These have been mainly phased out in favor of
-             * BOP annotations, but there are a few places where they are still
-             * in use.
-             */
-
-            sb.append("-- Interesting AbstractResource effective properties --\n");
-
-            sb.append(AbstractResource.Options.CHUNK_CAPACITY + "="
-                    + tripleStore.getChunkCapacity() + "\n");
+            // Get the name of the implementation class
+            // (AbstractTripleStore, SPORelation, LexiconRelation, etc.)
+            final String className = (String) tps.get(RelationSchema.CLASS)
                    .getValue();
 
-            sb.append(AbstractResource.Options.CHUNK_OF_CHUNKS_CAPACITY + "="
-                    + tripleStore.getChunkOfChunksCapacity() + "\n");
+            if (className == null) {
+                // Skip deleted triple store entry.
+                continue;
+            }
 
-            sb.append(AbstractResource.Options.CHUNK_TIMEOUT + "="
-                    + tripleStore.getChunkTimeout() + "\n");
+            try {
+                final Class<?> cls = Class.forName(className);
+                if (AbstractTripleStore.class.isAssignableFrom(cls)) {
+                    // this is a triple store (vs something else).
+                    namespaces.add(namespace);
+                }
+            } catch (ClassNotFoundException e) {
+                log.error(e, e);
+            }
 
-            sb.append(AbstractResource.Options.FULLY_BUFFERED_READ_THRESHOLD + "="
-                    + tripleStore.getFullyBufferedReadThreshold() + "\n");
+        }
 
-            sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "="
-                    + tripleStore.getMaxParallelSubqueries() + "\n");
+// if (log.isInfoEnabled())
+// log.info("getNamespaces returning " + namespaces.size());
 
-            /*
-             * And show some interesting effective properties for the KB, SPO
-             * relation, and lexicon relation.
-             */
-            sb.append("-- Interesting KB effective properties --\n");
-
-            sb
-                    .append(AbstractTripleStore.Options.TERM_CACHE_CAPACITY
-                            + "="
-                            + tripleStore
-                                    .getLexiconRelation()
-                                    .getProperties()
-                                    .getProperty(
-                                            AbstractTripleStore.Options.TERM_CACHE_CAPACITY,
-                                            AbstractTripleStore.Options.DEFAULT_TERM_CACHE_CAPACITY) + "\n");
+        return namespaces;
 
-            /*
-             * And show several interesting properties with their effective
-             * defaults.
-             */
+    }
+
+    /**
+     * Obtain a new transaction to protect operations against the specified view
+     * of the database.
+     * 
+     * @param timestamp
+     *            The timestamp for the desired view.
+     * 
+     * @return The transaction identifier -or- <code>timestamp</code> if the
+     *         {@link IIndexManager} is not a {@link Journal}.
+     * 
+     * @see ITransactionService#newTx(long)
+     * 
+     * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+     *      problem with list namespaces and create namespace </a>
+     */
+    public long newTx(final long timestamp) {
 
-            sb.append("-- Interesting Effective BOP Annotations --\n");
+        long tx = timestamp; // use dirty reads unless Journal.
-            sb.append(BufferAnnotations.CHUNK_CAPACITY
-                    + "="
-                    + tripleStore.getProperties().getProperty(
-                            BufferAnnotations.CHUNK_CAPACITY,
-                            "" + BufferAnnotations.DEFAULT_CHUNK_CAPACITY)
-                    + "\n");
+        if (getIndexManager() instanceof Journal) {
+            final ITransactionService txs = ((Journal) getIndexManager())
+                    .getLocalTransactionManager().getTransactionService();
 
-            sb
-                    .append(BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY
-                            + "="
-                            + tripleStore
-                                    .getProperties()
-                                    .getProperty(
-                                            BufferAnnotations.CHUNK_OF_CHUNKS_CAPACITY,
-                                            ""
-                                                    + BufferAnnotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY)
-                            + "\n");
-
-            sb.append(BufferAnnotations.CHUNK_TIMEOUT
-                    + "="
-                    + tripleStore.getProperties().getProperty(
-                            BufferAnnotations.CHUNK_TIMEOUT,
-                            "" + BufferAnnotations.DEFAULT_CHUNK_TIMEOUT)
-                    + "\n");
-
-            sb.append(PipelineJoin.Annotations.MAX_PARALLEL_CHUNKS
-                    + "="
-                    + tripleStore.getProperties().getProperty(
-                            PipelineJoin.Annotations.MAX_PARALLEL_CHUNKS,
-                            "" + PipelineJoin.Annotations.DEFAULT_MAX_PARALLEL_CHUNKS) + "\n");
-
-            sb
-                    .append(IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD
-                            + "="
-                            + tripleStore
-                                    .getProperties()
-                                    .getProperty(
-                                            IPredicate.Annotations.FULLY_BUFFERED_READ_THRESHOLD,
-                                            ""
-                                                    + IPredicate.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD)
-                            + "\n");
-
-            // sb.append(tripleStore.predicateUsage());
-
-            if (tripleStore.getIndexManager() instanceof Journal) {
-
-                final Journal journal = (Journal) tripleStore.getIndexManager();
-
-                final IBufferStrategy strategy = journal.getBufferStrategy();
-
-                if (strategy instanceof RWStrategy) {
-
-                    final RWStore store = ((RWStrategy) strategy).getStore();
-
-                    store.showAllocators(sb);
-
-                }
-
+            try {
+                tx = txs.newTx(timestamp);
+            } catch (IOException e) {
+                // Note: Local operation. Will not throw IOException.
+                throw new RuntimeException(e);
             }
 
-        } catch (Throwable t) {
-
-            log.warn(t.getMessage(), t);
-
-        } finally {
-
-            if(conn != null) {
-                try {
-                    conn.close();
-                } catch (RepositoryException e) {
-                    log.error(e, e);
-                }
-
-            }
-
         }
 
-        return sb;
-
+        return tx;
     }
 
-    /**
-     * Return a list of the namespaces for the {@link AbstractTripleStore}s
-     * registered against the bigdata instance.
-     */
-    /*package*/ List<String> getNamespaces(final long timestamp) {
-
-        // the triple store namespaces.
-        final List<String> namespaces = new LinkedList<String>();
+    /**
+     * Abort a transaction obtained by {@link #newTx(long)}.
+     * 
+     * @param tx
+     *            The transaction identifier.
+     */
+    public void abortTx(final long tx) {
+        if (getIndexManager() instanceof Journal) {
+//            if (!TimestampUtility.isReadWriteTx(tx)) {
+//                // Not a transaction.
+//                throw new IllegalStateException();
+//            }
 
-        final SparseRowStore grs = getIndexManager().getGlobalRowStore(
-                timestamp);
+            final ITransactionService txs = ((Journal) getIndexManager())
+                    .getLocalTransactionManager().getTransactionService();
 
-        if (grs == null) {
+            try {
+                txs.abort(tx);
+            } catch (IOException e) {
+                // Note: Local operation. Will not throw IOException.
+                throw new RuntimeException(e);
+            }
 
-            log.warn("No GRS @ timestamp="
-                    + TimestampUtility.toString(timestamp));
+        }
 
-            // Empty.
-            return namespaces;
-
-        }
-
-        // scan the relation schema in the global row store.
-        @SuppressWarnings("unchecked")
-        final Iterator<ITPS> itr = (Iterator<ITPS>) grs
-                .rangeIterator(RelationSchema.INSTANCE);
-
-        while (itr.hasNext()) {
-
-            // A timestamped property value set is a logical row with
-            // timestamped property values.
-            final ITPS tps = itr.next();
-
-            // If you want to see what is in the TPS, uncomment this.
-//            System.err.println(tps.toString());
-
-            // The namespace is the primary key of the logical row for the
-            // relation schema.
-            final String namespace = (String) tps.getPrimaryKey();
-
-            // Get the name of the implementation class
-            // (AbstractTripleStore, SPORelation, LexiconRelation, etc.)
-            final String className = (String) tps.get(RelationSchema.CLASS)
-                    .getValue();
-
-            if (className == null) {
-                // Skip deleted triple store entry.
-                continue;
-            }
-
-            try {
-                final Class<?> cls = Class.forName(className);
-                if (AbstractTripleStore.class.isAssignableFrom(cls)) {
-                    // this is a triple store (vs something else).
-                    namespaces.add(namespace);
-                }
-            } catch (ClassNotFoundException e) {
-                log.error(e,e);
-            }
-
-        }
-
-        return namespaces;
-
-    }
-
+    }
+
+}

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java	2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -343,17 +343,6 @@
 //        context.setAttribute(BigdataServlet.ATTRIBUTE_SPARQL_CACHE,
 //                new SparqlCache(new MemoryManager(DirectBufferPool.INSTANCE)));
 
-        if (log.isInfoEnabled()) {
-            /*
-             * Log some information about the default kb (#of statements, etc).
-             */
-            final long effectiveTimestamp = config.timestamp == ITx.READ_COMMITTED ? indexManager
-                    .getLastCommitTime() : config.timestamp;
-            log.info("\n"
-                    + rdfContext
-                            .getKBInfo(config.namespace, effectiveTimestamp));
-        }
-
         {
 
             final boolean forceOverflow = Boolean.valueOf(context

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java	2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -459,30 +459,47 @@
      * @throws IOException
      */
     private void doShowProperties(final HttpServletRequest req,
-            final HttpServletResponse resp) throws IOException {
+            final HttpServletResponse resp) throws IOException {
 
-        final String namespace = getNamespace(req);
+        final String namespace = getNamespace(req);
 
-        final long timestamp = getTimestamp(req);
+        long timestamp = getTimestamp(req);
 
-        final AbstractTripleStore tripleStore = getBigdataRDFContext()
-                .getTripleStore(namespace, timestamp);
+        if (timestamp == ITx.READ_COMMITTED) {
 
-        if (tripleStore == null) {
-            /*
-             * There is no such triple/quad store instance.
-             */
-            buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN);
-            return;
-        }
+            // Use the last commit point.
+            timestamp = getIndexManager().getLastCommitTime();
 
-        final Properties properties = PropertyUtil.flatCopy(tripleStore
-                .getProperties());
+        }
 
-        sendProperties(req, resp, properties);
-
-    }
+        final long tx = getBigdataRDFContext().newTx(timestamp);
+
+        try {
+
+            final AbstractTripleStore tripleStore = getBigdataRDFContext()
+                    .getTripleStore(namespace, timestamp);
 
+            if (tripleStore == null) {
+                /*
+                 * There is no such triple/quad store instance.
+                 */
+                buildResponse(resp, HTTP_NOTFOUND, MIME_TEXT_PLAIN);
+                return;
+            }
+
+            final Properties properties = PropertyUtil.flatCopy(tripleStore
+                    .getProperties());
+
+            sendProperties(req, resp, properties);
+
+        } finally {
+
+            getBigdataRDFContext().abortTx(tx);
+
+        }
+
+    }
+
     /**
      * Generate a VoID Description for the known namespaces.
     */
@@ -498,51 +515,66 @@
 
         }
 
-        /*
-         * The set of registered namespaces for KBs.
+        /**
+         * Protect the entire operation with a transaction, including the
+         * describe of each namespace that we discover.
+         * 
+         * @see <a href="http://trac.bigdata.com/ticket/867"> NSS concurrency
+         *      problem with list namespaces and create namespace </a>
         */
-        final List<String> namespaces = getBigdataRDFContext()
-                .getNamespaces(timestamp);
+        final long tx = getBigdataRDFContext().newTx(timestamp);
+
+        try {
+            /*
+             * The set of registered namespaces for KBs.
+             */
+            final List<String> namespaces = getBigdataRDFContext()
+                    .getNamespaces(timestamp);
 
-        final Graph g = new GraphImpl();
+            final Graph g = new GraphImpl();
 
-        for(String namespace : namespaces) {
-
-            // Get a view onto that KB instance for that timestamp.
-            final AbstractTripleStore tripleStore = getBigdataRDFContext()
-                    .getTripleStore(namespace, timestamp);
+            for (String namespace : namespaces) {
 
-            if (tripleStore == null) {
+                // Get a view onto that KB instance for that timestamp.
+                final AbstractTripleStore tripleStore = getBigdataRDFContext()
+                        .getTripleStore(namespace, timestamp);
 
-                /*
-                 * There is no such triple/quad store instance (could be a
-                 * concurrent delete of the namespace).
-                 */
-
-                continue;
-
-            }
+                if (tripleStore == null) {
 
-            final BNode aDataSet = g.getValueFactory().createBNode();
-
-            /*
-             * Figure out the service end point.
-             * 
-             * Note: This is just the requestURL as reported. This makes is
-             * possible to support virtual hosting and similar http proxy
-             * patterns since the SPARQL end point is just the URL at which the
-             * service is responding.
-             */
-            final String serviceURI = req.getRequestURL().toString();
-
-            final VoID v = new VoID(g, tripleStore, serviceURI, aDataSet);
+                    /*
+                     * There is no such triple/quad store instance (could be a
+                     * concurrent delete of the namespace).
+                     */
 
-            v.describeDataSet(false/* describeStatistics */,
-                    getBigdataRDFContext().getConfig().describeEachNamedGraph);
+                    continue;
 
+                }
+
+                final BNode aDataSet = g.getValueFactory().createBNode();
+
+                /*
+                 * Figure out the service end point.
+                 * 
+                 * Note: This is just the requestURL as reported. This makes is
+                 * possible to support virtual hosting and similar http proxy
+                 * patterns since the SPARQL end point is just the URL at which
+                 * the service is responding.
+                 */
+                final String serviceURI = req.getRequestURL().toString();
+
+                final VoID v = new VoID(g, tripleStore, serviceURI, aDataSet);
+
+                v.describeDataSet(
+                        false/* describeStatistics */,
+                        getBigdataRDFContext().getConfig().describeEachNamedGraph);
+
+            }
+
+            sendGraph(req, resp, g);
+
+        } finally {
+
+            getBigdataRDFContext().abortTx(tx);
+
        }
-
-        sendGraph(req, resp, g);
 
     }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java	2014-03-24 15:41:50 UTC (rev 8015)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java	2014-03-26 01:30:59 UTC (rev 8016)
@@ -58,7 +58,6 @@
 import com.bigdata.journal.AbstractJournal;
 import com.bigdata.journal.DumpJournal;
 import com.bigdata.journal.IIndexManager;
-import com.bigdata.journal.ITx;
 import com.bigdata.journal.Journal;
 import com.bigdata.rdf.sail.sparql.ast.SimpleNode;
 import com.bigdata.rdf.sail.webapp.BigdataRDFContext.AbstractQueryTask;
@@ -97,12 +96,6 @@
             .getLogger(StatusServlet.class);
 
     /**
-     * The name of a request parameter used to request metadata about the
-     * default namespace.
-     */
-    private static final String SHOW_KB_INFO = "showKBInfo";
-
-    /**
      * The name of a request parameter used to request a list of the namespaces
      * which could be served.
      */
@@ -415,9 +408,6 @@
             maxBopLength = 0;
         }
 
-        // Information about the KB (stats, properties).
-        final boolean showKBInfo = req.getParameter(SHOW_KB_INFO) != null;
-
         // bigdata namespaces known to the index manager.
        final boolean showNamespaces = req.getParameter(SHOW_NAMESPACES) != null;
 
@@ -542,19 +532,10 @@
 
         }
 
-        if (showNamespaces) {
-
-            long timestamp = getTimestamp(req);
-
-            if (timestamp == ITx.READ_COMMITTED) {
-
-                // Use the last commit point.
-                timestamp = getIndexManager().getLastCommitTime();
-
-            }
-
+        if (showNamespaces) {
+
             final List<String> namespaces = getBigdataRDFContext()
-                    .getNamespaces(timestamp);
+                    .getNamespaces(getTimestamp(req));
 
             current.node("h3", "Namespaces: ");
 
         }
 
-        }
+        }
 
-        if (showKBInfo) {
-
-            // General information on the connected kb.
-            current.node("pre", getBigdataRDFContext().getKBInfo(
-                    getNamespace(req), getTimestamp(req)).toString());
-
-        }
-
         /*
         * Performance counters for the QueryEngine.
         */
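The core of the fix above is a bracketing pattern: pin a consistent view of the database before reading (newTx), do every read against that one view, then release it (abortTx). Below is a minimal, self-contained Java sketch of the same idea. NamespaceCatalog and its methods are hypothetical stand-ins, not the bigdata API: an immutable snapshot map plays the role of the commit point that newTx() pins, and copy-on-write replacement plays the role of a commit.

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Hypothetical analogue of the fix: readers pin an immutable snapshot of the
 * namespace catalog (standing in for the read-only tx that pins a commit
 * point), so a concurrent create/drop can never mutate the view a reader is
 * iterating.
 */
public class NamespaceCatalog {

    // The "last commit point": an immutable map, swapped atomically by writers.
    private final AtomicReference<Map<String, Properties>> committed =
            new AtomicReference<Map<String, Properties>>(
                    Collections.<String, Properties> emptyMap());

    /** Analogue of newTx(timestamp): pin the current committed view. */
    public Map<String, Properties> newTx() {
        return committed.get(); // immutable, so there is nothing to abort.
    }

    /** Analogue of createRepository(namespace, props): copy-on-write "commit". */
    public void create(final String namespace, final Properties props) {
        while (true) {
            final Map<String, Properties> old = committed.get();
            final Map<String, Properties> next =
                    new TreeMap<String, Properties>(old);
            next.put(namespace, props);
            if (committed.compareAndSet(old, Collections.unmodifiableMap(next)))
                return; // else we lost a race with another writer; retry.
        }
    }

    /** Analogue of deleteRepository(namespace). */
    public void drop(final String namespace) {
        while (true) {
            final Map<String, Properties> old = committed.get();
            final Map<String, Properties> next =
                    new TreeMap<String, Properties>(old);
            next.remove(namespace);
            if (committed.compareAndSet(old, Collections.unmodifiableMap(next)))
                return;
        }
    }

    /** Analogue of getNamespaces(timestamp): all reads against one pinned view. */
    public List<String> getNamespaces() {
        final Map<String, Properties> view = newTx(); // pin the view.
        return new LinkedList<String>(view.keySet()); // iterate only that view.
    }

}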
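And a small driver in the spirit of TestHANamespace.test_ticket_867() above: a mutator thread creates and drops namespaces while the main thread repeatedly lists them, with a Semaphore gating one list probe per batch of create/drop operations. This is again a sketch against the hypothetical NamespaceCatalog, not something runnable against a real NSS.

import java.util.Properties;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReference;

public class NamespaceStressDriver {

    public static void main(final String[] args) throws InterruptedException {

        final int NPERMITS = 50; // create/drop ops per list-namespaces probe.
        final int NQUERIES = 10; // number of list-namespaces probes.

        final NamespaceCatalog catalog = new NamespaceCatalog();
        final Semaphore awaitDone = new Semaphore(0);
        final AtomicReference<Exception> failure =
                new AtomicReference<Exception>(null);

        final Thread mutator = new Thread(new Runnable() {
            public void run() {
                try {
                    int n = 0;
                    while (!Thread.currentThread().isInterrupted()) {
                        final String namespace = "kb-" + n;
                        catalog.create(namespace, new Properties());
                        awaitDone.release(); // one permit per create.
                        if (n % 2 == 0)
                            catalog.drop(namespace); // drop every other one.
                        n++;
                    }
                } catch (Exception e) {
                    failure.set(e);
                } finally {
                    awaitDone.release(NPERMITS); // never wedge the main thread.
                }
            }
        });

        mutator.start();
        try {
            for (int n = 0; n < NQUERIES; n++) {
                awaitDone.acquire(NPERMITS); // wait out a batch of create/drop.
                if (failure.get() != null)
                    throw new RuntimeException(failure.get());
                // A torn view or ConcurrentModificationException here would
                // reproduce the class of bug fixed by r8016.
                System.out.println("namespaces: "
                        + catalog.getNamespaces().size());
            }
        } finally {
            mutator.interrupt();
        }

    }

}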