|
From: <tho...@us...> - 2014-06-04 15:37:18
|
Revision: 8445
http://sourceforge.net/p/bigdata/code/8445
Author: thompsonbry
Date: 2014-06-04 15:37:13 +0000 (Wed, 04 Jun 2014)
Log Message:
-----------
We have extensively modified the TestMROWTransaction test suite. This class is designed to test for problems where there is a single writer and concurrent readers. We have modified the class to force spurious failures in the BTree.writeNodeOrLeaf() method. These failures directly simulate the behavior on the system of an exception in DefaultNodeCoder.encodeLive(). That method does not have a side-effect. It either succeeds, in which case the caller applies the side-effect, or it fails, in which case there is no side effect. We have demonstrated that the Sail level rollback correctly discards the partial update of the index and that new writer threads continue to make progress and that readers do not observe errors. This effectively disproves the hypothesis that rollback() was failing to discard some state.
See #855 (AssertionError: Child does not have persistent identity)
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2014-06-04 10:55:01 UTC (rev 8444)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactions.java 2014-06-04 15:37:13 UTC (rev 8445)
@@ -1,460 +1,652 @@
-package com.bigdata.rdf.sail;
-
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.log4j.Logger;
-import org.openrdf.model.URI;
-import org.openrdf.model.impl.URIImpl;
-
-import com.bigdata.btree.IndexMetadata;
-import com.bigdata.counters.CAT;
-import com.bigdata.journal.BufferMode;
-import com.bigdata.journal.ITx;
-import com.bigdata.journal.Journal;
-import com.bigdata.rdf.axioms.NoAxioms;
-import com.bigdata.rdf.sail.BigdataSail.Options;
-import com.bigdata.rdf.store.AbstractTripleStore;
-import com.bigdata.rdf.store.BD;
-import com.bigdata.rdf.store.BigdataStatementIterator;
-import com.bigdata.rdf.vocab.NoVocabulary;
-import com.bigdata.service.AbstractTransactionService;
-import com.bigdata.util.InnerCause;
-import com.bigdata.util.concurrent.DaemonThreadFactory;
-
-abstract public class TestMROWTransactions extends ProxyBigdataSailTestCase {
-
- private static final Logger txLog = Logger.getLogger("com.bigdata.txLog");
-
- TestMROWTransactions() {
- }
-
- TestMROWTransactions(String arg0) {
- super(arg0);
- }
-
- void domultiple_csem_transaction_onethread(final int retentionMillis) throws Exception {
-
- domultiple_csem_transaction_onethread(retentionMillis, 2000, 50);
-
- }
-
- void domultiple_csem_transaction(final int retentionMillis) throws Exception {
-
- domultiple_csem_transaction2(retentionMillis, 2/* nreaderThreads */,
- 1000/* nwriters */, 20 * 1000/* nreaders */);
-
- }
-
- /**
- *
- * @param retentionMillis
- * The retention time (milliseconds).
- * @param nreaderThreads
- * The #of threads running reader tasks. Increase nreaderThreads
- * to increase chance startup condition and decrement to increase
- * chance of commit point with no open read-only transaction (no
- * sessions). Value is in [1:...].
- * @param nwriters
- * The #of writer tasks (there is only one writer thread).
- * @param nreaders
- * The #of reader tasks.
- *
- * @throws Exception
- */
- void domultiple_csem_transaction2(final int retentionMillis,
- final int nreaderThreads, final int nwriters, final int nreaders)
- throws Exception {
-
- /**
- * The most likely problem is related to the session protection in the
- * RWStore. In development we saw problems when concurrent transactions
- * had reduced the open/active transactions to zero, therefore releasing
- * session protection. If the protocol works correctly we should never
- * release session protection if any transaction has been initialized.
- *
- * The message of "invalid address" would be generated if an allocation
- * has been freed and is no longer protected from recycling when an
- * attempt is made to read from it.
- *
- * TODO Experiment with different values of [nthreads] for the with and
- * w/o history variations of this test. Consider lifting that parameter
- * into the signature of this method.
- */
- final int nuris = 2000; // number of unique subject/objects
- final int npreds = 50; //
- // final PseudoRandom r = new PseudoRandom(2000);
- // r.next(1500);
- final Random r = new Random();
-
- final CAT commits = new CAT();
- final CAT nreadersDone = new CAT();
- final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null);
- // Set [true] iff there are no failures by the time we cancel the running tasks.
- final AtomicBoolean success = new AtomicBoolean(false);
- final BigdataSail sail = getSail(getProperties(retentionMillis));
- // log.warn("Journal: "+sail.getDatabase().getIndexManager()+", file="+((Journal)sail.getDatabase().getIndexManager()).getFile());
- try {
-
- sail.initialize();
- final BigdataSailRepository repo = new BigdataSailRepository(sail);
- final AbstractTripleStore origStore = repo.getDatabase();
-
- final URI[] subs = new URI[nuris];
- for (int i = 0; i < nuris; i++) {
- subs[i] = uri("uri:" + i);
- }
- final URI[] preds = new URI[npreds];
- for (int i = 0; i < npreds; i++) {
- preds[i] = uri("pred:" + i);
- }
-
- // Writer task adds nwrites statements then commits
- class Writer implements Callable<Long> {
- final int nwrites;
-
- Writer(final int nwrites) {
- this.nwrites = nwrites;
- }
-
- public Long call() throws Exception {
- try {
- final boolean isQuads = origStore.isQuads();
- // Thread.sleep(r.nextInt(2000) + 500);
- try {
-
- for (int i = 0; i < nwrites; i++) {
- origStore
- .addStatement(
- subs[r.nextInt(nuris)],
- preds[r.nextInt(npreds)],
- subs[r.nextInt(nuris)],
- isQuads ? subs[r.nextInt(nuris)]
- : null);
- // System.out.print('.');
- }
- // System.out.println("\n");
- commits.increment();
-
- } finally {
- origStore.commit();
- if (log.isInfoEnabled()) {
- log.info("Commit #" + commits);
- }
- }
- } catch (Throwable ise) {
- if (!InnerCause.isInnerCause(ise,
- InterruptedException.class)) {
- if (failex
- .compareAndSet(null/* expected */, ise/* newValue */)) {
- log.error("firstCause:" + ise, ise);
- } else {
- if (log.isInfoEnabled())
- log.info("Other error: " + ise, ise);
- }
- } else {
- // Ignore.
- }
- }
- return null;
- }
-
- }
-
- // ReaderTask makes nreads and closes
- class Reader implements Callable<Long> {
- final int nreads;
-
- Reader(final int nwrites) {
- this.nreads = nwrites;
- }
-
- public Long call() throws Exception {
- try {
- final Long txId = ((Journal) origStore
- .getIndexManager()).newTx(ITx.READ_COMMITTED);
-
- try {
- /*
- * Note: This sleep makes it much easier to hit the
- * bug documented here. However, the sleep can also
- * cause the test to really stretch out. So the
- * sleep is only used until the writers are done.
- *
- * https://sourceforge.net/apps/trac/bigdata/ticket/467
- */
- if (commits.get() < nwriters)
- Thread.sleep(2000/* millis */);
- txLog.info("Reading with tx: " + txId);
- final AbstractTripleStore readstore = (AbstractTripleStore) origStore
- .getIndexManager().getResourceLocator()
- .locate(origStore.getNamespace(), txId);
-
- for (int i = 0; i < nreads; i++) {
- final BigdataStatementIterator stats = readstore
- .getStatements(subs[r.nextInt(nuris)],
- null, null);
- try {
- while (stats.hasNext()) {
- stats.next();
- }
- } finally {
- stats.close();
- }
- }
-
- txLog.info("Finished with tx: " + txId);
- } catch (IllegalStateException ise) {
- txLog.info("IllegalStateException tx: " + txId);
- failex.compareAndSet(null, ise);
- } catch (Exception e) {
- txLog.info("UnexpectedException tx: " + txId);
- failex.compareAndSet(null, e);
- throw e;
- } finally {
- txLog.info("Aborting tx: " + txId);
- ((Journal) origStore.getIndexManager()).abort(txId);
- nreadersDone.increment();
- }
- } catch (Throwable ise) {
- if (!InnerCause.isInnerCause(ise,
- InterruptedException.class)) {
- if (failex
- .compareAndSet(null/* expected */, ise/* newValue */)) {
- log.error("firstCause:" + ise, ise);
- } else {
- if (log.isInfoEnabled())
- log.info("Other error: " + ise, ise);
- }
- } else {
- // Ignore.
- }
- }
- return null;
- }
-
- }
-
- ExecutorService writers = null;
- ExecutorService readers = null;
- try {
-
- writers = Executors
- .newSingleThreadExecutor(new DaemonThreadFactory(
- "test-writer-pool"));
-
- readers = Executors.newFixedThreadPool(nreaderThreads,
- new DaemonThreadFactory("test-reader-pool"));
-
- // let's schedule a few writers and readers (more than needed)
- // writers.submit(new Writer(5000000/* nwrite */));
- Future<Long> lastWriterFuture = null;
- Future<Long> lastReaderFuture = null;
- for (int i = 0; i < nwriters; i++) {
- lastWriterFuture = writers
- .submit(new Writer(500/* nwrite */));
- }
- for (int rdrs = 0; rdrs < nreaders; rdrs++) {
- lastReaderFuture = readers
- .submit(new Reader(60/* nread */));
- }
-
- // let the writers run riot for a time, checking for failure
- while (true) {
- final boolean bothDone = lastWriterFuture.isDone()
- && lastReaderFuture.isDone();
- if (bothDone)
- break;
- if (failex.get() != null) {
- // Something errored.
- break;
- }
- Thread.sleep(1000/* ms */);
- }
- // for (int i = 0; i < 600; i++) {
- // Thread.sleep(1000);
- // if (failex.get() != null)
- // break;
- // }
- if (failex.get() == null) {
- /*
- * Note whether or not there are failures before we
- * interrupt the running tasks.
- */
- success.set(true);
- }
- writers.shutdownNow();
- readers.shutdownNow();
- writers.awaitTermination(5, TimeUnit.SECONDS);
- readers.awaitTermination(5, TimeUnit.SECONDS);
- if (!success.get()) {
- final Throwable ex = failex.get();
- if (ex != null) {
- fail("Test failed: firstCause=" + ex
- + ", retentionMillis=" + retentionMillis
- + ", nreaderThreads=" + nreaderThreads
- + ", nwriters=" + nwriters + ", nreaders="
- + nreaders + ", indexManager="
- + repo.getDatabase().getIndexManager(), ex);
- }
- }
- if (log.isInfoEnabled())
- log.info("Writers committed: " + commits.get()
- + ", readers done: " + nreadersDone.get());
- } finally {
- if (writers != null)
- writers.shutdownNow();
- if (readers != null)
- readers.shutdownNow();
- }
- } finally {
-
- sail.__tearDownUnitTest();
-
- }
-
- }
-
- void domultiple_csem_transaction_onethread(final int retention, final int nuris, final int npreds) throws Exception {
-
- // final PseudoRandom r = new PseudoRandom(20000 /*10000*/);
- final Random r = new Random();
-
- final CAT writes = new CAT();
- final CAT reads = new CAT();
-// final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null);
- // Set [true] iff there are no failures by the time we cancel the
- // running tasks.
- // final AtomicBoolean success = new AtomicBoolean(false);
- final BigdataSail sail = getSail(getProperties(retention));
- try {
-
- sail.initialize();
- final BigdataSailRepository repo = new BigdataSailRepository(sail);
- final AbstractTripleStore origStore = repo.getDatabase();
-
- final URI[] subs = new URI[nuris];
- for (int i = 0; i < nuris; i++) {
- subs[i] = uri("uri:" + i);
- }
- final URI[] preds = new URI[npreds + 20];
- for (int i = 0; i < npreds; i++) {
- preds[i] = uri("pred:" + i);
- }
- final int nwrites = 600;
- final int nreads = 50;
- final int ntrials = 20;
- final boolean isQuads = origStore.isQuads();
-
- for (int loop = 0; loop < ntrials; loop++) {
- final Long txId = ((Journal) origStore.getIndexManager())
- .newTx(ITx.READ_COMMITTED);
- try {
- // System.err.println("READ_STATE: " + txId);
- final AbstractTripleStore readstore = (AbstractTripleStore) origStore
- .getIndexManager().getResourceLocator()
- .locate(origStore.getNamespace(), txId);
- for (int i = 0; i < nreads; i++) {
- final BigdataStatementIterator stats = readstore
- // .getStatements(subs[nuris/2 + loop], null,
- // null);
- .getStatements(subs[r.nextInt(nuris)], null,
- null);
- try {
- while (stats.hasNext()) {
- stats.next();
- reads.increment();
- }
- } finally {
- stats.close();
- }
- }
-
- // Thread.sleep(r.nextInt(1000) + 500);
- try {
-
- for (int i = 0; i < nwrites; i++) {
- origStore.addStatement(subs[r.nextInt(nuris)],
- preds[r.nextInt(npreds)],
- subs[r.nextInt(nuris)],
- isQuads ? subs[r.nextInt(nuris)] : null);
- // origStore.addStatement(subs[nuris/2 + loop],
- // preds[npreds/2 + loop],
- // subs[nuris/2 - loop],
- // isQuads ? subs[nuris/2 + loop] : null);
- writes.increment();
- // System.out.print('.');
- }
- // System.out.println("\n");
-
- } finally {
- origStore.commit();
- log.warn("Commit: " + loop);
- // if (log.isInfoEnabled())
- // log.info("Commit");
- }
- // Close Read Connection
- ((Journal) readstore.getIndexManager()).abort(txId);
-
- } catch (Throwable ise) {
- log.error("firstCause:" + ise, ise);
- throw new Exception(ise);
- }
- }
-
- } finally {
-
- sail.__tearDownUnitTest();
-
- }
-
- }
-
- protected URI uri(String s) {
- return new URIImpl(BD.NAMESPACE + s);
- }
-
- @Override
- public Properties getProperties() {
-
- Properties props = super.getProperties();
-
- props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true");
- props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
- props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
- props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
- props.setProperty(BigdataSail.Options.JUSTIFY, "false");
- props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
- // props.setProperty(Options.WRITE_CACHE_BUFFER_COUNT, "3");
-
- // ensure using RWStore
- props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString());
- // props.setProperty(RWStore.Options.MAINTAIN_BLACKLIST, "false");
- // props.setProperty(RWStore.Options.OVERWRITE_DELETE, "true");
- // props.setProperty(Options.CREATE_TEMP_FILE, "false");
- // props.setProperty(Options.FILE, "/Volumes/SSDData/csem.jnl");
-
- // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "20");
- // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "0");
- props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "500");
- props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "10");
-
- return props;
-
- }
-
- protected Properties getProperties(int retention) {
- final Properties props = getProperties();
- props.setProperty(AbstractTransactionService.Options.MIN_RELEASE_AGE, "" + retention);
-
- return props;
- }
-
-}
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Dec 19, 2006
+ */
+package com.bigdata.rdf.sail;
+
+import info.aduna.iteration.CloseableIteration;
+
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.openrdf.model.Resource;
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.model.Value;
+import org.openrdf.model.impl.URIImpl;
+import org.openrdf.sail.SailException;
+
+import com.bigdata.btree.AbstractNode;
+import com.bigdata.btree.BTree;
+import com.bigdata.btree.Checkpoint;
+import com.bigdata.btree.IndexMetadata;
+import com.bigdata.counters.CAT;
+import com.bigdata.journal.BufferMode;
+import com.bigdata.rawstore.IRawStore;
+import com.bigdata.rdf.axioms.NoAxioms;
+import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection;
+import com.bigdata.rdf.sail.BigdataSail.Options;
+import com.bigdata.rdf.store.BD;
+import com.bigdata.rdf.vocab.NoVocabulary;
+import com.bigdata.service.AbstractTransactionService;
+import com.bigdata.util.InnerCause;
+import com.bigdata.util.concurrent.DaemonThreadFactory;
+
+/**
+ * TestCase to test single writer/multiple transaction committed readers with
+ * SAIL interface.
+ *
+ * @author Martyn Cutcher
+ */
+abstract public class TestMROWTransactions extends ProxyBigdataSailTestCase {
+
+// private static final Logger txLog = Logger.getLogger("com.bigdata.txLog");
+
+ TestMROWTransactions() {
+ }
+
+ TestMROWTransactions(final String arg0) {
+ super(arg0);
+ }
+
+// void domultiple_csem_transaction_onethread(final int retentionMillis) throws Exception {
+//
+// domultiple_csem_transaction_onethread(retentionMillis, 2000, 50);
+//
+// }
+//
+// void domultiple_csem_transaction(final int retentionMillis) throws Exception {
+//
+// domultiple_csem_transaction2(retentionMillis, 2/* nreaderThreads */,
+// 1000/* nwriters */, 20 * 1000/* nreaders */);
+//
+// }
+
+ /**
+ *
+ * @param retentionMillis
+ * The retention time (milliseconds).
+ * @param nreaderThreads
+ * The #of threads running reader tasks. Increase nreaderThreads
+ * to increase the chance of the startup condition and decrement to
+ * increase the chance of a commit point with no open read-only transaction (no
+ * sessions). Value is in [1:...].
+ * @param nwriters
+ * The #of writer tasks (there is only one writer thread).
+ * @param nreaders
+ * The #of reader tasks.
+ * @param isolatableIndices
+ * When <code>true</code> the writers will use read/write
+ * transactions. Otherwise they will use the unisolated
+ * connection.
+ * @throws Exception
+ */
+ void domultiple_csem_transaction2(final int retentionMillis,
+ final int nreaderThreads, final int nwriters, final int nreaders,
+ final boolean isolatableIndices) throws Exception {
+
+ if (log.isInfoEnabled()) {
+ log.info("=================================================================================");
+ log.info("retentionMillis=" + retentionMillis + ", nreaderThreads="
+ + nreaderThreads + ", nwriters=" + nwriters + ", nreaders="
+ + nreaders + ", isolatableIndices=" + isolatableIndices);
+ log.info("=================================================================================");
+ }
+
+ /**
+ * The most likely problem is related to the session protection in the
+ * RWStore. In development we saw problems when concurrent transactions
+ * had reduced the open/active transactions to zero, therefore releasing
+ * session protection. If the protocol works correctly we should never
+ * release session protection if any transaction has been initialized.
+ *
+ * The message of "invalid address" would be generated if an allocation
+ * has been freed and is no longer protected from recycling when an
+ * attempt is made to read from it.
+ *
+ * TODO Experiment with different values of [nthreads] for the with and
+ * w/o history variations of this test. Consider lifting that parameter
+ * into the signature of this method.
+ */
+ final int nuris = 2000; // number of unique subject/objects
+ final int npreds = 50; //
+ // final PseudoRandom r = new PseudoRandom(2000);
+ // r.next(1500);
+ final Random r = new Random();
+
+ final int maxAborts = 100;
+
+ final CAT commits = new CAT();
+ final CAT aborts = new CAT();
+ final CAT nreadersDone = new CAT();
+ final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null);
+ // Set [true] iff there are no failures by the time we cancel the running tasks.
+ final AtomicBoolean success = new AtomicBoolean(false);
+ final BigdataSail sail = getSail(getProperties(retentionMillis,
+ isolatableIndices));
+ // log.warn("Journal: "+sail.getDatabase().getIndexManager()+", file="+((Journal)sail.getDatabase().getIndexManager()).getFile());
+ try {
+
+ sail.initialize();
+ // TODO Force an initial commit?
+
+// final BigdataSailRepository repo = new BigdataSailRepository(sail);
+// final AbstractTripleStore origStore = repo.getDatabase();
+
+ final URI[] subs = new URI[nuris];
+ for (int i = 0; i < nuris; i++) {
+ subs[i] = uri("uri:" + i);
+ }
+ final URI[] preds = new URI[npreds];
+ for (int i = 0; i < npreds; i++) {
+ preds[i] = uri("pred:" + i);
+ }
+
+ ExecutorService writers = null;
+ ExecutorService readers = null;
+ try {
+
+ writers = Executors
+ .newSingleThreadExecutor(new DaemonThreadFactory(
+ "test-writer-pool"));
+
+ readers = Executors.newFixedThreadPool(nreaderThreads,
+ new DaemonThreadFactory("test-reader-pool"));
+
+ // let's schedule a few writers and readers (more than needed)
+ // writers.submit(new Writer(5000000/* nwrite */));
+ Future<Long> lastWriterFuture = null;
+ @SuppressWarnings("unused")
+ Future<Long> lastReaderFuture = null;
+
+ for (int i = 0; i < nwriters; i++) {
+
+ lastWriterFuture = writers.submit(new Writer(r,
+ 500/* nwrites */, sail, commits, aborts,
+ maxAborts, failex, subs, preds));
+
+ }
+
+ for (int rdrs = 0; rdrs < nreaders; rdrs++) {
+
+ lastReaderFuture = readers.submit(new Reader(r,
+ 60/* nread */, nwriters, sail, failex,
+ commits, nreadersDone, subs));
+
+ }
+
+ // let the writers run riot for a time, checking for failure
+ while (true) {
+// final boolean bothDone = lastWriterFuture.isDone()
+// && lastReaderFuture.isDone();
+// if (bothDone)
+// break;
+ if(lastWriterFuture.isDone()) {
+ // End test when the writers are done.
+ break;
+ }
+ if (failex.get() != null) {
+ // Something errored.
+ break;
+ }
+ Thread.sleep(250/* ms */);
+ }
+ if (failex.get() == null) {
+ /*
+ * Note whether or not there are failures before we
+ * interrupt the running tasks.
+ */
+ success.set(true);
+ }
+ writers.shutdownNow();
+ readers.shutdownNow();
+ writers.awaitTermination(5, TimeUnit.SECONDS);
+ readers.awaitTermination(5, TimeUnit.SECONDS);
+ if (!success.get()) {
+ final Throwable ex = failex.get();
+ if (ex != null) {
+ fail("Test failed: firstCause=" + ex
+ + ", retentionMillis=" + retentionMillis
+ + ", nreaderThreads=" + nreaderThreads
+ + ", nwriters=" + nwriters + ", nreaders="
+ + nreaders + ", indexManager="
+ + sail.getDatabase().getIndexManager(), ex);
+ }
+ }
+ if (log.isInfoEnabled())
+ log.info("Writers committed: " + commits.get()
+ + ", writers aborted: " + aborts.get()
+ + ", readers done: " + nreadersDone.get());
+ } finally {
+ if (writers != null)
+ writers.shutdownNow();
+ if (readers != null)
+ readers.shutdownNow();
+ }
+ } finally {
+ try {
+ sail.__tearDownUnitTest();
+ } catch (Throwable t) {
+ /*
+ * FIXME The test helper tear down should not throw anything,
+ * but it can do so if a tx has been asynchronously closed. This
+ * has to do with the logic that openrdf uses to close open
+ * transactions when the sail is shutdown by the caller.
+ */
+ log.error("Problem with test shutdown: " + t, t);
+ }
+
+ }
+
+ }
+
+ /** Writer task adds nwrites statements then commits */
+ static private class Writer implements Callable<Long> {
+
+ final Random r;
+ final int nwrites;
+ final BigdataSail sail;
+ final CAT commits;
+ final CAT aborts;
+ final int maxAborts;
+ final AtomicReference<Throwable> failex;
+ final int nuris;
+ final int npreds;
+ final URI[] subs;
+ final URI[] preds;
+
+ Writer(final Random r, final int nwrites,
+ final BigdataSail sail, final CAT commits,
+ final CAT aborts, final int maxAborts,
+ final AtomicReference<Throwable> failex, final URI[] subs,
+ final URI[] preds) {
+
+ this.r = r;
+ this.nwrites = nwrites;
+ this.sail = sail;
+ this.commits = commits;
+ this.aborts = aborts;
+ this.maxAborts = maxAborts;
+ this.failex = failex;
+ this.nuris = subs.length;
+ this.npreds = preds.length;
+ this.subs = subs;
+ this.preds = preds;
+
+ }
+
+ @Override
+ public Long call() throws Exception {
+ final boolean isQuads = sail.isQuads();
+ // Thread.sleep(r.nextInt(2000) + 500);
+ BigdataSailConnection con = null;
+ boolean ok = false;
+ try {
+ con = sail.getConnection();
+ for (int i = 0; i < nwrites; i++) {
+ con.addStatement(subs[r.nextInt(nuris)],
+ preds[r.nextInt(npreds)], subs[r.nextInt(nuris)],
+ isQuads ? subs[r.nextInt(nuris)] : null);
+ // System.out.print('.');
+ }
+ // System.out.println("\n");
+ con.commit();
+ ok = true;
+ commits.increment();
+ if (log.isInfoEnabled())
+ log.info("Commit #" + commits);
+
+ } catch (Throwable ise) {
+ log.warn(ise, ise);
+ if (InnerCause.isInnerCause(ise, InterruptedException.class)) {
+ // ignore
+ } else if (InnerCause.isInnerCause(ise, MyBTreeException.class)
+ && aborts.get() < maxAborts) {
+ // ignore
+ } else {
+ // Set the first cause (but not for the forced abort).
+ if (failex
+ .compareAndSet(null/* expected */, ise/* newValue */)) {
+ log.error("firstCause:" + ise, ise);
+ }
+ }
+ } finally {
+ if (con != null) {
+ if (!ok) {
+ con.rollback();
+ aborts.increment();
+ log.error("Abort #" + aborts + " (with "
+ + commits.get() + " commits)");
+ }
+ con.close();
+ }
+ }
+ return null;
+ }
+
+ } // Writer
+
+ /** ReaderTask makes nreads and closes. */
+ private static class Reader implements Callable<Long> {
+
+ final Random r;
+ final int nreads;
+ final int nwriters;
+ final BigdataSail sail;
+ final AtomicReference<Throwable> failex;
+ final CAT commits;
+ final CAT nreadersDone;
+ final int nuris;
+ final URI[] subs;
+
+ Reader(final Random r, final int nreads, final int nwriters,
+ final BigdataSail sail,
+ final AtomicReference<Throwable> failex, final CAT commits,
+ final CAT nreadersDone, final URI[] subs) {
+ this.r = r;
+ this.nreads = nreads;
+ this.nwriters = nwriters;
+ this.sail = sail;
+ this.failex = failex;
+ this.commits = commits;
+ this.nreadersDone = nreadersDone;
+ this.nuris = subs.length;
+ this.subs = subs;
+ }
+
+ @Override
+ public Long call() throws Exception {
+ BigdataSailConnection con = null;
+ try {
+ con = sail.getReadOnlyConnection();
+ /*
+ * Note: This sleep makes it much easier to hit the bug
+ * documented here. However, the sleep can also cause the test
+ * to really stretch out. So the sleep is only used until the
+ * writers are done.
+ *
+ * https://sourceforge.net/apps/trac/bigdata/ticket/467
+ */
+ if (commits.get() < Math.max(nwriters, 5))
+ Thread.sleep(2000/* millis */);
+
+ for (int i = 0; i < nreads; i++) {
+ final CloseableIteration<? extends Statement, SailException> stats = con
+ .getStatements(subs[r.nextInt(nuris)], (URI) null,
+ (Value) null, (Resource) null);
+ try {
+ while (stats.hasNext()) {
+ stats.next();
+ }
+ } finally {
+ stats.close();
+ }
+ }
+ } catch (Throwable ise) {
+ if (InnerCause.isInnerCause(ise, InterruptedException.class)) {
+ // Ignore.
+ } else {
+ if (failex
+ .compareAndSet(null/* expected */, ise/* newValue */)) {
+ log.error("firstCause:" + ise, ise);
+ } else {
+ if (log.isInfoEnabled())
+ log.info("Other error: " + ise, ise);
+ }
+ }
+ } finally {
+ if (con != null) {
+ con.rollback();
+ con.close();
+ }
+ nreadersDone.increment();
+ }
+ return null;
+ }
+
+ } // Reader
+
+
+// void domultiple_csem_transaction_onethread(final int retention, final int nuris, final int npreds) throws Exception {
+//
+// // final PseudoRandom r = new PseudoRandom(20000 /*10000*/);
+// final Random r = new Random();
+//
+// final CAT writes = new CAT();
+// final CAT reads = new CAT();
+//// final AtomicReference<Throwable> failex = new AtomicReference<Throwable>(null);
+// // Set [true] iff there are no failures by the time we cancel the
+// // running tasks.
+// // final AtomicBoolean success = new AtomicBoolean(false);
+// final boolean isolatableIndices = false;
+// final BigdataSail sail = getSail(getProperties(retention,isolatableIndices));
+// try {
+//
+// sail.initialize();
+// final BigdataSailRepository repo = new BigdataSailRepository(sail);
+// final AbstractTripleStore origStore = repo.getDatabase();
+//
+// final URI[] subs = new URI[nuris];
+// for (int i = 0; i < nuris; i++) {
+// subs[i] = uri("uri:" + i);
+// }
+// final URI[] preds = new URI[npreds + 20];
+// for (int i = 0; i < npreds; i++) {
+// preds[i] = uri("pred:" + i);
+// }
+// final int nwrites = 600;
+// final int nreads = 50;
+// final int ntrials = 20;
+// final boolean isQuads = origStore.isQuads();
+//
+// for (int loop = 0; loop < ntrials; loop++) {
+// final Long txId = ((Journal) origStore.getIndexManager())
+// .newTx(ITx.READ_COMMITTED);
+// try {
+// // System.err.println("READ_STATE: " + txId);
+// final AbstractTripleStore readstore = (AbstractTripleStore) origStore
+// .getIndexManager().getResourceLocator()
+// .locate(origStore.getNamespace(), txId);
+// for (int i = 0; i < nreads; i++) {
+// final BigdataStatementIterator stats = readstore
+// // .getStatements(subs[nuris/2 + loop], null,
+// // null);
+// .getStatements(subs[r.nextInt(nuris)], null,
+// null);
+// try {
+// while (stats.hasNext()) {
+// stats.next();
+// reads.increment();
+// }
+// } finally {
+// stats.close();
+// }
+// }
+//
+// // Thread.sleep(r.nextInt(1000) + 500);
+// try {
+//
+// for (int i = 0; i < nwrites; i++) {
+// origStore.addStatement(subs[r.nextInt(nuris)],
+// preds[r.nextInt(npreds)],
+// subs[r.nextInt(nuris)],
+// isQuads ? subs[r.nextInt(nuris)] : null);
+// // origStore.addStatement(subs[nuris/2 + loop],
+// // preds[npreds/2 + loop],
+// // subs[nuris/2 - loop],
+// // isQuads ? subs[nuris/2 + loop] : null);
+// writes.increment();
+// // System.out.print('.');
+// }
+// // System.out.println("\n");
+//
+// } finally {
+// origStore.commit();
+// log.warn("Commit: " + loop);
+// // if (log.isInfoEnabled())
+// // log.info("Commit");
+// }
+// // Close Read Connection
+// ((Journal) readstore.getIndexManager()).abort(txId);
+//
+// } catch (Throwable ise) {
+// log.error("firstCause:" + ise, ise);
+// throw new Exception(ise);
+// }
+// }
+//
+// } finally {
+//
+// sail.__tearDownUnitTest();
+//
+// }
+//
+// }
+
+ protected URI uri(String s) {
+ return new URIImpl(BD.NAMESPACE + s);
+ }
+
+ @Override
+ public Properties getProperties() {
+
+ final Properties props = super.getProperties();
+
+ props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
+ props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
+ props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName());
+ props.setProperty(BigdataSail.Options.JUSTIFY, "false");
+ props.setProperty(BigdataSail.Options.TEXT_INDEX, "false");
+ // props.setProperty(Options.WRITE_CACHE_BUFFER_COUNT, "3");
+
+ // ensure using RWStore
+ props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString());
+ // props.setProperty(RWStore.Options.MAINTAIN_BLACKLIST, "false");
+ // props.setProperty(RWStore.Options.OVERWRITE_DELETE, "true");
+ // props.setProperty(Options.CREATE_TEMP_FILE, "false");
+ // props.setProperty(Options.FILE, "/Volumes/SSDData/csem.jnl");
+
+ // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "20");
+ // props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "0");
+ props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_CAPACITY, "500");
+ props.setProperty(IndexMetadata.Options.WRITE_RETENTION_QUEUE_SCAN, "10");
+
+ return props;
+
+ }
+
+ protected Properties getProperties(final int retention,
+ final boolean isolatableIndices) {
+
+ final Properties props = getProperties();
+
+ props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES,
+ Boolean.toString(isolatableIndices));
+
+ props.setProperty(AbstractTransactionService.Options.MIN_RELEASE_AGE,
+ "" + retention);
+
+ final boolean isQuads = Boolean.valueOf(props.getProperty(
+ Options.QUADS_MODE, "false"));
+
+ /**
+ * Force override of the BTree on one index to occasionally prompt
+ * errors during the test run.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError:
+ * Child does not have persistent identity </a>.
+ */
+ if (!isolatableIndices) {
+ /*
+ * Note: if this is used with read/write tx for the updates then we
+ * do not observe the desired exception in the Writer class when we
+ * call con.commit(). This causes the test to fail, but it is
+ * failing in an uninteresting manner. Hence, the forced abort of
+ * the B+Tree update is only present at this time for the unisolated
+ * indices. This is where the problem is reported for ticket #855.
+ */
+ final String name = isQuads ? "SPOC" : "SPO";
+ props.setProperty("com.bigdata.namespace.kb.spo." + name
+ + ".com.bigdata.btree.BTree.className",
+ MyBTree.class.getName());
+ }
+ return props;
+ }
+
+ /**
+ * Helper class for force abort of a B+Tree write.
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child
+ * does not have persistent identity </a>.
+ */
+ public static class MyBTree extends BTree {
+
+ private final Random r = new Random(12L);
+
+ public MyBTree(IRawStore store, Checkpoint checkpoint,
+ IndexMetadata metadata, boolean readOnly) {
+
+ super(store, checkpoint, metadata, readOnly);
+
+ }
+
+ @Override
+ protected long writeNodeOrLeaf(final AbstractNode<?> node) {
+
+ if (node.isLeaf() && r.nextInt(500) == 0) {
+
+ throw new MyBTreeException("Forcing abort: " + this);
+
+ }
+
+ final long addr = super.writeNodeOrLeaf(node);
+
+ return addr;
+
+ }
+
+ }
+ /** Marker exception for a force abort of a B+Tree write. */
+ private static class MyBTreeException extends RuntimeException {
+
+ public MyBTreeException(final String string) {
+ super(string);
+ }
+
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
+
+ }
+
+}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java 2014-06-04 10:55:01 UTC (rev 8444)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsNoHistory.java 2014-06-04 15:37:13 UTC (rev 8445)
@@ -1,3 +1,29 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Dec 19, 2006
+ */
package com.bigdata.rdf.sail;
import java.util.Random;
@@ -7,106 +33,142 @@
* SAIL interface.
*
* @author Martyn Cutcher
- *
*/
public class TestMROWTransactionsNoHistory extends TestMROWTransactions {
- /**
- *
- */
- public TestMROWTransactionsNoHistory() {
- }
+ public TestMROWTransactionsNoHistory() {
+ }
- /**
- * @param arg0
- */
- public TestMROWTransactionsNoHistory(String arg0) {
- super(arg0);
- }
+ public TestMROWTransactionsNoHistory(final String arg0) {
+ super(arg0);
+ }
- @Override
+ @Override
protected void setUp() throws Exception {
super.setUp();
}
-
+
@Override
protected void tearDown() throws Exception {
super.tearDown();
}
-
-// // similar to test_multiple_transactions but uses direct AbsractTripleStore
-// // manipulations rather than RepositoryConnections
-// public void test_multiple_csem_transaction_nohistory() throws Exception {
-//
-//// domultiple_csem_transaction(0);
-//
-// domultiple_csem_transaction2(0/* retentionMillis */,
-// 2/* nreaderThreads */, 1000/* nwriters */, 20 * 1000/* nreaders */);
-//
-// }
-//
-// public void test_multiple_csem_transaction_nohistory_oneReaderThread() throws Exception {
-//
-// domultiple_csem_transaction2(0/* retentionMillis */,
-// 1/* nreaderThreads */, 1000/* nwriters */, 20 * 1000/* nreaders */);
-//
-// }
-
- public void test_multiple_csem_transaction_nohistory_stress() throws Exception {
+
+ /**
+ * I do observe problems with the "no-history" version of this test. The
+ * RWStore has known issues and a minimum retention time of zero is not
+ * supported at this time.
+ *
+ * <pre>
+ * junit.framework.AssertionFailedError: Test failed: firstCause=java.lang.RuntimeException: java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220}, retentionMillis=0, nreaderThreads=19, nwriters=100, nreaders=400, indexManager=com.bigdata.journal.Journal@327556d1
+ * at junit.framework.TestCase2.fail(TestCase2.java:90)
+ * at com.bigdata.rdf.sail.TestMROWTransactions.domultiple_csem_transaction2(TestMROWTransactions.java:237)
+ * at com.bigdata.rdf.sail.TestMROWTransactionsNoHistory.test_multiple_csem_transaction_no_history_stress(TestMROWTransactionsNoHistory.java:66)
+ * at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+ * at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
+ * at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+ * at java.lang.reflect.Method.invoke(Method.java:606)
+ * at junit.framework.TestCase.runTest(TestCase.java:154)
+ * at junit.framework.TestCase.runBare(TestCase.java:127)
+ * at junit.framework.TestResult$1.protect(TestResult.java:106)
+ * at junit.framework.TestResult.runProtected(TestResult.java:124)
+ * at junit.framework.TestResult.run(TestResult.java:109)
+ * at junit.framework.TestCase.run(TestCase.java:118)
+ * at junit.framework.TestSuite.runTest(TestSuite.java:208)
+ * at junit.framework.TestSuite.run(TestSuite.java:203)
+ * at org.eclipse.jdt.internal.junit.runner.junit3.JUnit3TestReference.run(JUnit3TestReference.java:130)
+ * at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38)
+ * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467)
+ * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683)
+ * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390)
+ * at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197)
+ * Caused by: java.lang.RuntimeException: java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220}
+ * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1861)
+ * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1722)
+ * at com.bigdata.rdf.store.AbstractTripleStore.getAccessPath(AbstractTripleStore.java:2868)
+ * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3534)
+ * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3470)
+ * at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.getStatements(BigdataSail.java:3433)
+ * at com.bigdata.rdf.sail.TestMROWTransactions$Reader.call(TestMROWTransactions.java:404)
+ * at com.bigdata.rdf.sail.TestMROWTransactions$Reader.call(TestMROWTransactions.java:1)
+ * at java.util.concurrent.FutureTask.run(FutureTask.java:262)
+ * at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
+ * at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
+ * at java.lang.Thread.run(Thread.java:745)
+ * Caused by: java.lang.RuntimeException: Could not load Checkpoint: store=com.bigdata.journal.Journal@327556d1, addrCheckpoint={off=852288,len=220}
+ * at com.bigdata.btree.Checkpoint.loadFromCheckpoint(Checkpoint.java:756)
+ * at com.bigdata.journal.AbstractJournal.getIndexWithCheckpointAddr(AbstractJournal.java:5288)
+ * at com.bigdata.journal.AbstractJournal.getIndexWithCommitRecord(AbstractJournal.java:5135)
+ * at com.bigdata.journal.AbstractJournal.getIndexLocal(AbstractJournal.java:5005)
+ * at com.bigdata.journal.AbstractJournal.getIndex(AbstractJournal.java:4897)
+ * at com.bigdata.journal.Journal.getIndexSources(Journal.java:2656)
+ * at com.bigdata.journal.Journal.getIndex(Journal.java:2892)
+ * at com.bigdata.journal.Journal.getIndex(Journal.java:1)
+ * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:238)
+ * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:198)
+ * at com.bigdata.relation.AbstractRelation.getIndex(AbstractRelation.java:166)
+ * at com.bigdata.rdf.lexicon.LexiconRelation.getTerm2IdIndex(LexiconRelation.java:984)
+ * at com.bigdata.rdf.lexicon.LexiconRelation.addTerms(LexiconRelation.java:1857)
+ * ... 11 more
+ * Caused by: java.lang.RuntimeException: addr=-8196 : cause=com.bigdata.util.ChecksumError: offset=852288,nbytes=224,expected=721420255,actual=-1747893185
+ * at com.bigdata.rwstore.RWStore.getData(RWStore.java:1899)
+ * at com.bigdata.journal.RWStrategy.readFromLocalStore(RWStrategy.java:727)
+ * at com.bigdata.journal.RWStrategy.read(RWStrategy.java:154)
+ * at com.bigdata.journal.AbstractJournal.read(AbstractJournal.java:4043)
+ * at com.bigdata.btree.Checkpoint.load(Checkpoint.java:575)
+ * at com.bigdata.btree.Checkpoint.loadFromCheckpoint(Checkpoint.java:754)
+ * ... 23 more
+ * Caused by: com.bigdata.util.ChecksumError: offset=852288,nbytes=224,expected=721420255,actual=-1747893185
+ * at com.bigdata.io.writecache.WriteCacheService._readFromLocalDiskIntoNewHeapByteBuffer(WriteCacheService.java:3706)
+ * at com.bigdata.io.writecache.WriteCacheService._getRecord(WriteCacheService.java:3521)
+ * at com.bigdata.io.writecache.WriteCacheService.access$1(WriteCacheService.java:3493)
+ * at com.bigdata.io.writecache.WriteCacheService$1.compute(WriteCacheService.java:3358)
+ * at com.bigdata.io.writecache.WriteCacheService$1.compute(WriteCacheService.java:1)
+ * at com.bigdata.util.concurrent.Memoizer$1.call(Memoizer.java:77)
+ * at java.util.concurrent.FutureTask.run(FutureTask.java:262)
+ * at com.bigdata.util.concurrent.Memoizer.compute(Memoizer.java:92)
+ * at com.bigdata.io.writecache.WriteCacheService.loadRecord(WriteCacheService.java:3463)
+ * at com.bigdata.io.writecache.WriteCacheService.read(WriteCacheService.java:3182)
+ * at com.bigdata.rwstore.RWStore.getData(RWStore.java:1890)
+ * ... 28 more
+ * </pre>
+ *
+ * @see <a href="http://trac.bigdata.com/ticket/855"> AssertionError: Child
+ * does not have persistent identity </a>.
+ */
+ // Note: This test is disabled since there are known issues when retentionMillis:=0.
+ public void _test_multiple_csem_transaction_no_history_stress() throws Exception {
- final Random r = new Random();
-
- for (int i = 0; i < 10; i++) {
+ final Random r = new Random();
+
+ for (int i = 0; i < 10; i++) {
- final int nreaderThreads = r.nextInt(19) + 1;
-
- log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads);
+ final int nreaderThreads = r.nextInt(19) + 1;
+
+ log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads);
- domultiple_csem_transaction2(0/* retentionMillis */,
- nreaderThreads, 20/* nwriters */, 400/* nreaders */);
+ domultiple_csem_transaction2(0/* retentionMillis */,
+ nreaderThreads, 100/* nwriters */, 400/* nreaders */, false/* isolatableIndices */);
- }
-
- }
-
-// public void notest_stress_multiple_csem_transaction_nohistory() throws Exception {
-//
-// final int retentionMillis = 0;
-//
-// for (int i = 0; i< 50; i++) {
-//
-// domultiple_csem_transaction2(retentionMillis, 2/* nreaderThreads */,
-// 1000/* nwriters */, 20 * 1000/* nreaders */);
-//
-// }
-//
-// }
-//
-// public void test_multiple_csem_transaction_onethread_nohistory() throws Exception {
-//
-// domultiple_csem_transaction_onethread(0);
-//
-// }
-//
-//// Open a read committed transaction
-// //do reads
-// //do write without closing read
-// //commit write
-// //close read
-// //repeat
-// public void notest_multiple_csem_transaction_onethread_nohistory_debug() throws Exception {
-// PseudoRandom r = new PseudoRandom(2000);
-//
-// for (int run = 0; run < 200; run++) {
-// final int uris = 1 + r.nextInt(599);
-// final int preds = 1 + r.nextInt(49);
-// try {
-// System.err.println("Testing with " + uris + " uris, " + preds + " preds");
-// domultiple_csem_transaction_onethread(0, uris, preds);
-// } catch (Exception e) {
-// System.err.println("problem with " + uris + " uris, " + preds + " preds");
-// throw e;
-// }
-// }
-// }
+ }
+
+ }
+
+ public void test_multiple_csem_transaction_no_history_stress_readWriteTx()
+ throws Exception {
+
+ final Random r = new Random();
+
+ for (int i = 0; i < 10; i++) {
+
+ final int nreaderThreads = r.nextInt(19) + 1;
+
+ log.warn("Trial: " + i + ", nreaderThreads=" + nreaderThreads);
+
+ domultiple_csem_transaction2(0/* retentionMillis */,
+ nreaderThreads, 100/* nwriters */, 400/* nreaders */, true/* isolatableIndices */);
+
+ }
+
+ }
+
}
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestMROWTransactionsWithHistory.java 201...
[truncated message content] |