From: <tho...@us...> - 2011-05-19 17:14:01
Revision: 4526
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4526&view=rev
Author:   thompsonbry
Date:     2011-05-19 17:13:50 +0000 (Thu, 19 May 2011)

Log Message:
-----------
CI clean up as described at [1] (up to comment 4).

[1] https://sourceforge.net/apps/trac/bigdata/ticket/297#comment:4

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/htree.xls
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/AbstractHTree.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestCase3.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractBufferStrategyTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractInterruptsTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractRestartSafeTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestGroupCommit.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestLockContention.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestConcurrentJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestDirectJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestDiskJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestMappedJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestNamedIndices.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTemporaryStore.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransientJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategy.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategyNoCache.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestWORMStrategyOneCacheBuffer.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rawstore/AbstractRawStoreTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rawstore/TestSimpleMemoryRawStore.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java
    branches/QUADS_QUERY_BRANCH/bigdata-perf/bsbm/src/test/benchmark/bigdata/TestBSBM.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/lexicon/TestIVCache.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/rules/TestCompareFullAndFastClosure.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestDefaultGraphAccessPath.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractTestCase.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/StressTestCentos.java
    branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/AbstractBigdataSailTestCase.java

Added Paths:
-----------
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/htree.xls
===================================================================
(Binary files differ)

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/AbstractHTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/AbstractHTree.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/AbstractHTree.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -273,20 +273,60 @@
      * The #of {@link DirectoryPage}s in the {@link HTree} (not buddy hash
      * tables, but the pages on which they appear).
      */
-    abstract public int getNodeCount();
+    abstract public long getNodeCount();
 
     /**
      * The #of {@link BucketPage}s in the {@link HTree} (not buddy hash buckets,
      * but the pages on which they appear).
      */
-    abstract public int getLeafCount();
+    abstract public long getLeafCount();
 
     /**
      * The #of tuples in the {@link HTree}.
      */
-    abstract public int getEntryCount();
+    abstract public long getEntryCount();
 
     /**
+     * Fast summary information about the hash tree.
+     */
+    public String toString() {
+
+        final StringBuilder sb = new StringBuilder();
+
+        sb.append(getClass().getSimpleName());
+
+        sb.append("{ ");
+
+        // TODO restore.
+//        if (metadata.getName() != null) {
+//
+//            sb.append("name=" + metadata.getName());
+//
+//        } else {
+//
+//            sb.append("uuid=" + metadata.getIndexUUID());
+//
+//        }
+
+        sb.append(", addressBits=" + getAddressBits());
+
+//        sb.append(", height=" + getHeight());
+
+        sb.append(", entryCount=" + getEntryCount());
+
+        sb.append(", nodeCount=" + getNodeCount());
+
+        sb.append(", leafCount=" + getLeafCount());
+
+//        sb.append(", lastCommitTime=" + getLastCommitTime()); TODO restore
+
+        sb.append("}");
+
+        return sb.toString();
+
+    }
+
+    /**
      * @param store
      *            The persistence store.
      * @param nodeFactory
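The int-to-long widening above matters once an index can hold more than Integer.MAX_VALUE tuples. A minimal illustration of the wrap-around the change avoids (illustrative only, not part of this commit):

    // An int tuple counter silently wraps past 2^31 - 1 ...
    int n = Integer.MAX_VALUE;
    n = n + 1;                  // -2147483648 (overflow)

    // ... while a long counter keeps counting.
    long m = Integer.MAX_VALUE;
    m = m + 1;                  // 2147483648, as expected
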
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -75,6 +75,11 @@
  * implementation for int32 keys. This optimization is likely to be
  * quite worth while as the majority of use cases for the hash tree use
  * int32 keys.
+ *
+ * TODO It is quite possible to define a range query interface for the
+ * hash tree. You have to use an order preserving hash function, which
+ * is external to the HTree implementation. Internally, the HTree must
+ * either double-link the pages or crawl the directory structure.
  */
 public class HTree extends AbstractHTree
 //    implements
@@ -97,33 +102,33 @@
     /**
      * The #of {@link DirectoryPage} in the {@link HTree}. This is ONE (1) for a
      * new {@link HTree}.
      */
-    protected int nnodes;
+    protected long nnodes;
 
     /**
      * The #of {@link BucketPage}s in the {@link HTree}. This is one (1) for a
      * new {@link HTree} (one directory page and one bucket page).
      */
-    protected int nleaves;
+    protected long nleaves;
 
     /**
      * The #of entries in the {@link HTree}. This is ZERO (0) for a new
      * {@link HTree}.
      */
-    protected int nentries;
+    protected long nentries;
 
-    final public int getNodeCount() {
+    final public long getNodeCount() {
 
         return nnodes;
 
     }
 
-    final public int getLeafCount() {
+    final public long getLeafCount() {
 
         return nleaves;
 
    }
 
-    final public int getEntryCount() {
+    final public long getEntryCount() {
 
        return nentries;
 
@@ -558,9 +563,8 @@
          */
 
         // increase prefix length by the #of address bits consumed by the
-        // buddy hash table. TODO child.globalDepth might always be
-        // [addressBits] for a directory page...
-        prefixLength = prefixLength + child.globalDepth;
+        // buddy hash table in the current directory page as we descend.
+        prefixLength = prefixLength + current.globalDepth;
 
         // find the offset of the buddy hash table in the child.
         buddyOffset = HTreeUtil
@@ -726,8 +730,6 @@
          * because we know that the parent and the old bucket are both
          * mutable. This means that their childRef is defined and their
          * storage address is NULL.
-         *
-         * TODO This logic should be in DirectoryPage#dump()
          */
         int firstPointer = -1;
         int nfound = 0;
@@ -772,10 +774,10 @@
     /**
      * Redistribute the buddy buckets.
      * <p>
-     * Note: We are not changing the #of buddy buckets, just their size and the
+     * Note: We are not changing the #of buckets, just their size and the
      * page on which they are found. Any tuples in a source bucket will wind up
-     * in the same bucket afterwards, but the page and offset on the page of the
-     * buddy bucket may have been changed.
+     * in the "same" bucket afterwards, but the page and offset on the page of the
+     * bucket may have been changed and the size of the bucket will have doubled.
      * <p>
      * We proceed backwards, moving the upper half of the buddy buckets to the
      * new bucket page first and then spreading out the lower half of the source
@@ -805,10 +807,10 @@
         final int slotsPerNewBuddy = (1 << newDepth);
 
         // #of buddy tables on the old bucket page.
-        final int oldBuddyCount = (slotsOnPage) / slotsPerOldBuddy;
+        final int oldBuddyCount = slotsOnPage / slotsPerOldBuddy;
 
         // #of buddy tables on the bucket pages after the split.
-        final int newBuddyCount = (slotsOnPage) / slotsPerNewBuddy;
+        final int newBuddyCount = slotsOnPage / slotsPerNewBuddy;
 
         final BucketPage srcPage = oldBucket;
         final MutableKeyBuffer srcKeys = (MutableKeyBuffer) oldBucket.getKeys();
@@ -941,24 +943,190 @@
 
     }
 
-    /**
-     * Split when <code>globalDepth == localDepth</code>. This case requires the
-     * introduction of a new {@link DirectoryPage}.
-     *
-     * @param parent
-     *            The parent.
-     * @param buddyOffset
-     *            The offset of the buddy hash table within the parent.
-     * @param oldBucket
-     *            The {@link BucketPage} to be split.
-     */
-    private void addDirectoryPageAndSplitBucketPage(final DirectoryPage parent,
-            final int buddyOffset, final BucketPage oldBucket) {
+    /**
+     * Introduce a new directory level when we need to split a child but
+     * <code>globalDepth == localDepth</code>. The caller must retry the insert
+     * after this method makes the structural change.
+     *
+     * @param oldParent
+     *            The parent {@link DirectoryPage}.
+     * @param buddyOffset
+     *            The buddyOffset within the <i>parent</i>. This identifies
+     *            which buddy hash table in the parent must have its pointers
+     *            updated such that it points to both the original child and the
+     *            new child.
+     * @param child
+     *            The child.
+     *
+     * @throws IllegalArgumentException
+     *             if any argument is <code>null</code>.
+     * @throws IllegalStateException
+     *             if the depth of the child is GTE the depth of the parent.
+     * @throws IllegalStateException
+     *             if the <i>parent</i> is read-only.
+     * @throws IllegalStateException
+     *             if the <i>oldBucket</i> is read-only.
+     * @throws IllegalStateException
+     *             if the parent of the <i>oldBucket</i> is not the given
+     *             <i>parent</i>.
+     */
+    private void addDirectoryPageAndSplitBucketPage(final DirectoryPage oldParent,
+            final int buddyOffset, final AbstractPage child) {
+
+        if (oldParent == null)
+            throw new IllegalArgumentException();
+        if (child == null)
+            throw new IllegalArgumentException();
+        if (child.globalDepth != oldParent.globalDepth) {
+            /*
+             * We only create a new directory page when the global and local
+             * depth are equal.
+             */
+            throw new IllegalStateException();
+        }
+        if (buddyOffset < 0)
+            throw new IllegalArgumentException();
+        if (buddyOffset >= (1 << addressBits)) {
+            /*
+             * Note: This check is against the maximum possible slot index. The
+             * actual max buddyOffset depends on parent.globalBits also since
+             * (1<<parent.globalBits) gives the #of slots per buddy and the
+             * allowable buddyOffset values must fall on a buddy hash table
+             * boundary.
+             */
+            throw new IllegalArgumentException();
+        }
+        if (oldParent.isReadOnly()) // must be mutable.
+            throw new IllegalStateException();
+        if (child.isReadOnly()) // must be mutable.
+            throw new IllegalStateException();
+        if (child.parent != oldParent.self) // must be same Reference.
+            throw new IllegalStateException();
 
-        throw new UnsupportedOperationException();
+        if (log.isDebugEnabled())
+            log.debug("parent=" + oldParent.toShortString() + ", buddyOffset="
+                    + buddyOffset + ", child=" + child);
 
-    }
+        // Allocate a new directory page. The global depth will be ONE (1).
+        final DirectoryPage newParent = new DirectoryPage(this, 1/* globalDepth */);
+
+        // Set the parent Reference on the new dir page to the old dir page.
+        newParent.parent = (Reference) oldParent.self;
+
+        // One more directory page.
+        nnodes++;
+
+//        /*
+//         * Compute the #of pointers (aka slots) that we need to copy from the
+//         * old parent. There will be one such pointer for each buddy on the
+//         * child page. Since global depth (of the parent) == local depth (of the
+//         * child), we know that there is only one buddy on the child page and
+//         * hence that we will copy ONE pointer.
+//         */
+//        final int npointersToCopy = 1 << (oldParent.globalDepth - child.globalDepth);
+//        assert oldParent.globalDepth == child.globalDepth;
+//        assert npointersToCopy == 1;
+
+        /*
+         * 1. Locate the slot for the pointer in the old parent to the child
+         * which is to be split.
+         *
+         * Note: Since there is only one pointer in the old parent page to the
+         * child page, a scan will always find the right slot.
+         *
+         * Note: Another way to look at this is that we are locating all
+         * pointers in the old parent for the child which needs to be split plus
+         * the buddies of that child. This amounts to all pointers to the child
+         * since the buddies are (by definition) on the same page as the child.
+         *
+         * FIXME Can we do this by indexing using the hash bits? That would be
+         * faster than scanning for the reference. We could then just validate
+         * that we have the right reference by testing that slot.
+         */
+        final int slotInParentToUpdate;
+        {
+
+            // #of address slots in each buddy hash table.
+            final int slotsPerBuddy = (1 << oldParent.globalDepth);
+
+            // locate the slot for the pointer to be copied
+            int slot = -1;
+            for (int i = buddyOffset; i < slotsPerBuddy; i++) {
+
+                if (oldParent.childRefs[i] == child.self) {
+                    slot = i;
+                    break;
+                }
+
+            }
+
+            if (slot == -1) {
+                // The child was not found in the parent's buddy bucket.
+                throw new AssertionError();
+            }
+
+            slotInParentToUpdate = slot;
+
+            assert oldParent.childRefs[slot] == child.self;
+
+        }
+
+        /*
+         * Copy the pointer to the child page into each slot of the new
+         * directory page.
+         */
+        {
+
+            // #of slots on the new directory page.
+            final int nslots = 1 << addressBits;
+
+            for (int i = 0; i < nslots; i++) {
+
+                newParent.childRefs[i] = (Reference) child.self;
+
+            }
+
+        }
+
+        /*
+         * Replace the pointer to the child page in the old parent with the
+         * pointer to the new directory page.
+         */
+        oldParent.childRefs[slotInParentToUpdate] = (Reference) newParent.self;
+
+        // Update the parent reference on the child.
+        child.parent = (Reference) newParent.self;
+
+        /*
+         * Recompute the global depth of the child page whose pointer was just
+         * moved. It will have changed since the #of pointers to that page just
+         * changed. This can be done by counting the #of pointers in any buddy
+         * hash table of the new parent to the child. Since all buddy hash
+         * tables on the new parent point to the child page, the #of pointers in
+         * a hash table in the new parent is just the #of slots in a buddy hash
+         * table for the new parent. Since all the buddies that we are moving
+         * are on the same child page, we can do this just once.
+         */
+        {
+
+            // #of address slots in each buddy hash table for the new
+            // parent.
+            final int slotsPerBuddyInNewParent = (1 << newParent.globalDepth);
+
+            // #of pointers to child in a buddy hash table of the new
+            // parent.
+            final int npointers = slotsPerBuddyInNewParent;
+
+            // recompute the local depth of the child page.
+            final int localDepth = HTreeUtil.getLocalDepth(addressBits,
+                    newParent.globalDepth, npointers);
+
+            // update the cached local depth on the child page.
+            child.globalDepth = localDepth;
+
+        }
+
+    }
+
     /**
      * Validate pointers in buddy hash table in the parent against the global
      * depth as self-reported by the child. By definition, the global depth of
@@ -1899,11 +2067,11 @@
             final DirectoryPage p = (parent == null ? null : parent.get());
             sb.append(", parent=" + (p == null ? "N/A" : p.toShortString()));
 
+            sb.append(", globalDepth=" + getGlobalDepth());
+
+            sb.append(", nbuddies=" + (1 << htree.addressBits) / (1 << globalDepth));
+
+            sb.append(", slotsPerBuddy=" + (1 << globalDepth));
+
+            if (data == null) {
 
-            sb.append(", globalDepth=" + getGlobalDepth());
-
-            if (data == null) {
-
                 // No data record? (Generally, this means it was stolen by copy on
                 // write).
                 sb.append(", data=NA}");
@@ -2211,9 +2379,9 @@
             return sb.toString();
 
         }
 
-        sb.append(", globalDepth=" + getGlobalDepth());
-
+        sb.append(", nbuddies=" + (1 << htree.addressBits) / (1 << globalDepth));
+
+        sb.append(", slotsPerBuddy=" + (1 << globalDepth));
 
 //        sb.append(", minKeys=" + minKeys());
 //
 //        sb.append(", maxKeys=" + maxKeys());
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -14,7 +14,6 @@
 
 import org.apache.log4j.Logger;
 
-import com.bigdata.btree.IndexSegment;
 import com.bigdata.counters.CounterSet;
 import com.bigdata.counters.Instrument;
 import com.bigdata.counters.OneShotInstrument;
@@ -59,7 +58,7 @@
  */
 public class DirectBufferPool {
 
-    protected static final Logger log = Logger
+    private static final Logger log = Logger
            .getLogger(DirectBufferPool.class);
 
     /**
@@ -332,19 +331,19 @@
      * be set.
      * <p>
      * Note: This method will block if there are no free buffers in the pool and
-     * the pool was configured with a maximum capacity. In addition it MAY block
-     * if there is not enough free memory to fulfill the request. It WILL log an
-     * error if it blocks. While blocking is not very safe, using a heap
-     * ByteBuffer is not very safe either since Java NIO will allocate a
-     * temporary direct {@link ByteBuffer} for IOs and that can both run out of
-     * memory and leak memory.
+     * the pool was configured with a maximum capacity. It WILL log an error if
+     * it blocks. While blocking is not very safe, using a heap ByteBuffer is
+     * not very safe either since Java NIO will allocate a temporary direct
+     * {@link ByteBuffer} for IOs and that can both run out of memory and leak
+     * memory.
      *
      * @return A direct {@link ByteBuffer}.
      *
      * @throws InterruptedException
      *             if the caller's {@link Thread} is interrupted awaiting a
      *             buffer.
-     * @throws TimeoutException
+     * @throws OutOfMemoryError
+     *             if there is not enough free memory to fulfill the request.
      */
     public ByteBuffer acquire() throws InterruptedException {
 
@@ -538,12 +537,17 @@
 
         } catch (OutOfMemoryError err) {
 
+            /*
+             * Note: It is dangerous to wait if the JVM is out of memory since
+             * this could deadlock even when there is an unlimited capacity on
+             * the pool. It is much safer to throw the error out.
+             */
 
-            log.error("Not enough native memory - will await a free buffer: "
-                    + err, err);
-
-            awaitFreeBuffer(timeout, unit);
-
+//            log.error("Not enough native memory - will await a free buffer: "
+//                    + err, err);
+//
+//            awaitFreeBuffer(timeout, unit);
+
+            throw err;
 
         }
 
     }
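Since acquire() can now fail fast with an OutOfMemoryError rather than awaiting free native memory, callers should pair it with a release in a finally block. A usage sketch (assuming the pool's INSTANCE singleton and its release(ByteBuffer) method):

    final ByteBuffer b = DirectBufferPool.INSTANCE.acquire();
    try {
        // ... fill and drain the direct buffer ...
    } finally {
        // Return the buffer to the pool even if the work above fails.
        DirectBufferPool.INSTANCE.release(b);
    }
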
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -41,6 +41,7 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Condition;
@@ -167,7 +168,8 @@
     /**
      * <code>true</code> until the service is {@link #close() closed}.
      */
-    private volatile boolean open = true;
+//    private volatile boolean open = true;
+    private final AtomicBoolean open = new AtomicBoolean(true);
 
     /**
      * <code>true</code> iff record level checksums are enabled.
@@ -656,6 +658,13 @@
                  * write cache service is closing down.
                  */
                 return null;
+            } catch(AsynchronousCloseException ex) {
+                /*
+                 * The service was shutdown. We do not want to log an error
+                 * here since this is normal shutdown. close() will handle
+                 * all of the Condition notifies.
+                 */
+                return null;
             } catch (Throwable t) {
                 /*
                  * Anything else is an error and halts processing. Error
@@ -844,9 +853,9 @@
         final WriteLock writeLock = lock.writeLock();
         writeLock.lockInterruptibly();
         try {
-            if (!open) {
+            if (!open.get()) {
                 // Reset can not recover from close().
-                throw new IllegalStateException();
+                throw new IllegalStateException(firstCause.get());
             }
 
             // cancel the current WriteTask.
@@ -945,102 +954,104 @@
         }
     }
 
-    public void close() throws InterruptedException {
-        /*
-         * Note: The write lock prevents concurrent close by another thread and
-         * is also required for the operations we take on the dirtyList, the
-         * cleanList, and current.
-         */
-        final WriteLock writeLock = lock.writeLock();
-        writeLock.lockInterruptibly();
-        try {
+    public void close() { //throws InterruptedException {
 
-            if (!open) {
-                // Already closed, so this is a NOP.
-                return;
-            }
+        if (!open.compareAndSet(true/* expect */, false/* update */)) {
+            // Already closed, so this is a NOP.
+            return;
+        }
 
-            // Closed.
-            open = false;
-
-            // Interrupt the write task.
-            localWriteFuture.cancel(true/* mayInterruptIfRunning */);
-            final Future<?> rwf = remoteWriteFuture;
-            if (rwf != null) {
-                // Note: Cancel of remote Future is RMI!
-                try {
-                    rwf.cancel(true/* mayInterruptIfRunning */);
-                } catch (Throwable t) {
-                    log.warn(t, t);
-                }
+        /*
+         * Set [firstCause] and [halt] to ensure that other threads report
+         * errors.
+         *
+         * Note: If the firstCause has not yet been set, then we set it now to a
+         * stack trace which will indicate that the WriteCacheService was
+         * asynchronously closed (that is, it was closed by another thread).
+         */
+        if (firstCause.compareAndSet(null/* expect */,
+                new AsynchronousCloseException()/* update */)) {
+            halt = true;
+        }
+
+        // Interrupt the write task.
+        localWriteFuture.cancel(true/* mayInterruptIfRunning */);
+        final Future<?> rwf = remoteWriteFuture;
+        if (rwf != null) {
+            // Note: Cancel of remote Future is RMI!
+            try {
                rwf.cancel(true/* mayInterruptIfRunning */);
            } catch (Throwable t) {
                log.warn(t, t);
            }
 
-// /*
-// * If there is an HAConnect running, then interrupt it so it will
-// * terminate.
-// */
-// {
-// final HAConnect cxn = haConnect.getAndSet(null/* clear */);
-// if (cxn != null) {
-// cxn.interrupt();
-// }
-// }
+        }
 
-            // Immediate shutdown of the write service.
-            localWriteService.shutdownNow();
+        // Immediate shutdown of the write service.
+        localWriteService.shutdownNow();
 
 //        // Immediate shutdown of the remote write service (if running).
 //        if (remoteWriteService != null) {
 //            remoteWriteService.shutdownNow();
 //        }
 
-            /*
-             * Ensure that the WriteCache buffers are close()d in a timely
-             * manner.
-             */
+        /*
+         * Ensure that the WriteCache buffers are close()d in a timely
+         * manner.
+         */
 
-            // reset buffers on the dirtyList.
-            dirtyListLock.lockInterruptibly();
-            try {
-                dirtyList.drainTo(new LinkedList<WriteCache>());
-                dirtyListEmpty.signalAll();
-                dirtyListNotEmpty.signalAll();
-            } finally {
-                dirtyListLock.unlock();
-            }
+        // reset buffers on the dirtyList.
+        dirtyListLock.lock/*Interruptibly*/();
+        try {
+            dirtyList.drainTo(new LinkedList<WriteCache>());
+            dirtyListEmpty.signalAll();
+            dirtyListNotEmpty.signalAll();
+        } finally {
+            dirtyListLock.unlock();
+        }
 
-            // close() buffers on the cleanList.
-            cleanListLock.lockInterruptibly();
-            try {
-                cleanList.drainTo(new LinkedList<WriteCache>());
-            } finally {
-                cleanListLock.unlock();
-            }
+        // close() buffers on the cleanList.
+        cleanListLock.lock/*Interruptibly*/();
+        try {
+            cleanList.drainTo(new LinkedList<WriteCache>());
+        } finally {
+            cleanListLock.unlock();
+        }
 
-            // close all buffers.
-            for (WriteCache t : buffers) {
-                t.close();
-            }
+        /*
+         * Note: The lock protects the [current] reference.
+         */
+        final WriteLock writeLock = lock.writeLock();
+        writeLock.lock/*Interruptibly*/();
+        try {
 
-            // clear reference to the current buffer.
-            current.getAndSet(null);
+            // close all buffers.
+            boolean interrupted = false;
+            for (WriteCache t : buffers) {
+                try {
+                    t.close();
+                } catch (InterruptedException ex) {
+                    interrupted = true;
+                    continue;
+                }
+            }
 
-            // clear the service record map.
-            recordMap.clear();
+            // clear reference to the current buffer.
+            current.getAndSet(null);
 
-            // clear the file extent to an illegal value.
-            fileExtent.set(-1L);
+            // clear the service record map.
+            recordMap.clear();
 
-// /*
-// * Stop the HAServer instance if one is running.
-// */
-// final HAServer haServer = this.haServer.get();
-// if (haServer != null)
-// haServer.interrupt();
+            // clear the file extent to an illegal value.
+            fileExtent.set(-1L);
 
-        } finally {
+            if (interrupted)
+                Thread.currentThread().interrupt();
+
+        } finally {
             writeLock.unlock();
         }
-    }
+
+    }
 
     /**
      * Ensures that {@link #close()} is eventually invoked so the buffers can be
@@ -1073,8 +1084,8 @@
      */
     private void assertOpenForWriter() {
 
-        if (!open)
-            throw new IllegalStateException();
+        if (!open.get())
+            throw new IllegalStateException(firstCause.get());
 
         if (halt)
             throw new RuntimeException(firstCause.get());
@@ -1415,8 +1426,8 @@
                     + ", chk=" + chk + ", useChecksum=" + useChecksum);
         }
 
-        if (!open)
-            throw new IllegalStateException("WriteCacheService has been closed");
+        if (!open.get())
+            throw new IllegalStateException(firstCause.get());
 
         if (offset < 0)
             throw new IllegalArgumentException();
@@ -1911,7 +1922,7 @@
     public ByteBuffer read(final long offset) throws InterruptedException,
             ChecksumError {
 
-        if (!open) {
+        if (!open.get()) {
             /*
              * Not open. Return [null] rather than throwing an exception per the
              * contract for this implementation.
             */
@@ -1945,7 +1956,7 @@
             * The write cache was closed. Per the API for this method, return
             * [null] so that the caller will read through to the backing store.
             */
-            assert !open;
+            assert !open.get();
 
             return null;
 
        }
@@ -2165,4 +2176,17 @@
 
     }
 
+    /**
+     * An instance of this exception is thrown if a thread notices that the
+     * {@link WriteCacheService} was closed by a concurrent process.
+     *
+     * @author <a href="mailto:tho...@us...">Bryan
+     *         Thompson</a>
+     */
+    public static class AsynchronousCloseException extends IllegalStateException {
+
+        private static final long serialVersionUID = 1L;
+
+    }
+
 }
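The heart of the close() rewrite above is the compareAndSet() on the new AtomicBoolean, which makes shutdown idempotent without taking the write lock on entry. The idiom in isolation:

    private final AtomicBoolean open = new AtomicBoolean(true);

    public void close() {

        if (!open.compareAndSet(true/* expect */, false/* update */)) {

            // Lost the race: another thread is (or was) closing the service.
            return;

        }

        // Exactly one thread reaches this point and runs the tear down.

    }
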
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -45,6 +45,7 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
@@ -167,7 +168,7 @@
     /**
      * Logger.
      */
-    protected static final Logger log = Logger.getLogger(IJournal.class);
+    private static final Logger log = Logger.getLogger(AbstractJournal.class);
 
     /**
      * The index of the root address containing the address of the persistent
@@ -200,6 +201,26 @@
      */
     final protected Properties properties;
 
+    /**
+     * The #of open journals (JVM wide). This is package private. It is used to
+     * chase down unit tests which are not {@link #close() closing} the Journal.
+     */
+    final static AtomicInteger nopen = new AtomicInteger();
+
+    /**
+     * The #of closed journals (JVM wide). This is package private. It is used
+     * to chase down unit tests which are not {@link #close() closing} the
+     * Journal.
+     */
+    final static AtomicInteger nclose = new AtomicInteger();
+
+    /**
+     * The #of destroyed journals (JVM wide). This is package private. It is
+     * used to chase down unit tests which are not {@link #destroy() destroying}
+     * the journal.
+     */
+    final static AtomicInteger ndestroy = new AtomicInteger();
+
     /**
      * The directory that should be used for temporary files.
      */
@@ -1051,6 +1072,8 @@
 
         }
 
+        nopen.incrementAndGet();
+
     }
 
     /**
@@ -1338,6 +1361,8 @@
 
         }
 
+        nclose.incrementAndGet();
+
     }
 
     /**
@@ -1712,6 +1737,8 @@
 
         }
 
+        ndestroy.incrementAndGet();
+
     }
 
     /**

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/Journal.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -41,6 +41,7 @@
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.log4j.Logger;
@@ -99,6 +100,11 @@
         /*ILocalTransactionManager,*/ IResourceManager {
 
     /**
+     * Logger.
+     */
+    private static final Logger log = Logger.getLogger(Journal.class);
+
+    /**
      * Object used to manage local transactions.
      */
     private final AbstractLocalTransactionManager localTransactionManager;

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -2227,11 +2227,11 @@
     private final void releaseWriteCache() {
 
         if (writeCacheService != null) {
-            try {
+//            try {
                 writeCacheService.close();
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
+//            } catch (InterruptedException e) {
+//                throw new RuntimeException(e);
+//            }
         }
 
     }
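TestHelper.java is added by this commit but its source does not appear in this message. Given the nopen/nclose counters introduced on AbstractJournal above, a plausible sketch of the leak check is the following (hypothetical; the committed implementation may differ):

    // Hypothetical sketch -- the committed TestHelper source is not shown here.
    public class TestHelper {

        public static void checkJournalsClosed(final junit.framework.TestCase test) {

            final int nopen = AbstractJournal.nopen.get();
            final int nclose = AbstractJournal.nclose.get();

            if (nopen != nclose) {

                // Flag the unit test which failed to close() its Journal.
                throw new AssertionError("Open journals: test=" + test.getName()
                        + ", nopen=" + nopen + ", nclose=" + nclose);

            }

        }

    }
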
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties  2011-05-19 17:13:50 UTC (rev 4526)
@@ -18,6 +18,8 @@
 
 log4j.logger.com.bigdata.btree=WARN
 
+log4j.logger.com.bigdata.rdf.sail.webapp=ALL
+
 #log4j.logger.com.bigdata.rwstore.sector.MemoryManager=INFO
 #log4j.logger.com.bigdata.rwstore.sector.AllocationContext=INFO
 #log4j.logger.com.bigdata.rwstore.sector.SectorAllocator=DEBUG
@@ -62,7 +64,7 @@
 #log4j.logger.com.bigdata.service.AbstractTransactionService=INFO
 #log4j.logger.com.bigdata.journal.AbstractLocalTransactionManager=INFO
 log4j.logger.com.bigdata.concurrent.TxDag=WARN
-log4j.logger.com.bigdata.concurrent.NonBlockingLockManager=WARN
+log4j.logger.com.bigdata.concurrent.NonBlockingLockManagerWithNewDesign=WARN
 log4j.logger.com.bigdata.concurrent.TestNonBlockingLockManager=INFO
 log4j.logger.com.bigdata.concurrent.AbstractStressTestNonBlockingLockManager=INFO
 #log4j.logger.com.bigdata.concurrent.LockManager=INFO
@@ -100,9 +102,12 @@
 #log4j.logger.com.bigdata.bop=ALL
 #log4j.logger.com.bigdata.bop.join.PipelineJoin=ALL
 #log4j.logger.com.bigdata.bop.solutions.SliceOp=ALL,destPlain
+#log4j.logger.com.bigdata.bop.controller.SubqueryOp=ALL
+#log4j.logger.com.bigdata.bop.controller.SubqueryHashJoinOp=ALL
 #log4j.logger.com.bigdata.bop.engine=ALL
 #log4j.logger.com.bigdata.bop.engine.QueryEngine=ALL
-#log4j.logger.com.bigdata.bop.engine.RunningQuery=ALL
+#log4j.logger.com.bigdata.bop.engine.AbstractRunningQuery=ALL
+#log4j.logger.com.bigdata.bop.engine.ChunkedRunningQuery=ALL
 #log4j.logger.com.bigdata.bop.engine.RunState=INFO
 log4j.logger.com.bigdata.bop.joinGraph.rto.JGraph=INFO
 #log4j.logger.com.bigdata.bop.joinGraph.rto.Vertex=ALL
@@ -144,7 +149,6 @@
 #log4j.logger.com.bigdata.counters.query=INFO
 #log4j.logger.com.bigdata.counters.XMLUtility=INFO
 #log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=INFO
-#log4j.logger.com.bigdata.util.httpd.NanoHTTPD=INFO
 #log4j.logger.com.bigdata.journal.TestConcurrentWritersOnNamedIndices=DEBUG
 #log4j.logger.com.bigdata.concurrent=INFO
@@ -188,10 +192,14 @@
 #log4j.logger.com.bigdata.rdf.sail.tck.BigdataFederationSparqlTest=INFO
 #log4j.logger.com.bigdata.bop.fed.shards.Algorithm_NestedLocatorScan=ALL
 #log4j.logger.com.bigdata.rdf.sail.TestNamedGraphs=DEBUG
-#log4j.logger.com.bigdata.rdf.sail.QuadsTestCase=DEBUG
+log4j.logger.com.bigdata.rdf.sail.QuadsTestCase=DEBUG
 #log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryWithJoinThreadsTask=DEBUG
 #log4j.logger.com.bigdata.rdf.sail.TestNestedUnions=ALL
 
+log4j.logger.com.bigdata.util.httpd.NanoHTTPD=DEBUG
+log4j.logger.com.bigdata.util.httpd.AbstractHTTPD=DEBUG
+log4j.logger.com.bigdata.rdf.sail.bench.NanoSparqlServer=ALL
+
 # Lehigh benchmark integration
 log4j.logger.edu.lehigh.swat.bench.ubt.bigdata=INFO
@@ -216,6 +224,8 @@
 #log4j.logger.junit=DEBUG
 log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO
 
+#log4j.logger.com.bigdata.rdf.sail.contrib.TestMillisecondPrecisionForInlineDateTimes=ALL
+
 # dest1
 log4j.appender.dest1=org.apache.log4j.ConsoleAppender
 log4j.appender.dest1.layout=org.apache.log4j.PatternLayout
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -179,6 +179,7 @@
         final byte[] k8 = new byte[]{0x08};
         final byte[] k9 = new byte[]{0x09};
         final byte[] k10 = new byte[]{0x0a};
+        final byte[] k20 = new byte[]{0x20};
 
         final byte[] v1 = new byte[]{0x01};
         final byte[] v2 = new byte[]{0x02};
@@ -190,6 +191,7 @@
         final byte[] v8 = new byte[]{0x08};
         final byte[] v9 = new byte[]{0x09};
         final byte[] v10 = new byte[]{0x0a};
+        final byte[] v20 = new byte[]{0x20};
 
         // a key which we never insert and which should never be found by lookup.
         final byte[] unused = new byte[]{-127};
@@ -385,13 +387,98 @@
                 .lookupAll(unused));
 
         /*
-         * 5. Insert 0x05. This goes into the same buddy bucket. The buddy
-         * bucket is full again. It is only the only buddy bucket on the
-         * page, e.g., global depth == local depth. Therefore, before we can
-         * split the buddy bucket we have first introduce a new directory
-         * page into the hash tree.
+         * 5. Insert 0x20 (32). This goes into the same buddy bucket. The
+         * buddy bucket is full from the previous insert. It is the only
+         * buddy bucket on the page, e.g., global depth == local depth.
+         * Therefore, before we can split the buddy bucket we must first
+         * introduce a new directory page into the hash tree.
          *
-         * FIXME Update comments to reflect the example (and update the
+         * Note: The key 0x20 was chosen since it has one bit in the high
+         * nibble which is set (the 3rd bit in the byte). The root directory
+         * page has globalDepth := 2, so it will consume the first two bits.
+         * The new directory page has globalDepth := 1 so it will examine
+         * the next bit in the key. Therefore, based on the 3rd bit this
+         * will select bucket page (e) rather than bucket page (a).
+         *
+         * If we were to insert 0x05 (or even 0x10) at this point instead
+         * then we would have to introduce more than one directory level
+         * before the insert could succeed. Choosing 0x20 thus minimizes the
+         * #of unobserved intermediate states of the hash tree.
+         *
+         * FIXME This is causing a repeated introduction of a new directory
+         * level and giving me a hash tree structure which differs from my
+         * expectations. Work through the example on paper. Make sure that
+         * the prefixLength gets us into the right bits in the new key such
+         * that we go to the right bucket page. Also, verify whether or not
+         * a directory page can have pointers to a child which are not
+         * contiguous -- I am seeing that in the directory after the second
+         * split and it looks wrong to me. [Now the directory structure
+         * looks good, but it is attempting to insert into the wrong bucket
+         * after a split. I am also seeing some child pages which are not
+         * loaded, which is wrong since everything should be wired into
+         * memory]
+         *
+         * TODO Do an alternative example where we insert 0x05 at this step
+         * instead of 0x20.
+         */
+        htree.insert(k20, v20);
+        assertEquals("nnodes", 2, htree.getNodeCount());
+        assertEquals("nleaves", 4, htree.getLeafCount());
+        assertEquals("nentries", 5, htree.getEntryCount());
+        htree.dump(Level.ALL, System.err, true/* materialize */);
+        assertTrue(root == htree.getRoot());
+        assertEquals(4, root.childRefs.length);
+        final DirectoryPage d = (DirectoryPage) root.childRefs[0].get();
+        assertTrue(d == (DirectoryPage) root.childRefs[0].get());
+        assertTrue(c == (BucketPage) root.childRefs[1].get());
+        assertTrue(b == (BucketPage) root.childRefs[2].get());
+        assertTrue(b == (BucketPage) root.childRefs[3].get());
+        assertEquals(4, d.childRefs.length);
+        final BucketPage e = (BucketPage) d.childRefs[1].get();
+        assertTrue(a == (BucketPage) d.childRefs[0].get()); // FIXME This shows that the buddy references do not have to be contiguous!
+        assertTrue(e == (BucketPage) d.childRefs[1].get());
+        assertTrue(a == (BucketPage) d.childRefs[2].get());
+        assertTrue(a == (BucketPage) d.childRefs[3].get());
+        assertEquals(2, root.getGlobalDepth());
+        assertEquals(1, d.getGlobalDepth());
+        assertEquals(1, a.getGlobalDepth());// recomputed!
+        assertEquals(1, e.getGlobalDepth());// same as [a] (recomputed).
+        assertEquals(1, b.getGlobalDepth());// unchanged.
+        assertEquals(2, c.getGlobalDepth());// recomputed!
+        assertEquals(2, a.getKeyCount());
+        assertEquals(2, a.getValueCount());
+        assertEquals(3, e.getKeyCount());
+        assertEquals(3, e.getValueCount());
+        assertEquals(0, b.getKeyCount());
+        assertEquals(0, b.getValueCount());
+        assertEquals(0, c.getKeyCount());
+        assertEquals(0, c.getValueCount());
+        assertTrue(htree.contains(k1));
+        assertTrue(htree.contains(k2));
+        assertTrue(htree.contains(k3));
+        assertTrue(htree.contains(k4));
+        assertTrue(htree.contains(k20));
+        assertFalse(htree.contains(unused));
+        assertEquals(v1, htree.lookupFirst(k1));
+        assertEquals(v2, htree.lookupFirst(k2));
+        assertEquals(v3, htree.lookupFirst(k3));
+        assertEquals(v4, htree.lookupFirst(k4));
+        assertEquals(v20, htree.lookupFirst(k20));
+        assertNull(htree.lookupFirst(unused));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] { v1 }, htree
+                .lookupAll(k1));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] { v2 }, htree
+                .lookupAll(k2));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] { v3 }, htree
+                .lookupAll(k3));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] { v4 }, htree
+                .lookupAll(k4));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] { v20 }, htree
+                .lookupAll(k20));
+        AbstractBTreeTestCase.assertSameIterator(new byte[][] {}, htree
+                .lookupAll(unused));
+
+        /* FIXME Update comments to reflect the example (and update the
          * worksheet as well).
          *
          * FIXME We MUST NOT decrease the localDepth of (a) since that would
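The bit arithmetic behind the choice of 0x20 in the test above, worked out:

    key 0x20 = 0010 0000 : root (globalDepth=2) consumes bits 1-2 = 00 -> slot 0,
                           which now points at the new directory page (d);
                           (d) (globalDepth=1) consumes bit 3 = 1 -> bucket page (e).

    key 0x05 = 0000 0101 : bits 1-2 = 00 -> (d); bit 3 = 0 -> back onto (a), which
                           is full, so another directory level would be required
                           before the insert could succeed.
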
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestCase3.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestCase3.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestCase3.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -29,6 +29,8 @@
 
 import java.nio.ByteBuffer;
 
+import com.bigdata.journal.TestHelper;
+
 import junit.framework.TestCase;
 import junit.framework.TestCase2;
 
@@ -56,6 +58,14 @@
 
     }
 
+    protected void tearDown() throws Exception {
+
+        super.tearDown();
+
+        TestHelper.checkJournalsClosed(this);
+
+    }
+
     /**
      * Helper method verifies that the contents of <i>actual</i> from
      * position() to limit() are consistent with the expected byte[]. A

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -143,11 +143,11 @@
         } finally {
             if (writeCache != null)
-                try {
+//                try {
                     writeCache.close();
-                } catch (InterruptedException e) {
-                    log.error(e, e);
-                }
+//                } catch (InterruptedException e) {
+//                    log.error(e, e);
+//                }
             if (opener != null) {
                 opener.destroy();
             }
@@ -210,11 +210,11 @@
             fail("Unexpected Exception", e);
         } finally {
             if (writeCache != null)
-                try {
+//                try {
                     writeCache.close();
-                } catch (InterruptedException e) {
-                    log.error(e, e);
-                }
+//                } catch (InterruptedException e) {
+//                    log.error(e, e);
+//                }
             if (opener != null) {
                 opener.destroy();
             }
@@ -394,11 +394,11 @@
             }
         } finally {
             if (writeCache != null)
-                try {
+//                try {
                     writeCache.close();
-                } catch (InterruptedException e) {
-                    log.error(e, e);
-                }
+//                } catch (InterruptedException e) {
+//                    log.error(e, e);
+//                }
             if (opener != null) {
                 opener.destroy();
             }

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -2109,11 +2109,11 @@
 
         } finally {
             if (writeCacheService != null)
-                try {
+//                try {
                     writeCacheService.close();
-                } catch (InterruptedException e) {
-                    log.error(e, e);
-                }
+//                } catch (InterruptedException e) {
+//                    log.error(e, e);
+//                }
             if (opener != null) {
                 opener.destroy();
             }
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractBufferStrategyTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractBufferStrategyTestCase.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractBufferStrategyTestCase.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -120,7 +120,8 @@
 
         try {
 
-            if (! (store.getBufferStrategy() instanceof AbstractBufferStrategy)) return;
+            if (!(store.getBufferStrategy() instanceof AbstractBufferStrategy))
+                return;
 
             final AbstractBufferStrategy bufferStrategy = (AbstractBufferStrategy) store
                     .getBufferStrategy();

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractInterruptsTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractInterruptsTestCase.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractInterruptsTestCase.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -78,13 +78,13 @@
      * @throws InterruptedException
      * @throws ExecutionException
      */
-    public void test_channelOpenAfterInterrupt()
-            throws InterruptedException, ExecutionException {
-
-        for(int i=0; i<10; i++) {
-
+    public void test_channelOpenAfterInterrupt() throws InterruptedException,
+            ExecutionException {
+
+        for (int i = 0; i < 10; i++) {
+
             doChannelOpenAfterInterrupt();
-
+
         }
 
     }
@@ -106,11 +106,14 @@
 
         try {
 
-            // Note: This test requires a journal backed by stable storage.
-
-            if(store.isStable() && store instanceof IJournal) {
+            if (!store.isStable() || !(store instanceof IJournal)) {
 
-                final Journal journal = (Journal)store;
+                // Note: This test requires a journal backed by stable storage.
+                return;
+
+            }
+
+            final Journal journal = (Journal) store;
 
             final String[] resource = new String[]{"foo"};//,"bar","baz"};
 
@@ -145,7 +148,7 @@
              * "get()" this task since that will block and the 'interrupt' task
              * will not run.
             */
-            final long maxWaitMillis = 5*1000;
+            final long maxWaitMillis = 5 * 1000;
 
             journal.submit(new AbstractTask(journal,ITx.UNISOLATED,new String[]{}){
 
                 protected Object doTask() throws Exception {
@@ -214,8 +217,6 @@
 
                 journal.write(ByteBuffer.wrap(new byte[]{1,2,3}));
 
                 journal.force(true);
-
-            }
 
         } finally {
@@ -311,9 +312,12 @@
         final IRawStore store = getStore();
 
         try {
-
-            if (store.isStable()) {
 
+            if (!store.isStable()) {
+
+                // This test requires storage having a backing FileChannel.
+                return;
+
+            }
+
             final ByteBuffer rec1 = getRandomData();
 
             final long addr1 = store.write(rec1);
@@ -366,8 +370,6 @@
 
             AbstractRawStoreTestCase.assertEquals(rec1.array(),actual);
 
-            }
-
         } finally {
 
             store.destroy();
@@ -397,15 +399,21 @@
      * <p>
      * Note: This test is only for {@link IDiskBasedStrategy} implementations.
      * Note: This test is not relevant for RWStrategy since it does not buffer
-     * writes in a reliable way, and furthermore will invalidate the store after
-     * an interrupt.
+     * writes in a reliable way.
      */
     public void test_reopenAfterInterrupt_checkWriteBuffer() {
 
         final IRawStore store = getStore();
 
         try {
 
-            if (store.isStable() && !(store instanceof RWStrategy)) {
+            if(!store.isStable()) return;
+
+            if(!(store instanceof Journal)) return;
+
+            if(((Journal)store).getBufferStrategy() instanceof RWStrategy) {
+                return;
+            }
+
             final ByteBuffer rec1 = getRandomData();
 
             final long addr1 = store.write(rec1);
@@ -446,8 +454,6 @@
 
             final ByteBuffer actual = store.read(addr1);
 
             AbstractRawStoreTestCase.assertEquals(rec1.array(),actual);
-
-            }
 
         } finally {

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -68,10 +68,10 @@
     /**
     * Invoked from {@link TestCase#setUp()} for each test in the suite.
     */
-    public void setUp(ProxyTestCase testCase) throws Exception {
+    public void setUp(final ProxyTestCase testCase) throws Exception {
 
         super.setUp(testCase);
-
+
 //        if(log.isInfoEnabled())
 //        log.info("\n\n================:BEGIN:" + testCase.getName()
 //                + ":BEGIN:====================");
 
     }
 
    /**
     * Invoked from {@link TestCase#tearDown()} for each test in the suite.
     */
@@ -81,12 +81,14 @@
-    public void tearDown(ProxyTestCase testCase) throws Exception {
+    public void tearDown(final ProxyTestCase testCase) throws Exception {
 
         super.tearDown(testCase);
 
-        deleteTestFile();
+        TestHelper.checkJournalsClosed(testCase, this);
 
+//        deleteTestFile();
+
     }
 
 //    public void tearDown() throws Exception {
 //
 //        super.tearDown();
 //
 //    }
 
@@ -97,49 +99,49 @@
-    /**
-     * Note: your unit must close the store for delete to work.
-     */
-    protected void deleteTestFile() {
+//    /**
+//     * Note: your unit must close the store for delete to work.
+//     */
+//    protected void deleteTestFile() {
+//
+//        if(m_properties==null) return; // never requested.
+//
+//        String val;
+//
+//        val = (String) m_properties.getProperty(Options.FILE);
+//
+//        if(val!= null) {
+//
+//            File file = new File(val);
+//
+//            if(file.exists()) {
+//
+//                val = (String) m_properties.getProperty(Options.DELETE_ON_EXIT);
+//
+//                if(val==null) {
+//
+//                    val = (String) m_properties.getProperty(Options.DELETE_ON_CLOSE);
+//
+//                }
+//
+//                if(Boolean.parseBoolean(val)) {
+//
+//                    System.err.println("Attempting to delete file: "+file);
+//
+//                    if(!file.delete()) {
+//
+//                        log.warn("Could not delete file: "+file);
+//
+//                    }
+//
+//                }
+//
+//            }
+//
+//        }
+//
+//    }
 
-        if(m_properties==null) return; // never requested.
-
-        String val;
-
-        val = (String) m_properties.getProperty(Options.FILE);
-
-        if(val!= null) {
-
-            File file = new File(val);
-
-            if(file.exists()) {
-
-                val = (String) m_properties.getProperty(Options.DELETE_ON_EXIT);
-
-                if(val==null) {
-
-                    val = (String) m_properties.getProperty(Options.DELETE_ON_CLOSE);
-
-                }
-
-                if(Boolean.parseBoolean(val)) {
-
-                    System.err.println("Attempting to delete file: "+file);
-
-                    if(!file.delete()) {
-
-                        log.warn("Could not delete file: "+file);
-
-                    }
-
-                }
-
-            }
-
-        }
-
-    }
 
     //
     // Properties
     //

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractRestartSafeTestCase.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractRestartSafeTestCase.java  2011-05-19 13:52:16 UTC (rev 4525)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractRestartSafeTestCase.java  2011-05-19 17:13:50 UTC (rev 4526)
@@ -137,6 +137,7 @@
         IAtomicStore store = (IAtomicStore) getStore();
 
         try {
+
             assertTrue(store.isStable());
 
             final Random r = new Random();
@@ -212,58 +213,58 @@
         IAtomicStore store = (IAtomicStore)getStore();
 
         try {
-
-            assertTrue(store.isStable());
-
-            Random r = new Random();
-
-            final int len = 100;
-
-            byte[] expected = new byte[len];
-
-            r.nextBytes(expected);
-
-            ByteBuffer tmp = ByteBuffer.wrap(expected);
-
-            long addr1 = store.write(tmp);
 
-            // verify that the position is advanced to the limit.
-            assertEquals(len,tmp.position());
-            assertEquals(tmp.position(),tmp.limit());
+            assertTrue(store.isStable());
 
-            // read the data back.
-            ByteBuffer actual = store.read(addr1);
-
-            assertEquals(expected,actual);
-
-            /*
-             * verify the position and limit after the read.
-             */
-            assertEquals(0,actual.position());
-            assertEquals(expected.length,actual.limit());
+            final Random r = new Random();
 
-            /*
-             * Commit the changes - if you do not commit the changes then the root
-             * blocks are not updated and your... [truncated message content]