From: <mar...@us...> - 2011-05-17 15:50:23
Revision: 4513 http://bigdata.svn.sourceforge.net/bigdata/?rev=4513&view=rev Author: martyncutcher Date: 2011-05-17 15:50:17 +0000 (Tue, 17 May 2011) Log Message: ----------- Fix shadow allocation contexts and ensure that correct BTree checkpoint is used for IsolatedActionJournal in AbstractTask.getIndex Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/sector/TestMemoryManager.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -586,6 +586,8 @@ throw new NoSuchIndexException(name); } + + long checkpointAddr = entry.checkpointAddr; /* * Note: At this point we have an exclusive lock on the named @@ -628,14 +630,22 @@ * isolated. [Also, we do not want N2A to cache references to a * B+Tree backed by a different shadow journal.] */ - + if ((resourceManager.getLiveJournal().getBufferStrategy() instanceof RWStrategy)) { /* * Note: Do NOT use the name2Addr cache for the RWStore. * Each unisolated index view MUST be backed by a shadow * journal! + * + * But, fetch the btree from the cache to ensure we use the + * most recent checkpoint */ btree = null; + + final BTree tmpbtree = name2Addr.getIndexCache(name); + if (tmpbtree != null) + checkpointAddr = tmpbtree.getCheckpoint().getCheckpointAddr(); + } else { // recover from unisolated index cache. btree = name2Addr.getIndexCache(name); @@ -650,7 +660,7 @@ // re-load btree from the store. btree = BTree.load(// tmp, // backing store. - entry.checkpointAddr,// + checkpointAddr,// false// readOnly ); @@ -1086,9 +1096,11 @@ // clear the commit list. commitList.clear(); - // Detach the allocation context used by the operation. - ((IsolatedActionJournal) getJournal()).detachContext(); + // Detach the allocation context used by the operation, but increment + // txCount + ((IsolatedActionJournal) getJournal()).prepareCommit(); + final long elapsed = System.nanoTime() - begin; if(INFO) { @@ -1119,16 +1131,9 @@ * either the code path where the commit succeeds or the code path where it * fails. The boolean argument indicates whether or not the group commit * succeeded. Throws exceptions are trapped and logged. - * <p> - * Note: This method is NOT invoked if a task fails before joining the group - * commit. In that case, {@link #abortTask()} will be invoked. If the task - * succeeds then {@link #checkpointTask()} will be called and the task will - * eventually join a commit group. When the commit runs for that commit - * group, this method will be invoked on either the success or failure path - * for the group commit. 
*/ void afterTaskHook(boolean abort) { - + ((IsolatedActionJournal) getJournal()).completeTask(); } /* @@ -2211,7 +2216,23 @@ } - /** + public void prepareCommit() { +// final IBufferStrategy bufferStrategy = delegate.getBufferStrategy(); +// if (bufferStrategy instanceof RWStrategy) { +// ((RWStrategy) bufferStrategy).getRWStore().activateTx(); +// } + // now detach! + detachContext(); + } + + public void completeTask() { + final IBufferStrategy bufferStrategy = delegate.getBufferStrategy(); + if (bufferStrategy instanceof RWStrategy) { + ((RWStrategy) bufferStrategy).getRWStore().deactivateTx(); + } + } + + /** * This class prevents {@link ITx#UNISOLATED} tasks from having direct * access to the {@link AbstractJournal} using * {@link AbstractTask#getJournal()}. @@ -2241,6 +2262,10 @@ final IBufferStrategy bufferStrategy = source.getBufferStrategy(); if (bufferStrategy instanceof RWStrategy) { + // must grab the tx BEFORE registering the context to correctly + // bracket, since the tx count is decremented AFTER the + // context is released + ((RWStrategy) bufferStrategy).getRWStore().activateTx(); ((RWStrategy) bufferStrategy).getRWStore().registerContext(this); } } @@ -2587,6 +2612,8 @@ public void abortContext() { delegate.abortContext(this); + + completeTask(); } public ScheduledFuture<?> addScheduledTask(final Runnable task, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -140,8 +140,8 @@ public boolean freeBit(final int bit, final boolean sessionProtect) { if (!RWStore.tstBit(m_live, bit)) { - if (sessionProtect && RWStore.tstBit(m_transients, bit)) - return false; +// if (sessionProtect && RWStore.tstBit(m_transients, bit)) +// return false; throw new IllegalArgumentException("Freeing bit not set"); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -120,9 +120,15 @@ * where committed data is accessed even if has been marked as ready to * be recycled after the next commit */ + final long paddr = RWStore.convertAddr(block.m_addr) + ((long) m_size * bit); if (RWStore.tstBit(block.m_transients, bit)) { - return RWStore.convertAddr(block.m_addr) + ((long) m_size * bit); + return paddr; } else { + if (RWStore.tstBit(block.m_commit, bit)) { + throw new IllegalStateException("Address committed but not set in transients"); + } + m_store.showWriteCacheDebug(paddr); + return 0L; } } @@ -234,9 +240,8 @@ try { str.writeInt(m_size); -// if (!m_sessionActive) -// System.out.println("Committing allocator, protection: " + m_sessionActive + ", transient frees: " + m_freeTransients); - + boolean protectTransients = m_sessionActive || m_store.isSessionProtected(); + final Iterator<AllocBlock> iter = m_allocBlocks.iterator(); while (iter.hasNext()) { final AllocBlock block = iter.next(); @@ -246,7 +251,7 @@ str.writeInt(block.m_live[i]); } - if (!m_sessionActive) { + if (!protectTransients) { 
block.m_transients = block.m_live.clone(); } @@ -553,13 +558,8 @@ final int block = offset/nbits; m_sessionActive = m_store.isSessionProtected(); -// if (!m_sessionActive) { -// System.out.println("Freeing " + addr + " without protection"); -// } + try { -// if (!m_sessionActive) { -// System.out.println("NO SESSION PROTECT FOR " + addr + "[" + size + "]"); -// } if (((AllocBlock) m_allocBlocks.get(block)) .freeBit(offset % nbits, m_sessionActive && !overideSession)) { // bit adjust @@ -847,10 +847,6 @@ final boolean ret = !isCommitted(offset); -// if (ret) { -// System.out.println("CAN FREE " + addr + "[" + size + "]"); -// } - return ret; } else { return false; @@ -880,7 +876,7 @@ checkFreeList(); - m_sessionActive = false; + m_sessionActive = m_store.isSessionProtected(); } } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -1684,6 +1684,8 @@ assert(m_activeTxCount == 0 && m_contexts.isEmpty()); if (m_minReleaseAge == 0) { + if (log.isDebugEnabled()) + log.debug("RELEASE SESSIONS"); for (FixedAllocator fa : m_allocs) { fa.releaseSession(m_writeCache); } @@ -1990,7 +1992,7 @@ directWrite(pa, buf, 0, size, chk); } else { try { - m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); + m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); } catch (InterruptedException e) { throw new RuntimeException("Closed Store?", e); } @@ -3549,6 +3551,8 @@ if (alloc != null) { m_contextRemovals++; alloc.release(); + } else { + throw new IllegalStateException("Multiple call to detachContext"); } if (m_contexts.isEmpty() && this.m_activeTxCount == 0) { @@ -3578,6 +3582,7 @@ m_contextRemovals++; alloc.abort(); } + } finally { m_allocationLock.unlock(); } @@ -4462,6 +4467,9 @@ public void deactivateTx() { m_allocationLock.lock(); try { + if (m_activeTxCount == 0) { + throw new IllegalStateException("Tx count must be positive!"); + } m_activeTxCount--; if(log.isInfoEnabled()) log.info("#activeTx="+m_activeTxCount); @@ -4528,5 +4536,9 @@ } } + public void showWriteCacheDebug(long paddr) { + log.warn("WriteCacheDebug: " + paddr + " - " + m_writeCache.addrDebugInfo(paddr)); + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -826,6 +826,43 @@ } } + public void testAllocationContexts() { + + Journal store = (Journal) getStore(); + + try { + + RWStrategy bufferStrategy = (RWStrategy) store.getBufferStrategy(); + + RWStore rw = bufferStrategy.getRWStore(); + + IAllocationContext cntxt1 = new IAllocationContext() {}; + + IAllocationContext cntxt2 = new IAllocationContext() {}; + + // allocate a global address + int gaddr = rw.alloc(412, null); + + store.commit(); + + // allocate a context address grabbing previous global allocation + int c1addr = rw.alloc(412, cntxt1); + // imagine we are re-allocating the original address + rw.free(gaddr, 412, cntxt1); + // now abort context + rw.abortContext(cntxt1); + 
+ store.commit(); + + long paddr = rw.physicalAddress(gaddr); + + assertTrue("Global allocation must be protected", paddr != 0); + + } finally { + store.destroy(); + } + } + void showStore(Journal store) { RWStrategy bufferStrategy = (RWStrategy) store.getBufferStrategy(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/sector/TestMemoryManager.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/sector/TestMemoryManager.java 2011-05-16 20:28:56 UTC (rev 4512) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/rwstore/sector/TestMemoryManager.java 2011-05-17 15:50:17 UTC (rev 4513) @@ -197,6 +197,8 @@ public void test_largeBlobAllocation() { final int sectorSize = manager.getSectorSize(); + + assertTrue(manager.getSlotBytes() == 0L); final long addr = manager.allocate(sectorSize, false/* blocks */); @@ -207,6 +209,10 @@ assertEquals("allocationSize", sectorSize, manager.allocationSize(addr)); + manager.free(addr); + + assertTrue(manager.getSlotBytes() == 0L); + } /** @@ -225,11 +231,11 @@ public void test_blockingAllocation() throws InterruptedException, ExecutionException, TimeoutException { - final int sectorSize = manager.getSectorSize(); + final int sectorSize = manager.getSectorSize(); // allow for blob // grab all the memory (for this size of allocation). final List<Long> addrs = new LinkedList<Long>(); - while(true) { + while (true) { try { final long addr = manager.allocate(sectorSize, false/* blocks */); addrs.add(addr); @@ -319,6 +325,8 @@ log.info("freeing: addr=" + addr + ", sizeof(addr)=" + manager.allocationSize(addr) + ", slotBytes=" + manager.getSlotBytes()); + + assertTrue(manager.getSlotBytes() == 0); } finally { @@ -341,6 +349,12 @@ log.info("Manager slotBytes: " + manager.getSlotBytes()); } + + public void test_stressBlockingAllocation() throws InterruptedException, ExecutionException, TimeoutException { + for (int i = 0; i < 50; i++) { + test_blockingAllocation(); + } + } /** * Unit test for reading a copy of the data from the {@link IMemoryManager}.
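The invariant behind this changeset is that a transaction reference is taken BEFORE an allocation context is registered and released only AFTER the context is detached, so session protection cannot lapse while shadowed allocations are still live. The sketch below reduces that bracketing to a minimal standalone form. Only the method names activateTx, deactivateTx, registerContext, detachContext and isSessionProtected mirror the RWStore methods in the diff; the class name and all method bodies are simplified assumptions, not the bigdata implementation.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch of reference-counted session protection (illustrative only).
class SessionProtectedStore {

    private final ReentrantLock allocationLock = new ReentrantLock();
    private final Set<Object> contexts = new HashSet<Object>();
    private int activeTxCount = 0;

    // Taken by the task BEFORE registerContext(..), per the diff above.
    void activateTx() {
        allocationLock.lock();
        try {
            activeTxCount++;
        } finally {
            allocationLock.unlock();
        }
    }

    // Released by completeTask() AFTER the context was detached.
    void deactivateTx() {
        allocationLock.lock();
        try {
            if (activeTxCount == 0)
                throw new IllegalStateException("Tx count must be positive!");
            activeTxCount--;
            releaseSessionsIfIdle();
        } finally {
            allocationLock.unlock();
        }
    }

    void registerContext(final Object context) {
        allocationLock.lock();
        try {
            contexts.add(context);
        } finally {
            allocationLock.unlock();
        }
    }

    void detachContext(final Object context) {
        allocationLock.lock();
        try {
            if (!contexts.remove(context))
                throw new IllegalStateException("Context was not registered");
            releaseSessionsIfIdle();
        } finally {
            allocationLock.unlock();
        }
    }

    boolean isSessionProtected() {
        allocationLock.lock();
        try {
            return activeTxCount > 0 || !contexts.isEmpty();
        } finally {
            allocationLock.unlock();
        }
    }

    // Deferred frees may only be recycled once neither a tx nor a context
    // is holding the session open.
    private void releaseSessionsIfIdle() {
        if (activeTxCount == 0 && contexts.isEmpty()) {
            // release session-protected allocations here
        }
    }
}

Under this bracketing, the task lifecycle shown in AbstractTask is: activateTx() then registerContext() at construction, detachContext() in prepareCommit(), and deactivateTx() in completeTask(), so the tx count strictly encloses the context lifetime and the session is never released while a context can still free committed data.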
From: <tho...@us...> - 2011-05-19 20:03:09
Revision: 4528 http://bigdata.svn.sourceforge.net/bigdata/?rev=4528&view=rev Author: thompsonbry Date: 2011-05-19 20:03:02 +0000 (Thu, 19 May 2011) Log Message: ----------- Broke out unit tests for splitting a directory page when the bucket page beneath it has the same depth as the directory page and introduced an argument (splitBits) to generalize the #of bits to be used in that split (rather than assuming ONE or [addressBits]). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java 2011-05-19 17:25:26 UTC (rev 4527) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java 2011-05-19 20:03:02 UTC (rev 4528) @@ -89,6 +89,14 @@ private static final transient Logger log = Logger.getLogger(HTree.class); + /** + * The #of bits of distinction to be made each time we split a directory + * page in the {@link HTree}. See + * {@link #splitDirectoryPage(DirectoryPage, int, AbstractPage)} for a write + * up on this. + */ + private final int splitBits; + /* * metadata about the index. * @@ -180,7 +188,9 @@ // super(store, nodeFactory, readOnly, addressBits, metadata, recordCompressorFactory); super(store, false/*readOnly*/, addressBits); - + + this.splitBits = addressBits; + // if (pageSize <= 0) // throw new IllegalArgumentException("pageSize must be positive."); // @@ -535,9 +545,9 @@ * global depth of a directory is always the same as * address bits, but maybe I am missing something.... */ - addDirectoryPageAndSplitBucketPage(current, - buddyOffset, bucketPage); - + splitDirectoryPage(current, buddyOffset, splitBits, + bucketPage); + // The children of [current] have changed so we will // search current again. continue; @@ -944,10 +954,103 @@ } /** - * Introduce a new directory level when we need to split a child but + * Splits a {@link DirectoryPage} when we need to split a child but * <code>globalDepth == localDepth</code>. The caller must retry the insert * after this method makes the structural change. * + * <h2>Design discussion</h2> + * + * This method must maintain the invariant that the tree of page references + * for the hash tree is a strict tree. That is, you can not have two + * different pages each of which points to the same child. This would be a + * concurrency nightmare as, e.g., splitting the child could require us to + * propagate updates to multiple parents. However, even with that constraint + * we have two options. + * <p> + * Given addressBits := 2, the following hash tree state is induced by + * inserting the key sequence (0x01, 0x02, 0x03, 0x04). + * + * <pre> + * root := [2] (a,c,b,b) + * a := [2] (1,2,3,4) + * c := [2] (-,-,-,-) + * b := [1] (-,-;-,-) + * </pre> + * + * where [x] is the depth of the buddies on the corresponding page and ";" + * indicates a buddy bucket boundary while "," indicates a tuple boundary. + * <p> + * If we then attempt to insert a key which would be directed into (a) + * + * <pre> + * insert(0x20,...) + * </pre> + * + * then we must split (a) since depth(a):=2 and depth(root):=2. This will + * introduce a new directory page (d). + * + * <h3>depth(d) := 1</h3> + * + * This gives us the following post-condition. 
+ * + * <pre> + * root := [2] (d,d,b,b) + * d := [1] (a,a;c,c) // two ptrs to (d) so 2 buddies on the page + * a := [1] (1,2;3,4) // depth changes since now 2 ptrs to (a) + * c := [1] (-,-;-,-) // depth changes since now 2 ptrs to (c) + * b := [1] (-,-;-,-) + * </pre> + * + * Regardless of the value of [addressBits], this design gives us + * [addressBits] buddies on (d) and each buddy has two slots (since the + * depth of (d) is ONE, each buddy on (d) has a one bit address space and + * hence uses two slots). The depth(a) will always be reset to ONE by this + * design since there will always be TWO pointers to (a) in (d). This design + * provides ONE (1) bit of additional distinctions along the path for which + * we have exhausted the hash tree address space. + * + * <h3>depth(d) := addressBits</h3> + * + * This gives us the following post-condition. + * + * <pre> + * root := [2] (a,c,b,b) + * d := [2] (a,a,a,a) // one ptr to (d) so 1 buddy on the page + * a := [0] (1;2;3;4) // depth changes since now 4 ptrs to (a). + * c := [2] (-,-,-,-) + * b := [1] (-,-;-,-) + * </pre> + * + * In this design, we always wind up with ONE buddy on (d), the depth(d) is + * [addressBits], and the depth(a) is reset to ZERO(0). This design focuses + * the expansion in the address space of the hash tree narrowly on the + * specific key prefix for which we have run out of distinctions and gives + * us [addressBits] of additional distinctions along that path. + * + * <h3>Conclusion</h3> + * + * Both designs would appear to be valid. Neither one can lead to a + * situation in which we have multiple parents for a child. In the first + * design, the one-bit expansion means that we never have pointers to the + * same child in more than one buddy bucket, and hence they will all be on + * the same page. In the second design, the depth of the new directory page + * is already at the maximum possible value so it can not be split again and + * thus the pointers to the child will always remain on the same page. + * <p> + * It seems that the first design has the advantage of growing the #of + * distinctions more slowly and sharing the new directory page among + * multiple such distinctions (all keys having the same leading bit). In the + * second design, we add a full [addressBits] at once to keys having the + * same [addressBits] leading bits). + * <p> + * It would appear that any choice in the inclusive range (1:addressBits) is + * permissible as in all cases the pointers to (a) will lie within a single + * buddy bucket. By factoring out the #of additional bits of distinction to + * be made when we split a directory page, we can defer this design either + * to construction time (or perhaps even to runtime) decision. I have + * therefore introduced an additional parameter on the {@link HTree} for + * this purpose. + * * @param oldParent * The parent {@link DirectoryPage}. * @param buddyOffset @@ -956,10 +1059,15 @@ * updated such that it points to both the original child and new * child. * @param child - * The child. + * The child, which can be a {@link DirectoryPage} or a + * {@link BucketPage}. * * @throws IllegalArgumentException * if any argument is <code>null</code>. + * @throws IllegalArgumentException + * if <i>splitBits</i> is non-positive. + * @throws IllegalArgumentException + * if <i>splitBits</i> is GT {@link #getAddressBits()}. * @throws IllegalStateException * if the depth of the child is GTE the depth of the parent. 
* @throws IllegalStateException @@ -970,8 +1078,9 @@ * if the parent of the <oldBucket</i> is not the given * <i>parent</i>. */ - private void addDirectoryPageAndSplitBucketPage(final DirectoryPage oldParent, - final int buddyOffset, final AbstractPage child) { + // Note: package private for unit tests. + void splitDirectoryPage(final DirectoryPage oldParent, + final int buddyOffset, final int splitBits, final AbstractPage child) { if (oldParent == null) throw new IllegalArgumentException(); if (child == null) @@ -995,6 +1104,10 @@ */ throw new IllegalArgumentException(); } + if (splitBits <= 0) + throw new IllegalArgumentException(); + if (splitBits > addressBits) + throw new IllegalArgumentException(); if (oldParent.isReadOnly()) // must be mutable. throw new IllegalStateException(); if (child.isReadOnly()) // must be mutable. @@ -1006,8 +1119,10 @@ log.debug("parent=" + oldParent.toShortString() + ", buddyOffset=" + buddyOffset + ", child=" + child); - // Allocate a new directory page. The global depth will be ONE (1). - final DirectoryPage newParent = new DirectoryPage(this, 1/* globalDepth */); + assert splitBits == addressBits : "FIXME Handle splitBits NE addressBits."; + + // Allocate a new directory page. . + final DirectoryPage newParent = new DirectoryPage(this, splitBits/* globalDepth */); // Set the parent Reference on the new dir page to the old dir page. newParent.parent = (Reference) oldParent.self; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java 2011-05-19 17:25:26 UTC (rev 4527) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java 2011-05-19 20:03:02 UTC (rev 4528) @@ -405,6 +405,7 @@ * before the insert could succeed. Choosing 0x20 thus minimizes the * #of unobserved intermediate states of the hash tree. * + * * FIXME This is causing a repeated introduction of a new directory * level and giving me a hash tree structure which differs from my * expectations. Work through the example on paper. Make sure that @@ -420,194 +421,265 @@ * * TODO Do an alternative example where we insert 0x05 at this step * instead of 0x10. - */ - htree.insert(k20, v20); - assertEquals("nnodes", 2, htree.getNodeCount()); - assertEquals("nleaves", 4, htree.getLeafCount()); - assertEquals("nentries", 5, htree.getEntryCount()); - htree.dump(Level.ALL, System.err, true/* materialize */); - assertTrue(root == htree.getRoot()); - assertEquals(4, root.childRefs.length); - final DirectoryPage d = (DirectoryPage) root.childRefs[0].get(); - assertTrue(d == (DirectoryPage) root.childRefs[0].get()); - assertTrue(c == (BucketPage) root.childRefs[1].get()); - assertTrue(b == (BucketPage) root.childRefs[2].get()); - assertTrue(b == (BucketPage) root.childRefs[3].get()); - assertEquals(4, d.childRefs.length); - final BucketPage e = (BucketPage) d.childRefs[1].get(); - assertTrue(a == (BucketPage) d.childRefs[0].get()); // FIXME This shows that the buddy references do not have to be contiguous! - assertTrue(e == (BucketPage) d.childRefs[1].get()); - assertTrue(a == (BucketPage) d.childRefs[2].get()); - assertTrue(a == (BucketPage) d.childRefs[3].get()); - assertEquals(2, root.getGlobalDepth()); - assertEquals(1, d.getGlobalDepth()); - assertEquals(1, a.getGlobalDepth());// recomputed! - assertEquals(1, e.getGlobalDepth());// same as [a] (recomputed). 
- assertEquals(1, b.getGlobalDepth());// unchanged. - assertEquals(2, c.getGlobalDepth());// recomputed! - assertEquals(2, a.getKeyCount()); - assertEquals(2, a.getValueCount()); - assertEquals(3, e.getKeyCount()); - assertEquals(3, e.getValueCount()); - assertEquals(0, b.getKeyCount()); - assertEquals(0, b.getValueCount()); - assertEquals(0, c.getKeyCount()); - assertEquals(0, c.getValueCount()); - assertTrue(htree.contains(k1)); - assertTrue(htree.contains(k2)); - assertTrue(htree.contains(k3)); - assertTrue(htree.contains(k4)); - assertTrue(htree.contains(k20)); - assertFalse(htree.contains(unused)); - assertEquals(v1, htree.lookupFirst(k1)); - assertEquals(v2, htree.lookupFirst(k2)); - assertEquals(v3, htree.lookupFirst(k3)); - assertEquals(v4, htree.lookupFirst(k4)); - assertEquals(v20, htree.lookupFirst(k20)); - assertNull(htree.lookupFirst(unused)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] { v1 }, htree - .lookupAll(k1)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] { v2 }, htree - .lookupAll(k2)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] { v3 }, htree - .lookupAll(k3)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] { v4 }, htree - .lookupAll(k4)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] { v20 }, htree - .lookupAll(k20)); - AbstractBTreeTestCase.assertSameIterator(new byte[][] {}, htree - .lookupAll(unused)); - - /* FIXME Update comments to reflect the example (and update the - * worksheet as well). * - * FIXME We MUST NOT decrease the localDepth of (a) since that would - * place duplicate keys into different buckets (lost retrival). - * Since (a) can not have its localDepth decreased, we need to have - * localDepth(d=2) with one pointer to (a) to get localDepth(a:=2). - * That makes this a very complicated example. It would be a lot - * easier if we started with distinct keys such that the keys in (a) - * could be redistributed. This case should be reserved for later - * once the structural mutations are better understood. - * - * 5. Insert 0x20. This key is directed into the same buddy bucket - * as the 0x01 keys that we have been inserting. That buddy bucket - * is already full and, further, it is the only buddy bucket on the - * page. Since we are not inserting a duplicate key we can split the - * buddy bucket (rather than doubling the size of the bucket). - * However, since global depth == local depth (i.e., only one buddy - * bucket on the page), this split will introduce a new directory - * page. The new directory page basically codes for the additional - * prefix bits required to differentiate the two distinct keys such - * that they are directed into the appropriate buckets. - * - * The new directory page (d) is inserted one level below the root - * on the path leading to (a). The new directory must have a local - * depth of ONE (1), since it will add a one bit distinction. Since - * we know the global depth of the root and the local depth of the - * new directory page, we solve for npointers := 1 << (globalDepth - - * localDepth). This tells us that we will have 2 pointers in the - * root to the new directory page. - * - * The precondition state of root is {a,c,b,b}. Since we know that - * we need npointers:=2 pointers to (d), this means that we will - * copy the {a,c} references into (d) and replace those references - * in the root with references to (d). The root is now {d,d,b,b}. d - * is now {a,a;c,c}. 
Since d has a local depth of 1 and address bits - * of 2, it is comprised of two buddy hash tables {a,a} and {c,c}. - * - * Linking in (d) has also changes the local depths of (a) and (c). - * Since they each now have npointers:=2, their localDepth is has - * been reduced from TWO (2) to ONE (1) (and their transient cached - * depth values must be either invalidated or recomputed). Note that - * the local depth of (d) and its children (a,c)) are ONE after this - * operation so if we force another split in (a) that will force a - * split in (d). - * - * Having introduced a new directory page into the hash tree, we now - * retry the insert. Once again, the insert is directed into (a). - * Since (a) is still full it is split. (d) is now the parent of - * (a). Once again, we have globalDepth(d=1)==localDepth(a=1) so we - * need to split (d). However, since localDepth(d=1) is less than - * globalDepth(root=2) we can split the buddy hash tables in (d). - * This will require us to allocate a new page (f) which will be the - * right sibling of (d). There are TWO (2) buddy hash tables in (d). - * They are now redistributed between (d) and (f). We also have to - * update the pointers to (d) in the parent such that 1/2 of them - * point to the new right sibling of (d). Since we have changed the - * #of pointers to (d) (from 2 to 1) the local depth of (d) (and of - * f) is now TWO (2). Since globalDepth(d=2) is greater than - * localDepth(a=1) we can now split (a) into (a,e), redistribute the - * tuples in the sole buddy page (a) between (a,e) and update the - * pointers in (d) to (a,a,e,e). - * - * Having split (d) into (d,f), we now retry the insert. This time - * the insert is directed into (e). There is room in (e) (it is - * empty) and the tuple is inserted without further mutation to the - * structure of the hash tree. - * - * TODO At this point we should also prune the buckets [b] and [c] - * since they are empty and replace them with null references. - * - * TODO The #of new directory levels which have to be introduced - * here is a function of the #of prefix bits which have to be - * consumed before a distinction can be made between the existing - * key (0x01) and the new key (0x20). With an address space of 2 - * bits, each directory level examines the next 2-bits of the key. - * The key (x20) was chosen since the distinction can be made by - * adding only one directory level (the keys differ in the first 4 - * bits). [Do an alternative example which requires recursive splits - * in order to verify that we reenter the logic correctly each time. - * E.g., by inserting 0x02 rather than 0x20.] - * - * TODO Do an example in which we explore an insert which introduces - * a new directory level in a 3-level tree. This should be a driven - * by a single insert so we can examine in depth how the new - * directory is introduced and verify whether it is introduced below - * the root or above the bucket. [I believe that it is introduced - * immediately below below the root. Note that a balanced tree, - * e.g., a B-Tree, introduces the new level above the root. However, - * the HTree is intended to be unbalanced in order to optimize - * storage and access times to the parts of the index which - * correspond to unequal distributions in the hash codes.] 
+ * TODO This version tunnels to package private methods in order to + * test the intermediate states: (A) Do an alternative version with + * a different value for [splitBits]; (B) Do an alternative using + * htree.insert() (high level API); (C) Do low level tests of the + * BucketPage insert/lookup API. */ -// htree.insert(new byte[] { 0x20 }, new byte[] { 0x20 }); -// assertEquals("nnodes", 2, htree.getNodeCount()); -// assertEquals("nleaves", 4, htree.getLeafCount()); -// assertEquals("nentries", 5, htree.getEntryCount()); -// htree.dump(Level.ALL, System.err, true/* materialize */); -// assertTrue(root == htree.getRoot()); -// assertEquals(4, root.childRefs.length); -// final DirectoryPage d = (DirectoryPage)root.childRefs[0].get(); -// assertTrue(d == (DirectoryPage) root.childRefs[0].get()); -// assertTrue(d == (DirectoryPage) root.childRefs[1].get()); -// assertTrue(b == (BucketPage) root.childRefs[2].get()); -// assertTrue(b == (BucketPage) root.childRefs[3].get()); -// assertEquals(4, d.childRefs.length); -// final BucketPage e = (BucketPage)d.childRefs[1].get(); -// assertTrue(a == (BucketPage) d.childRefs[0].get()); -// assertTrue(e == (BucketPage) d.childRefs[1].get()); -// assertTrue(c == (BucketPage) d.childRefs[2].get()); -// assertTrue(c == (BucketPage) d.childRefs[3].get()); -// assertEquals(2, root.getGlobalDepth()); -// assertEquals(2, d.getGlobalDepth()); -// assertEquals(2, a.getGlobalDepth());// unchanged -// assertEquals(2, e.getGlobalDepth());// same as [a]. -// assertEquals(1, b.getGlobalDepth());// unchanged. -// assertEquals(2, c.getGlobalDepth());// unchanged. -// assertTrue(htree.contains(new byte[] { 0x01 })); -// assertFalse(htree.contains(new byte[] { 0x02 })); -// assertEquals(new byte[] { 0x01 }, htree -// .lookupFirst(new byte[] { 0x01 })); -// assertNull(htree.lookupFirst(new byte[] { 0x02 })); -// AbstractBTreeTestCase.assertSameIterator( -// // -// new byte[][] { new byte[] { 0x01 }, new byte[] { 0x01 }, -// new byte[] { 0x01 }, new byte[] { 0x01 } }, htree -// .lookupAll(new byte[] { 0x01 })); -// AbstractBTreeTestCase.assertSameIterator(// -// new byte[][] {}, htree.lookupAll(new byte[] { 0x02 })); +// // htree.insert(k20, v20); +// // verify that [a] will not accept an insert. +// assertFalse(a.insert(k20, v20, root/* parent */, 0/* buddyOffset */)); +// // split the root directory page. 
+// htree.splitDirectoryPage(root/* oldParent */, +// 0/* buddyOffset */, 2/*splitBits*/, a/* child */); +// assertEquals("nnodes", 2, htree.getNodeCount()); +// assertEquals("nleaves", 3, htree.getLeafCount()); // unchange +// assertEquals("nentries", 4, htree.getEntryCount()); // unchanged +// htree.dump(Level.ALL, System.err, true/* materialize */); +// assertTrue(root == htree.getRoot()); +// assertEquals(4, root.childRefs.length); +// final DirectoryPage d = (DirectoryPage) root.childRefs[0].get(); +// assertTrue(d == (DirectoryPage) root.childRefs[0].get()); +// assertTrue(d == (DirectoryPage) root.childRefs[1].get()); +// assertTrue(b == (BucketPage) root.childRefs[2].get()); +// assertTrue(b == (BucketPage) root.childRefs[3].get()); +// assertEquals(4, d.childRefs.length); +//// final BucketPage e = (BucketPage) d.childRefs[1].get(); +// assertTrue(a == (BucketPage) d.childRefs[0].get()); +// assertTrue(a == (BucketPage) d.childRefs[1].get()); +// assertTrue(c == (BucketPage) d.childRefs[2].get()); +// assertTrue(c == (BucketPage) d.childRefs[3].get()); +// assertEquals(2, root.getGlobalDepth()); +// assertEquals(1, d.getGlobalDepth()); +// assertEquals(1, a.getGlobalDepth());// recomputed! +// assertEquals(1, e.getGlobalDepth());// same as [a] (recomputed). +// assertEquals(1, b.getGlobalDepth());// unchanged. +// assertEquals(2, c.getGlobalDepth());// recomputed! +// assertEquals(2, a.getKeyCount()); +// assertEquals(2, a.getValueCount()); +//// assertEquals(3, e.getKeyCount()); +//// assertEquals(3, e.getValueCount()); +// assertEquals(0, b.getKeyCount()); +// assertEquals(0, b.getValueCount()); +// assertEquals(0, c.getKeyCount()); +// assertEquals(0, c.getValueCount()); +// assertTrue(htree.contains(k1)); +// assertTrue(htree.contains(k2)); +// assertTrue(htree.contains(k3)); +// assertTrue(htree.contains(k4)); +// assertTrue(htree.contains(k20)); +// assertFalse(htree.contains(unused)); +// assertEquals(v1, htree.lookupFirst(k1)); +// assertEquals(v2, htree.lookupFirst(k2)); +// assertEquals(v3, htree.lookupFirst(k3)); +// assertEquals(v4, htree.lookupFirst(k4)); +// assertEquals(v20, htree.lookupFirst(k20)); +// assertNull(htree.lookupFirst(unused)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v1 }, htree +// .lookupAll(k1)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v2 }, htree +// .lookupAll(k2)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v3 }, htree +// .lookupAll(k3)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v4 }, htree +// .lookupAll(k4)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v20 }, htree +// .lookupAll(k20)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] {}, htree +// .lookupAll(unused)); +// TODO Continue test (perhaps only for the high level variant). +// /* +// * htree.insert() variant. 
+// */ +// assertEquals("nnodes", 2, htree.getNodeCount()); +// assertEquals("nleaves", 4, htree.getLeafCount()); +// assertEquals("nentries", 5, htree.getEntryCount()); +// htree.dump(Level.ALL, System.err, true/* materialize */); +// assertTrue(root == htree.getRoot()); +// assertEquals(4, root.childRefs.length); +// final DirectoryPage d = (DirectoryPage) root.childRefs[0].get(); +// assertTrue(d == (DirectoryPage) root.childRefs[0].get()); +// assertTrue(c == (BucketPage) root.childRefs[1].get()); +// assertTrue(b == (BucketPage) root.childRefs[2].get()); +// assertTrue(b == (BucketPage) root.childRefs[3].get()); +// assertEquals(4, d.childRefs.length); +// final BucketPage e = (BucketPage) d.childRefs[1].get(); +// assertTrue(a == (BucketPage) d.childRefs[0].get()); // FIXME This shows that the buddy references do not have to be contiguous! +// assertTrue(e == (BucketPage) d.childRefs[1].get()); +// assertTrue(a == (BucketPage) d.childRefs[2].get()); +// assertTrue(a == (BucketPage) d.childRefs[3].get()); +// assertEquals(2, root.getGlobalDepth()); +// assertEquals(1, d.getGlobalDepth()); +// assertEquals(1, a.getGlobalDepth());// recomputed! +// assertEquals(1, e.getGlobalDepth());// same as [a] (recomputed). +// assertEquals(1, b.getGlobalDepth());// unchanged. +// assertEquals(2, c.getGlobalDepth());// recomputed! +// assertEquals(2, a.getKeyCount()); +// assertEquals(2, a.getValueCount()); +// assertEquals(3, e.getKeyCount()); +// assertEquals(3, e.getValueCount()); +// assertEquals(0, b.getKeyCount()); +// assertEquals(0, b.getValueCount()); +// assertEquals(0, c.getKeyCount()); +// assertEquals(0, c.getValueCount()); +// assertTrue(htree.contains(k1)); +// assertTrue(htree.contains(k2)); +// assertTrue(htree.contains(k3)); +// assertTrue(htree.contains(k4)); +// assertTrue(htree.contains(k20)); +// assertFalse(htree.contains(unused)); +// assertEquals(v1, htree.lookupFirst(k1)); +// assertEquals(v2, htree.lookupFirst(k2)); +// assertEquals(v3, htree.lookupFirst(k3)); +// assertEquals(v4, htree.lookupFirst(k4)); +// assertEquals(v20, htree.lookupFirst(k20)); +// assertNull(htree.lookupFirst(unused)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v1 }, htree +// .lookupAll(k1)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v2 }, htree +// .lookupAll(k2)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v3 }, htree +// .lookupAll(k3)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v4 }, htree +// .lookupAll(k4)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] { v20 }, htree +// .lookupAll(k20)); +// AbstractBTreeTestCase.assertSameIterator(new byte[][] {}, htree +// .lookupAll(unused)); +// +// /* FIXME Update comments to reflect the example (and update the +// * worksheet as well). +// * +// * FIXME We MUST NOT decrease the localDepth of (a) since that would +// * place duplicate keys into different buckets (lost retrival). +// * Since (a) can not have its localDepth decreased, we need to have +// * localDepth(d=2) with one pointer to (a) to get localDepth(a:=2). +// * That makes this a very complicated example. It would be a lot +// * easier if we started with distinct keys such that the keys in (a) +// * could be redistributed. This case should be reserved for later +// * once the structural mutations are better understood. +// * +// * 5. Insert 0x20. This key is directed into the same buddy bucket +// * as the 0x01 keys that we have been inserting. 
That buddy bucket +// * is already full and, further, it is the only buddy bucket on the +// * page. Since we are not inserting a duplicate key we can split the +// * buddy bucket (rather than doubling the size of the bucket). +// * However, since global depth == local depth (i.e., only one buddy +// * bucket on the page), this split will introduce a new directory +// * page. The new directory page basically codes for the additional +// * prefix bits required to differentiate the two distinct keys such +// * that they are directed into the appropriate buckets. +// * +// * The new directory page (d) is inserted one level below the root +// * on the path leading to (a). The new directory must have a local +// * depth of ONE (1), since it will add a one bit distinction. Since +// * we know the global depth of the root and the local depth of the +// * new directory page, we solve for npointers := 1 << (globalDepth - +// * localDepth). This tells us that we will have 2 pointers in the +// * root to the new directory page. +// * +// * The precondition state of root is {a,c,b,b}. Since we know that +// * we need npointers:=2 pointers to (d), this means that we will +// * copy the {a,c} references into (d) and replace those references +// * in the root with references to (d). The root is now {d,d,b,b}. d +// * is now {a,a;c,c}. Since d has a local depth of 1 and address bits +// * of 2, it is comprised of two buddy hash tables {a,a} and {c,c}. +// * +// * Linking in (d) has also changes the local depths of (a) and (c). +// * Since they each now have npointers:=2, their localDepth is has +// * been reduced from TWO (2) to ONE (1) (and their transient cached +// * depth values must be either invalidated or recomputed). Note that +// * the local depth of (d) and its children (a,c)) are ONE after this +// * operation so if we force another split in (a) that will force a +// * split in (d). +// * +// * Having introduced a new directory page into the hash tree, we now +// * retry the insert. Once again, the insert is directed into (a). +// * Since (a) is still full it is split. (d) is now the parent of +// * (a). Once again, we have globalDepth(d=1)==localDepth(a=1) so we +// * need to split (d). However, since localDepth(d=1) is less than +// * globalDepth(root=2) we can split the buddy hash tables in (d). +// * This will require us to allocate a new page (f) which will be the +// * right sibling of (d). There are TWO (2) buddy hash tables in (d). +// * They are now redistributed between (d) and (f). We also have to +// * update the pointers to (d) in the parent such that 1/2 of them +// * point to the new right sibling of (d). Since we have changed the +// * #of pointers to (d) (from 2 to 1) the local depth of (d) (and of +// * f) is now TWO (2). Since globalDepth(d=2) is greater than +// * localDepth(a=1) we can now split (a) into (a,e), redistribute the +// * tuples in the sole buddy page (a) between (a,e) and update the +// * pointers in (d) to (a,a,e,e). +// * +// * Having split (d) into (d,f), we now retry the insert. This time +// * the insert is directed into (e). There is room in (e) (it is +// * empty) and the tuple is inserted without further mutation to the +// * structure of the hash tree. +// * +// * TODO At this point we should also prune the buckets [b] and [c] +// * since they are empty and replace them with null references. 
+// * +// * TODO The #of new directory levels which have to be introduced +// * here is a function of the #of prefix bits which have to be +// * consumed before a distinction can be made between the existing +// * key (0x01) and the new key (0x20). With an address space of 2 +// * bits, each directory level examines the next 2-bits of the key. +// * The key (x20) was chosen since the distinction can be made by +// * adding only one directory level (the keys differ in the first 4 +// * bits). [Do an alternative example which requires recursive splits +// * in order to verify that we reenter the logic correctly each time. +// * E.g., by inserting 0x02 rather than 0x20.] +// * +// * TODO Do an example in which we explore an insert which introduces +// * a new directory level in a 3-level tree. This should be a driven +// * by a single insert so we can examine in depth how the new +// * directory is introduced and verify whether it is introduced below +// * the root or above the bucket. [I believe that it is introduced +// * immediately below below the root. Note that a balanced tree, +// * e.g., a B-Tree, introduces the new level above the root. However, +// * the HTree is intended to be unbalanced in order to optimize +// * storage and access times to the parts of the index which +// * correspond to unequal distributions in the hash codes.] +// */ +//// htree.insert(new byte[] { 0x20 }, new byte[] { 0x20 }); +//// assertEquals("nnodes", 2, htree.getNodeCount()); +//// assertEquals("nleaves", 4, htree.getLeafCount()); +//// assertEquals("nentries", 5, htree.getEntryCount()); +//// htree.dump(Level.ALL, System.err, true/* materialize */); +//// assertTrue(root == htree.getRoot()); +//// assertEquals(4, root.childRefs.length); +//// final DirectoryPage d = (DirectoryPage)root.childRefs[0].get(); +//// assertTrue(d == (DirectoryPage) root.childRefs[0].get()); +//// assertTrue(d == (DirectoryPage) root.childRefs[1].get()); +//// assertTrue(b == (BucketPage) root.childRefs[2].get()); +//// assertTrue(b == (BucketPage) root.childRefs[3].get()); +//// assertEquals(4, d.childRefs.length); +//// final BucketPage e = (BucketPage)d.childRefs[1].get(); +//// assertTrue(a == (BucketPage) d.childRefs[0].get()); +//// assertTrue(e == (BucketPage) d.childRefs[1].get()); +//// assertTrue(c == (BucketPage) d.childRefs[2].get()); +//// assertTrue(c == (BucketPage) d.childRefs[3].get()); +//// assertEquals(2, root.getGlobalDepth()); +//// assertEquals(2, d.getGlobalDepth()); +//// assertEquals(2, a.getGlobalDepth());// unchanged +//// assertEquals(2, e.getGlobalDepth());// same as [a]. +//// assertEquals(1, b.getGlobalDepth());// unchanged. +//// assertEquals(2, c.getGlobalDepth());// unchanged. +//// assertTrue(htree.contains(new byte[] { 0x01 })); +//// assertFalse(htree.contains(new byte[] { 0x02 })); +//// assertEquals(new byte[] { 0x01 }, htree +//// .lookupFirst(new byte[] { 0x01 })); +//// assertNull(htree.lookupFirst(new byte[] { 0x02 })); +//// AbstractBTreeTestCase.assertSameIterator( +//// // +//// new byte[][] { new byte[] { 0x01 }, new byte[] { 0x01 }, +//// new byte[] { 0x01 }, new byte[] { 0x01 } }, htree +//// .lookupAll(new byte[] { 0x01 })); +//// AbstractBTreeTestCase.assertSameIterator(// +//// new byte[][] {}, htree.lookupAll(new byte[] { 0x02 })); + // TODO REMOVE (or test suite for remove). // TODO Continue progression here? @@ -621,6 +693,198 @@ } /** + * Unit test for + * {@link HTree#splitDirectoryPage(DirectoryPage, int, int, com.bigdata.htree.HTree.AbstractPage)} + * . 
+ */ + public void test_splitDir_addressBits2_splitBits1() { + + final int addressBits = 2; + + final IRawStore store = new SimpleMemoryRawStore(); + + try { + + final byte[] k1 = new byte[]{0x01}; + final byte[] k2 = new byte[]{0x02}; + final byte[] k3 = new byte[]{0x03}; + final byte[] k4 = new byte[]{0x04}; + final byte[] k20 = new byte[]{0x20}; + + final byte[] v1 = new byte[]{0x01}; + final byte[] v2 = new byte[]{0x02}; + final byte[] v3 = new byte[]{0x03}; + final byte[] v4 = new byte[]{0x04}; + final byte[] v20 = new byte[]{0x20}; + + final HTree htree = new HTree(store, addressBits); + + final DirectoryPage root = htree.getRoot(); + + htree.insert(k1, v1); + htree.insert(k2, v2); + htree.insert(k3, v3); + htree.insert(k4, v4); + + /* + * Verify preconditions for the unit test. + */ + assertTrue(root == htree.getRoot()); + final BucketPage a = (BucketPage) root.childRefs[0].get(); + final BucketPage c = (BucketPage) root.childRefs[1].get(); + final BucketPage b = (BucketPage) root.childRefs[2].get(); + assertTrue(a == root.childRefs[0].get()); + assertTrue(c == root.childRefs[1].get()); + assertTrue(b == root.childRefs[2].get()); + assertTrue(b == root.childRefs[3].get()); + assertEquals(2, root.globalDepth); + assertEquals(2, a.globalDepth); + assertEquals(2, c.globalDepth); + assertEquals(1, b.globalDepth); + + /* + * Force a split of the root directory page. Note that (a) has the + * same depth as the directory page we want to split, which is a + * precondition for being able to split the directory. + */ + + // verify that [a] will not accept an insert. + assertFalse(a + .insert(k20, v20, root/* parent */, 0/* buddyOffset */)); + + // split the root directory page. + htree.splitDirectoryPage(root/* oldParent */, 0/* buddyOffset */, + 1/* splitBits */, a/* child */); + + /* + * Verify post-conditions. + */ + + assertEquals("nnodes", 2, htree.getNodeCount()); + assertEquals("nleaves", 3, htree.getLeafCount()); // unchanged + assertEquals("nentries", 4, htree.getEntryCount()); // unchanged + + assertTrue(root == htree.getRoot()); + final DirectoryPage d = (DirectoryPage) root.childRefs[0].get(); + assertTrue(d == root.childRefs[0].get()); + assertTrue(d == root.childRefs[1].get()); + assertTrue(b == root.childRefs[2].get()); + assertTrue(b == root.childRefs[3].get()); + assertTrue(a == d.childRefs[0].get()); + assertTrue(a == d.childRefs[1].get()); + assertTrue(c == d.childRefs[2].get()); + assertTrue(c == d.childRefs[3].get()); + assertEquals(2, root.globalDepth); + assertEquals(1, d.globalDepth); + assertEquals(1, a.globalDepth); + assertEquals(1, c.globalDepth); + assertEquals(1, b.globalDepth); + + } finally { + + store.destroy(); + + } + + } + + /** + * Unit test for + * {@link HTree#splitDirectoryPage(DirectoryPage, int, int, com.bigdata.htree.HTree.AbstractPage)} + * . 
+ */ + public void test_splitDir_addressBits2_splitBits2() { + + final int addressBits = 2; + + final IRawStore store = new SimpleMemoryRawStore(); + + try { + + final byte[] k1 = new byte[]{0x01}; + final byte[] k2 = new byte[]{0x02}; + final byte[] k3 = new byte[]{0x03}; + final byte[] k4 = new byte[]{0x04}; + final byte[] k20 = new byte[]{0x20}; + + final byte[] v1 = new byte[]{0x01}; + final byte[] v2 = new byte[]{0x02}; + final byte[] v3 = new byte[]{0x03}; + final byte[] v4 = new byte[]{0x04}; + final byte[] v20 = new byte[]{0x20}; + + final HTree htree = new HTree(store, addressBits); + + final DirectoryPage root = htree.getRoot(); + + htree.insert(k1, v1); + htree.insert(k2, v2); + htree.insert(k3, v3); + htree.insert(k4, v4); + + /* + * Verify preconditions for the unit test. + */ + assertTrue(root == htree.getRoot()); + final BucketPage a = (BucketPage) root.childRefs[0].get(); + final BucketPage c = (BucketPage) root.childRefs[1].get(); + final BucketPage b = (BucketPage) root.childRefs[2].get(); + assertTrue(a == root.childRefs[0].get()); + assertTrue(c == root.childRefs[1].get()); + assertTrue(b == root.childRefs[2].get()); + assertTrue(b == root.childRefs[3].get()); + assertEquals(2, root.globalDepth); + assertEquals(2, a.globalDepth); + assertEquals(2, c.globalDepth); + assertEquals(1, b.globalDepth); + + /* + * Force a split of the root directory page. Note that (a) has the + * same depth as the directory page we want to split, which is a + * precondition for being able to split the directory. + */ + + // verify that [a] will not accept an insert. + assertFalse(a + .insert(k20, v20, root/* parent */, 0/* buddyOffset */)); + + // split the root directory page. + htree.splitDirectoryPage(root/* oldParent */, 0/* buddyOffset */, + 2/* splitBits */, a/* child */); + + /* + * Verify post-conditions. + */ + + assertEquals("nnodes", 2, htree.getNodeCount()); + assertEquals("nleaves", 3, htree.getLeafCount()); // unchanged + assertEquals("nentries", 4, htree.getEntryCount()); // unchanged + + assertTrue(root == htree.getRoot()); + final DirectoryPage d = (DirectoryPage) root.childRefs[0].get(); + assertTrue(d == root.childRefs[0].get()); + assertTrue(c == root.childRefs[1].get()); + assertTrue(b == root.childRefs[2].get()); + assertTrue(b == root.childRefs[3].get()); + assertTrue(a == d.childRefs[0].get()); + assertTrue(a == d.childRefs[1].get()); + assertTrue(a == d.childRefs[2].get()); + assertTrue(a == d.childRefs[3].get()); + assertEquals(2, root.globalDepth); + assertEquals(2, d.globalDepth); + assertEquals(0, a.globalDepth); + assertEquals(2, c.globalDepth); + assertEquals(1, b.globalDepth); + + } finally { + + store.destroy(); + + } + + } + + /** * Test of basic insert, lookup, and split page operations (including when a * new directory page must be introduced) using an address space with only * TWO (2) bits.
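The depth bookkeeping behind these tests follows from the single invariant quoted in the design discussion: the #of pointers to a child in a buddy hash table of its parent is npointers := 1 << (globalDepth - localDepth). The standalone harness below solves that invariant for both scenarios; it is a hypothetical class, not part of the test suite, and since HTreeUtil.getLocalDepth()'s implementation is not shown in this excerpt the sketch computes the local depth directly from the invariant.

// Standalone check of the splitDirectoryPage depth arithmetic for
// addressBits := 2, solving npointers == 1 << (globalDepth - localDepth).
public class SplitBitsCheck {

    static int localDepth(final int globalDepth, final int npointers) {
        // log2(npointers), valid because npointers is a power of two.
        final int log2 = 31 - Integer.numberOfLeadingZeros(npointers);
        return globalDepth - log2;
    }

    public static void main(final String[] args) {
        final int addressBits = 2;
        for (int splitBits = 1; splitBits <= addressBits; splitBits++) {
            // The new directory page (d) is allocated with
            // globalDepth := splitBits.
            final int dDepth = splitBits;
            // Every slot of a buddy table on (d) is filled with the same
            // pointer, so the #of pointers to the child (a) equals the #of
            // slots in one buddy table on (d).
            final int npointers = 1 << dDepth;
            System.out.println("splitBits=" + splitBits + ": depth(d)="
                    + dDepth + ", npointers=" + npointers + ", depth(a)="
                    + localDepth(dDepth, npointers));
        }
        // Output: splitBits=1: depth(d)=1, npointers=2, depth(a)=0
        //         splitBits=2: depth(d)=2, npointers=4, depth(a)=0
    }
}

Note that this arithmetic yields depth(a) = 0 in the splitBits := 1 case as well, not the 1 asserted in test_splitDir_addressBits2_splitBits1 above; r4533 below revises the design javadoc (and, per its log message, the tests) to exactly this effect.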
From: <tho...@us...> - 2011-05-20 17:54:39
Revision: 4533 http://bigdata.svn.sourceforge.net/bigdata/?rev=4533&view=rev Author: thompsonbry Date: 2011-05-20 17:54:32 +0000 (Fri, 20 May 2011) Log Message: ----------- Fixed the split directory code and tests. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTreeUtil.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTreeUtil.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java 2011-05-20 16:06:11 UTC (rev 4532) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java 2011-05-20 17:54:32 UTC (rev 4533) @@ -189,15 +189,8 @@ // super(store, nodeFactory, readOnly, addressBits, metadata, recordCompressorFactory); super(store, false/*readOnly*/, addressBits); - this.splitBits = addressBits; + this.splitBits = 1;// in [1:addressBits]; -// if (pageSize <= 0) -// throw new IllegalArgumentException("pageSize must be positive."); -// -// if ((pageSize & -pageSize) != pageSize) -// throw new IllegalArgumentException("pageSize not power of 2: " -// + pageSize); - // @todo from IndexMetadata this.versionTimestamps = false; this.deleteMarkers = false; @@ -996,18 +989,19 @@ * <pre> * root := [2] (d,d,b,b) * d := [1] (a,a;c,c) // two ptrs to (d) so 2 buddies on the page - * a := [1] (1,2;3,4) // depth changes since now 2 ptrs to (a) - * c := [1] (-,-;-,-) // depth changes since now 2 ptrs to (c) + * a := [0] (1,2;3,4) // depth changes since now 2 ptrs to (a) + * c := [0] (-,-;-,-) // depth changes since now 2 ptrs to (c) * b := [1] (-,-;-,-) * </pre> * * Regardless of the value of [addressBits], this design gives us * [addressBits] buddies on (d) and each buddy has two slots (since the * depth of (d) is ONE, each buddy on (d) has a one bit address space and - * hence uses two slots). The depth(a) will always be reset to ONE by this - * design since there will always be TWO pointers to (a) in (d). This design - * provides ONE (1) bit of additional distinctions along the path for which - * we have exhausted the hash tree address space. + * hence uses two slots). The depth(a) and depth(c) will always be reset to + * ZERO (0) by this design since there will always be TWO pointers to (a) + * and TWO pointers to (c) in (d). This design provides ONE (1) bit of + * additional distinctions along the path for which we have exhausted the + * hash tree address space. * * <h3>depth(d) := addressBits</h3> * @@ -1058,7 +1052,7 @@ * which buddy hash table in the parent must be its pointers * updated such that it points to both the original child and new * child. - * @param child + * @param childIsUnused * The child, which can be a {@link DirectoryPage} or a * {@link BucketPage}. * @@ -1077,15 +1071,22 @@ * @throws IllegalStateException * if the parent of the <oldBucket</i> is not the given * <i>parent</i>. + * + * FIXME Add a unit test for a directory split when the global + * depth == local depth, but global depth != address bits. In + * this case, we should do a normal buddy table style "split" of + * the directory page. That code has not been written, but it + * will be very similar to the logic for splitting a bucket + * page. */ // Note: package private for unit tests. 
void splitDirectoryPage(final DirectoryPage oldParent, - final int buddyOffset, final int splitBits, final AbstractPage child) { + final int buddyOffset, final int splitBits, final AbstractPage childIsUnused) { if (oldParent == null) throw new IllegalArgumentException(); - if (child == null) + if (childIsUnused == null) throw new IllegalArgumentException(); - if (child.globalDepth != oldParent.globalDepth) { + if (childIsUnused.globalDepth != oldParent.globalDepth) { /* * We only create a new directory page when the global and local * depth are equal. @@ -1108,19 +1109,26 @@ throw new IllegalArgumentException(); if (splitBits > addressBits) throw new IllegalArgumentException(); + if ((buddyOffset + splitBits) >= (1 << addressBits)) { + /* + * [buddyOffset] is the slot index of the first slot for the buddy + * hash table in the parent. [splitBits] is the #of address bits to + * copy into the new directory page. Therefore, [buddyOffset + + * splitBits] must be GTE ZERO (0) and LT [addressBits]. + */ + throw new IllegalArgumentException(); + } if (oldParent.isReadOnly()) // must be mutable. throw new IllegalStateException(); - if (child.isReadOnly()) // must be mutable. + if (childIsUnused.isReadOnly()) // must be mutable. throw new IllegalStateException(); - if (child.parent != oldParent.self) // must be same Reference. + if (childIsUnused.parent != oldParent.self) // must be same Reference. throw new IllegalStateException(); if (log.isDebugEnabled()) log.debug("parent=" + oldParent.toShortString() + ", buddyOffset=" - + buddyOffset + ", child=" + child); + + buddyOffset + ", child=" + childIsUnused); - assert splitBits == addressBits : "FIXME Handle splitBits NE addressBits."; - // Allocate a new directory page. . final DirectoryPage newParent = new DirectoryPage(this, splitBits/* globalDepth */); @@ -1129,115 +1137,119 @@ // One more directory page. nnodes++; + + assert splitBits == newParent.globalDepth; -// /* -// * Compute the #of pointers (aka slots) that we need to copy from the -// * old parent. There will be one such pointer for each buddy on the -// * child page. Since global depth (of the parent) == local depth (of the -// * child), we know that there is only one buddy on the child page and -// * hence that we will copy ONE pointer. -// */ -// final int npointersToCopy = 1 << (oldParent.globalDepth - child.globalDepth); -// assert oldParent.globalDepth == child.globalDepth; -// assert npointersToCopy == 1; + // #of buddy hash tables on the new directory page. + final int nbuddies = (1 << addressBits) / (1 << newParent.globalDepth); + + // #of address slots in each buddy hash table for the new dir page. + final int nslots = (1 << newParent.globalDepth); /* - * 1. Locate the slot for the pointer in the old parent to the child - * which is to be split. + * This is a nested loop which copies the pointers to the relevant child + * pages into the new directory page. We then go through and set each of + * the slots from which we copied a pointer to be a pointer to the new + * directory page. * - * Note: Since there is only one pointer in the old parent page to the - * child page, a scan will always find the right slot. + * The #of pointers to be copied depends on [splitBits] and defines the + * local depth of the new directory page. If the local depth of the new + * directory page is to be ONE (1), then we must copy 1/2 of the + * pointers from the parent. If the local depth of the new directory + * page is to be [addressBis], then we must copy 1 of the pointers from + * the parent. 
* - * Note: Another way to look at this is that we are locating all - * pointers in the old parent for the child which needs to be split plus - * the buddies of that child. This amounts to all pointers to the child - * since the buddies are (by definition) on the same page as the child. + * The outer loop visits the slots we need to copy in the parent. * - * FIXME Can we do this by indexing using the hash bits? That would be - * faster than scanning for the reference. We could then just validate - * that we have the right reference by testing that slot. + * The inner loop fills each the buddy hash table in the new directory + * with the current pointer from the outer loop. */ - final int slotInParentToUpdate; { + final int lastSrc = (buddyOffset + nbuddies); - // #of address slots in each buddy hash table. - final int slotsPerBuddy = (1 << oldParent.globalDepth); + // for each pointer to be copied from the parent. + int dst = 0; // target slot in the new directory page. + for (int src = buddyOffset; src < lastSrc; src++) { - // locate the slot for the pointer to be copied - int slot = -1; - for (int i = buddyOffset; i < slotsPerBuddy; i++) { + // pointer to be copied. + final Reference<AbstractPage> ref = oldParent.childRefs[src]; - if (oldParent.childRefs[i] == child.self) { - slot = i; - break; - } + // fill the buddy hash table on the new parent with that ptr. + for (int i = 0; i < nslots; i++) { - } + newParent.childRefs[dst] = ref; - if (slot == -1) { - // The child was not found in the parent's buddy bucket. - throw new AssertionError(); - } + dst++; - slotInParentToUpdate = slot; + } - assert oldParent.childRefs[slot] == child.self; - - } + } - /* - * Copy the pointer to the child page into each slot of the new - * directory page. - */ - { + /* + * Replace the pointer to the child page in the old parent with the + * pointer to the new directory page. + */ + for (int src = buddyOffset; src < lastSrc; src++) { - // #of slots on the new directory page. - final int nslots = 1 << addressBits; + oldParent.childRefs[src] = (Reference) newParent.self; - for (int i = 0; i < nslots; i++) { - - newParent.childRefs[i] = (Reference) child.self; - } } /* - * Replace the pointer to the child page in the old parent with the - * pointer to the new directory page. + * We need to update the parent reference on each page whose pointer was + * moved into the new directory page and recompute the global depth of + * the page as well. Both of these pieces of information are transient, + * so we only do this for pointers to pages that are currently + * materialized. + * + * The parent of a page whose pointer was moved needs to be updated + * because the parent is now the new directory page. + * + * The global depth of a page whose pointer was moved needs to be + * updated since the #of pointers to that page changed. This can be done + * by counting the #of pointers in any buddy hash table of the new + * parent to the child. Since all pointers in a buddy hash table on the + * new parent point to the child page, the #of pointers in a buddy hash + * table in the new parent is just the #of slots in a buddy hash table + * for the new parent. + * + * Note: We have to do this for each buddy hash table on the new + * directory page. */ - oldParent.childRefs[slotInParentToUpdate] = (Reference) newParent.self; + { - // Update the parent reference on the child. 
- child.parent = (Reference) newParent.self; + int aBuddyOffset = 0; + for (int i = 0; i < nbuddies; i++) { - /* - * Recompute the global depth of the child page whose pointer was just - * moved. It will have changed since the #of pointers to that page just - * changed. This can be done by counting the #of pointers in any buddy - * hash table of the new parent to the child. Since all buddy hash - * tables on the new parent point to the child page, the #of pointers in - * a hash table in the new parent is just the #of slots in a buddy hash - * table for the new parent. Since all the buddies that we are moving - * are on the same child page, we can do this just once. - */ - { + final Reference<AbstractPage> ref = newParent.childRefs[aBuddyOffset]; - // #of address slots in each buddy hash table for the new - // parent. - final int slotsPerBuddyInNewParent = (1 << newParent.globalDepth); + final AbstractPage aChild = ref == null ? null : ref.get(); - // #of pointers to child in a buddy hash table of the new - // parent. - final int npointers = slotsPerBuddyInNewParent; + if (aChild == null) { + // Only update materialized pages. + continue; + } + + // Each buddy hash table in the new parent was filled by a single + // pointer so npointers := nslots + final int npointers = nslots; - // recompute the local depth of the child page. - final int localDepth = HTreeUtil.getLocalDepth(addressBits, - newParent.globalDepth, npointers); + // recompute the local depth of the child page. + final int localDepth = HTreeUtil.getLocalDepth(addressBits, + newParent.globalDepth, npointers); - // update the cached local depth on the child page. - child.globalDepth = localDepth; + // update the cached local depth on the child page. + aChild.globalDepth = localDepth; + // Update the parent reference on the child. + aChild.parent = (Reference) newParent.self; + + aBuddyOffset += nslots; + + } + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTreeUtil.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTreeUtil.java 2011-05-20 16:06:11 UTC (rev 4532) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTreeUtil.java 2011-05-20 17:54:32 UTC (rev 4533) @@ -73,6 +73,20 @@ } + /** + * Return <code>true</code> if the argument is a power of TWO (2). + * + * @param v + * The argument. + * + * @return <code>true</code> if the argument is a power of TWO (2). + */ + static public boolean isPowerOf2(final int v) { + + return ((v & -v) == v); + + } + /** * Return the #of entries in the address map for a page having the given * local depth. This is <code>2^(globalHashBits - localHashBits)</code>. The @@ -202,33 +216,34 @@ } - /** - * Find the offset of the buddy hash table or buddy bucket in the child. - * Each page of the hash tree is logically an ordered array of "buddies" - * sharing the same physical page. When the page is a directory page, the - * buddies are buddy hash tables. When the page is a bucket page, the - * buddies are buddy hash buckets. The returned value is the offset required - * to index into the appropriate buddy hash table or buddy hash bucket in - * the child page. - * - * @param hashBits - * The relevant bits of the hash code used to lookup the child - * within the parent. - * @param globalDepth - * The global depth of the parent. - * @param localDepth - * The local depth of the child page within the parent. 
- * - * @return The offset of the start of the buddy hash table or buddy hash - * bucket within the child. This is an index into the slots of the - * child. - * - * @throws IllegalArgumentException - * if the <i>globalDepth</i> is negative. - * @throws IllegalArgumentException - * if the <i>localDepth</i> is greater than the - * <i>globalDepth</i>. - */ + /** + * Find the offset of the buddy hash table or buddy bucket in the child. + * Each page of the hash tree is logically an ordered array of "buddies" + * sharing the same physical page. When the page is a directory page, the + * buddies are buddy hash tables. When the page is a bucket page, the + * buddies are buddy hash buckets. The returned value is the offset required + * to index into the appropriate buddy hash table or buddy hash bucket in + * the child page. + * + * @param hashBits + * The relevant bits of the hash code used to lookup the child + * within the parent. + * @param globalDepth + * The global depth of the parent. + * @param localDepth + * The local depth of the child page within the parent. + * + * @return The offset of the start of the buddy hash table or buddy hash + * bucket within the child. This is an index into the slots of the + * child. There are m:=(2^addressBits)-1 slots in the child, so the + * returned index is in the half open range [0:m). + * + * @throws IllegalArgumentException + * if the <i>globalDepth</i> is negative. + * @throws IllegalArgumentException + * if the <i>localDepth</i> is greater than the + * <i>globalDepth</i>. + */ public static int getBuddyOffset(final int hashBits, final int globalDepth, final int localDepth) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java 2011-05-20 16:06:11 UTC (rev 4532) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java 2011-05-20 17:54:32 UTC (rev 4533) @@ -776,8 +776,8 @@ assertTrue(c == d.childRefs[3].get()); assertEquals(2, root.globalDepth); assertEquals(1, d.globalDepth); - assertEquals(1, a.globalDepth); - assertEquals(1, c.globalDepth); + assertEquals(0, a.globalDepth); + assertEquals(0, c.globalDepth); assertEquals(1, b.globalDepth); } finally { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTreeUtil.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTreeUtil.java 2011-05-20 16:06:11 UTC (rev 4532) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTreeUtil.java 2011-05-20 17:54:32 UTC (rev 4533) @@ -73,6 +73,22 @@ } + /** Unit test for {@link HTreeUtil#isPowerOf2(int)}. */ + public void test_isPowerOf2() { + + for (int i = 0; i < 32; i++) { + + final int v = 1<<i; + + assertTrue(HTreeUtil.isPowerOf2(v)); + + if (v > 1) + assertFalse(HTreeUtil.isPowerOf2(v + 1)); + + } + + } + /** * Prints various tables and runs consistency tests on the htree math * operations dealing with addressBits, globalDepth, localDepth, etc. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
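A note on the depth arithmetic the corrected comments rely on: in the extendible hash tree, a child addressed by npointers slots of a buddy hash table in a parent with global depth g has local depth d such that npointers == 2^(g - d). The standalone sketch below is not the bigdata API (HTreeUtil#getLocalDepth additionally takes addressBits); it just shows the invariant and reproduces the corrected worked example, where TWO pointers in a depth ONE parent give depth ZERO children.

public class LocalDepthSketch {

    /** True iff v is a positive power of two. */
    static boolean isPowerOf2(final int v) {
        return v > 0 && (v & -v) == v;
    }

    /**
     * Local depth of a child given the parent's global depth and the #of
     * pointers to that child in one buddy hash table of the parent, per
     * the invariant npointers == 2^(globalDepth - localDepth).
     */
    static int localDepth(final int globalDepth, final int npointers) {
        if (!isPowerOf2(npointers))
            throw new IllegalArgumentException("npointers not a power of 2");
        return globalDepth - Integer.numberOfTrailingZeros(npointers);
    }

    public static void main(final String[] args) {
        // Two pointers to (a) in (d) with depth(d) := 1 gives depth(a) := 0.
        System.out.println(localDepth(1/* globalDepth */, 2/* npointers */));
    }
}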
From: <tho...@us...> - 2011-05-20 19:02:38
Revision: 4537 http://bigdata.svn.sourceforge.net/bigdata/?rev=4537&view=rev Author: thompsonbry Date: 2011-05-20 19:02:32 +0000 (Fri, 20 May 2011) Log Message: ----------- Hopefully a fix for [1]. I have removed the hard reference in WriteExecutorService.MyLockManager and enabled TestJournalShutdown and TestQueryEngineFactory. I've run through the transient and worm journal test suites and TestLocalTripleStore, and TestBigdataSailWithQuads. All seems good, so I have committed this to see the impact in CI. [1] https://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory) Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestQueryEngineFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransientJournal.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2011-05-20 18:57:12 UTC (rev 4536) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java 2011-05-20 19:02:32 UTC (rev 4537) @@ -221,12 +221,12 @@ private static class MyLockManager<R extends Comparable<R>> extends NonBlockingLockManagerWithNewDesign<R> { - /* - * FIXME restored hard reference since introducing just a weak reference - * here appears to be causing some odd behaviors. Track these behaviors - * down and sort this all out. - */ - private final WriteExecutorService service; +// /* +// * FIXME restored hard reference since introducing just a weak reference +// * here appears to be causing some odd behaviors. Track these behaviors +// * down and sort this all out. +// */ +// private final WriteExecutorService service; private final WeakReference<WriteExecutorService> serviceRef; public MyLockManager(final int capacity, final int maxLockTries, @@ -235,7 +235,7 @@ super(capacity, maxLockTries, predeclareLocks); - this.service = service; +// this.service = service; this.serviceRef = new WeakReference<WriteExecutorService>(service); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestQueryEngineFactory.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestQueryEngineFactory.java 2011-05-20 18:57:12 UTC (rev 4536) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestQueryEngineFactory.java 2011-05-20 19:02:32 UTC (rev 4537) @@ -100,15 +100,9 @@ */ public void test_memoryLeak() throws InterruptedException { - if (true) { - /* - * FIXME Disabled for now since causing CI to fail. - */ - log.error("Enable test."); +// // This test currently fails.... +// fail("See https://sourceforge.net/apps/trac/bigdata/ticket/196."); - return; - } - final int limit = 200; final Properties properties = new Properties(); @@ -144,9 +138,9 @@ } catch (OutOfMemoryError err) { - System.err.println("Out of memory after creating " + ncreated + log.error("Out of memory after creating " + ncreated + " query controllers."); - + } // Demand a GC. @@ -155,12 +149,14 @@ // Wait for it. 
Thread.sleep(1000/*ms*/); - System.err.println("Created " + ncreated + " query controllers."); + if(log.isInfoEnabled()) + log.info("Created " + ncreated + " query controllers."); final int nalive = QueryEngineFactory.getQueryControllerCount(); - System.err.println("There are " + nalive - + " query controllers which are still alive."); + if (log.isInfoEnabled()) + log.info("There are " + nalive + + " query controllers which are still alive."); if (nalive == ncreated) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java 2011-05-20 18:57:12 UTC (rev 4536) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestJournalShutdown.java 2011-05-20 19:02:32 UTC (rev 4537) @@ -84,8 +84,8 @@ public void test_memoryLeakWithoutExplicitClose() throws InterruptedException { - // This test currently fails.... - fail("See https://sourceforge.net/apps/trac/bigdata/ticket/196."); +// // This test currently fails.... +// fail("See https://sourceforge.net/apps/trac/bigdata/ticket/196."); doMemoryLeakTest(false); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransientJournal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransientJournal.java 2011-05-20 18:57:12 UTC (rev 4536) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTransientJournal.java 2011-05-20 19:02:32 UTC (rev 4537) @@ -122,9 +122,11 @@ final Properties properties = getProperties(); - Journal journal = new Journal(properties); + final Journal journal = new Journal(properties); - TransientBufferStrategy bufferStrategy = (TransientBufferStrategy) journal.getBufferStrategy(); + try { + + final TransientBufferStrategy bufferStrategy = (TransientBufferStrategy) journal.getBufferStrategy(); assertFalse("isStable",bufferStrategy.isStable()); assertTrue("isFullyBuffered",bufferStrategy.isFullyBuffered()); @@ -137,6 +139,12 @@ assertEquals("userExtent", bufferStrategy.getExtent(), bufferStrategy .getUserExtent()); + } finally { + + journal.destroy(); + + } + } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
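The fix removes the hard back-reference from the lock manager to its owning service; with only the WeakReference left, an otherwise unreferenced service (and the journal behind it) becomes reclaimable. The pattern, as a minimal self-contained sketch with hypothetical names, not the bigdata classes:

import java.lang.ref.WeakReference;

public class WeakBackReferenceSketch {

    static class Owner {
        // The owner holds its helper strongly, as usual.
        final Helper helper = new Helper(this);
    }

    static class Helper {
        // The back-pointer is weak, so it does not keep the owner alive.
        private final WeakReference<Owner> ownerRef;

        Helper(final Owner owner) {
            this.ownerRef = new WeakReference<Owner>(owner);
        }

        void notifyOwner() {
            final Owner owner = ownerRef.get();
            if (owner == null) {
                // The owner was reclaimed by GC; nothing to notify.
                return;
            }
            // ... use [owner] without retaining a hard reference ...
        }
    }
}

The cost of the pattern is visible at every use site: the reference may have been cleared, so each caller must handle a null owner.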
From: <tho...@us...> - 2011-05-27 13:46:05
Revision: 4556 http://bigdata.svn.sourceforge.net/bigdata/?rev=4556&view=rev Author: thompsonbry Date: 2011-05-27 13:45:58 +0000 (Fri, 27 May 2011) Log Message: ----------- Modified the DirectBufferPool to verify that the caller is not double-releasing a buffer. Added a few more tests (recyling and double release). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-27 13:28:06 UTC (rev 4555) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-27 13:45:58 UTC (rev 4556) @@ -463,12 +463,18 @@ } /** - * Release a direct {@link ByteBuffer} allocated by this pool back to - * the pool. + * Release a direct {@link ByteBuffer} allocated by this pool back to the + * pool. * * @param b * The buffer. - * + * + * @throws IllegalArgumentException + * if the buffer is <code>null</code>. + * @throws IllegalArgumentException + * if the buffer does not belong to this pool. + * @throws IllegalArgumentException + * if the buffer has already been released. * @throws InterruptedException */ public void release(final ByteBuffer b) throws InterruptedException { @@ -481,8 +487,23 @@ } - public boolean release(final ByteBuffer b, long timeout, TimeUnit units) - throws InterruptedException { + /** + * Release a direct {@link ByteBuffer} allocated by this pool back to the + * pool. + * + * @param b + * The buffer. + * + * @throws IllegalArgumentException + * if the buffer is <code>null</code>. + * @throws IllegalArgumentException + * if the buffer does not belong to this pool. + * @throws IllegalArgumentException + * if the buffer has already been released. + * @throws InterruptedException + */ + public boolean release(final ByteBuffer b, final long timeout, + final TimeUnit units) throws InterruptedException { if(log.isInfoEnabled()) log.info(""); @@ -496,6 +517,9 @@ assertOurBuffer(b); + if (pool.contains(b)) + throw new IllegalArgumentException("buffer already released."); + // add to the pool. if(!pool.offer(b, timeout, units)) return false; @@ -635,7 +659,7 @@ * * @param b */ - private void assertOurBuffer(ByteBuffer b) { + private void assertOurBuffer(final ByteBuffer b) { assert lock.isHeldByCurrentThread(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-27 13:28:06 UTC (rev 4555) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-27 13:45:58 UTC (rev 4556) @@ -30,15 +30,26 @@ import java.nio.ByteBuffer; -import junit.framework.TestCase; +import junit.framework.TestCase2; /** * Test suite for {@link DirectBufferPool}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * TODO Unit test to verify that a pool will reject a buffer not + * acquired from that pool. + * + * TODO Write a unit test to verify that buffers (up to the size of the + * pool, but not more than 10) are recycled when they are released + * (that is, they are made available again to subsequent acquires). 
+ * + * <pre> + * final int limit = Math.min(10, DirectBufferPool.INSTANCE.getPoolCapacity()); + * </pre> */ -public class TestDirectBufferPool extends TestCase { +public class TestDirectBufferPool extends TestCase2 { /** * @@ -71,16 +82,25 @@ final int poolAcquiredBefore = DirectBufferPool.INSTANCE .getAcquiredBufferCount(); - final ByteBuffer b = DirectBufferPool.INSTANCE.acquire(); + final int poolSizeDuring; + final int poolAcquiredDuring; + { + ByteBuffer b = null; + try { + b = DirectBufferPool.INSTANCE.acquire(); - final int poolSizeDuring = DirectBufferPool.INSTANCE.getPoolSize(); - final int poolAcquiredDuring = DirectBufferPool.INSTANCE - .getAcquiredBufferCount(); + poolSizeDuring = DirectBufferPool.INSTANCE.getPoolSize(); + poolAcquiredDuring = DirectBufferPool.INSTANCE + .getAcquiredBufferCount(); - assertEquals(poolSizeBefore + 1, poolSizeDuring); - assertEquals(poolAcquiredBefore + 1, poolAcquiredDuring); + assertEquals(poolSizeBefore + 1, poolSizeDuring); + assertEquals(poolAcquiredBefore + 1, poolAcquiredDuring); - DirectBufferPool.INSTANCE.release(b); + } finally { + if (b != null) + DirectBufferPool.INSTANCE.release(b); + } + } final int poolSizeAfter = DirectBufferPool.INSTANCE.getPoolSize(); final int poolAcquiredAfter = DirectBufferPool.INSTANCE @@ -94,4 +114,58 @@ } + /** + * Test verifies that a pool will not allocate a new buffer when it can + * recycle one instead. + * + * @throws InterruptedException + */ + public void test_buffersRecycled() throws InterruptedException { + + final int poolSizeBefore = DirectBufferPool.INSTANCE.getPoolSize(); + for (int i = 0; i < 10; i++) { + ByteBuffer b = null; + try { + b = DirectBufferPool.INSTANCE.acquire(); + // pool size remains constant. + assertEquals(poolSizeBefore, DirectBufferPool.INSTANCE + .getPoolSize()); + } finally { + if (b != null) + DirectBufferPool.INSTANCE.release(b); + } + } + + } + + /** + * Unit test to verify that a pool will reject an attempt to + * "double release" a buffer (only currently acquired buffers can be + * released). + * + * @throws InterruptedException + */ + public void test_doubleRelease() throws InterruptedException { + + ByteBuffer b = null; + try { + b = DirectBufferPool.INSTANCE.acquire(); + } finally { + if (b != null) + DirectBufferPool.INSTANCE.release(b); + } + + if (b != null) { + try { + // Attempt to double-release the buffer. + DirectBufferPool.INSTANCE.release(b); + fail("Expecting: " + IllegalArgumentException.class); + } catch (IllegalArgumentException ex) { + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
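One subtlety in the contains()-based rejection above: ByteBuffer#equals compares the remaining bytes of the two buffers, not their identity, so a content-based membership test can in principle confuse two distinct buffers that happen to hold equal bytes. A self-contained demonstration in plain Java, nothing bigdata-specific:

import java.nio.ByteBuffer;

public class ByteBufferEqualityDemo {

    public static void main(final String[] args) {

        // Two distinct direct buffers; allocateDirect zero-fills them.
        final ByteBuffer a = ByteBuffer.allocateDirect(8);
        final ByteBuffer b = ByteBuffer.allocateDirect(8);

        System.out.println(a == b);      // false: different objects.
        System.out.println(a.equals(b)); // true: equal remaining content.
    }
}

This is the same caveat the existing assertOurBuffer() comment makes about hash-based collections, and it is addressed head-on in the next revision.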
From: <tho...@us...> - 2011-05-27 14:40:29
Revision: 4557 http://bigdata.svn.sourceforge.net/bigdata/?rev=4557&view=rev Author: thompsonbry Date: 2011-05-27 14:40:22 +0000 (Fri, 27 May 2011) Log Message: ----------- Modified the DirectBufferPool to explicitly track the state of the allocated direct ByteBuffers in order to perform a fast correct rejection when a buffer is double released or when an attempt is made to release a buffer to a different pool. Added more unit tests for the DirectBufferPool. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-27 13:45:58 UTC (rev 4556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-05-27 14:40:22 UTC (rev 4557) @@ -2,6 +2,7 @@ import java.nio.ByteBuffer; import java.util.Collections; +import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.concurrent.BlockingQueue; @@ -63,6 +64,77 @@ .getLogger(DirectBufferPool.class); /** + * Object tracking state for allocated buffer instances. This is used to + * reject double-release of a buffer back to the pool, which is critical. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class BufferState { + + /** + * The buffer instance. + */ + private final ByteBuffer buf; + + /** + * <code>true</code> iff the buffer is currently acquired. + */ + private boolean acquired; + +// /** +// * The #of times this buffer has been acquired. +// */ +// private long nacquired = 0L; + + BufferState(final ByteBuffer buf, final boolean acquired) { + if (buf == null) + throw new IllegalArgumentException(); + this.buf = buf; + this.acquired = acquired; + } + + /** + * The hash code depends only on the object id (NOT the buffer's data). + * <p> + * Note: {@link ByteBuffer#hashCode()} is a very heavy operator whose + * result depends on the data actually in the buffer at the time the + * operation is evaluated! + */ + public int hashCode() { + return super.hashCode(); + } + + /** + * Equality depends only on a reference checks. + * <p> + * Note: {@link ByteBuffer#equals(Object)} is very heavy operator whose + * result depends on the data actually in the buffer at the time the + * operation is evaluated! + */ + public boolean equals(Object o) { + if (this == o) { + // Same BufferState, must be the same buffer. + return true; + } + if (!(o instanceof BufferState)) { + return false; + } + if (this.buf == ((BufferState) o).buf) { + return true; + } + /* + * We have two distinct BufferState references for the same + * ByteBuffer reference. This is an error. There should be a + * one-to-one correspondence. + */ + throw new AssertionError(); + } + + + } + + /** * The name of the buffer pool. */ final private String name; @@ -73,22 +145,18 @@ * Note: This is NOT a weak reference collection since the JVM will leak * native memory. */ - final private BlockingQueue<ByteBuffer> pool; + final private BlockingQueue<BufferState> pool; /** - * Used to recognize {@link ByteBuffer}s allocated by this pool so that - * we can refuse offered buffers that were allocated elsewhere (a - * paranoia feature which could be dropped). 
+ * Used to recognize {@link ByteBuffer}s allocated by this pool so that we + * can refuse offered buffers that were allocated elsewhere (a paranoia + * feature which could be dropped). * <p> - * Note: YOU CAN NOT use a hash-based collection here. hashCode() and - * equals() for a {@link ByteBuffer} are very heavy operations that are - * dependent on the data actually in the buffer at the time the - * operation is evaluated! - * <p> - * Note: if you set [allocated := null] in the ctor then tests of the - * allocated list are disabled. + * Note: {@link LinkedHashSet} is used here for its fast iterator semantics + * since we need to do a linear scan of this collection in + * {@link #getBufferState(ByteBuffer)}. */ - final private List<ByteBuffer> allocated; + final private LinkedHashSet<BufferState> allocated; /** * The number {@link ByteBuffer}s allocated (must use {@link #lock} for @@ -358,10 +426,10 @@ this.bufferCapacity = bufferCapacity; - this.allocated = null; // Note: disables assertion - // this.allocated = new LinkedList<ByteBuffer>(); + // Note: This is required in order to detect double-opens. + this.allocated = new LinkedHashSet<BufferState>(); - this.pool = new LinkedBlockingQueue<ByteBuffer>(poolCapacity); + this.pool = new LinkedBlockingQueue<BufferState>(poolCapacity); pools.add(this); @@ -442,17 +510,19 @@ } // the head of the pool must exist. - final ByteBuffer b = pool.take(); + final BufferState state = pool.take(); + if (state.acquired) + throw new RuntimeException("Buffer already acquired"); + + state.acquired = true; acquired++; totalAcquireCount.increment(); - - assertOurBuffer(b); // limit -> capacity; pos-> 0; mark cleared. - b.clear(); + state.buf.clear(); - return b; + return state.buf; } finally { @@ -515,15 +585,19 @@ try { - assertOurBuffer(b); + final BufferState state = getBufferState(b); - if (pool.contains(b)) + // Check for double-release! + if (!state.acquired) { + log.error("Buffer already released."); throw new IllegalArgumentException("buffer already released."); - + } + // add to the pool. - if(!pool.offer(b, timeout, units)) + if(!pool.offer(state, timeout, units)) return false; + state.acquired = false; acquired--; totalReleaseCount.increment(); @@ -591,15 +665,14 @@ // update the pool size. size++; + // wrap with state metadata. + final BufferState state = new BufferState(b, false/* acquired */); + // add to the set of known buffers - if (allocated != null) { + allocated.add(state); - allocated.add(b); - - } - // add to the pool. - pool.add(b); + pool.add(state); /* * There is now a buffer in the pool and the caller will get it @@ -658,8 +731,9 @@ * {@link DirectBufferPool}. * * @param b + * The buffer. */ - private void assertOurBuffer(final ByteBuffer b) { + private BufferState getBufferState(final ByteBuffer b) { assert lock.isHeldByCurrentThread(); @@ -672,21 +746,20 @@ if(!b.isDirect()) throw new IllegalArgumentException("not direct"); - if (allocated == null) { + /* + * Linear scan for a BufferState object having that ByteBuffer + * reference. + */ + for (BufferState x : allocated) { - // test is disabled. 
+ if (x.buf == b) { + + return x; + + } - return; - } - for (ByteBuffer x : allocated) { - - if (x == b) - return; - - } - throw new IllegalArgumentException("Buffer not allocated by this pool."); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-27 13:45:58 UTC (rev 4556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-05-27 14:40:22 UTC (rev 4557) @@ -37,17 +37,6 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ - * - * TODO Unit test to verify that a pool will reject a buffer not - * acquired from that pool. - * - * TODO Write a unit test to verify that buffers (up to the size of the - * pool, but not more than 10) are recycled when they are released - * (that is, they are made available again to subsequent acquires). - * - * <pre> - * final int limit = Math.min(10, DirectBufferPool.INSTANCE.getPoolCapacity()); - * </pre> */ public class TestDirectBufferPool extends TestCase2 { @@ -65,15 +54,13 @@ } @Override - protected void setUp() throws Exception { - super.setUp(); - DirectBufferPoolTestHelper.checkBufferPools(this); - } + protected void tearDown() throws Exception { - @Override - protected void tearDown() throws Exception { + // Verify that all allocated buffers were released. DirectBufferPoolTestHelper.checkBufferPools(this); + super.tearDown(); + } public void test_allocateRelease() throws InterruptedException { @@ -122,7 +109,24 @@ */ public void test_buffersRecycled() throws InterruptedException { + /* + * Acquire/release one buffer before we look at the pool size. This + * should give us at least one available buffer in the pool. That way + * when we run through the allocation loop the pool size should not + * change. + */ + { + ByteBuffer b = null; + try { + b = DirectBufferPool.INSTANCE.acquire(); + } finally { + if (b != null) + DirectBufferPool.INSTANCE.release(b); + } + } + final int poolSizeBefore = DirectBufferPool.INSTANCE.getPoolSize(); + for (int i = 0; i < 10; i++) { ByteBuffer b = null; try { @@ -136,6 +140,9 @@ } } + // pool size remains constant. + assertEquals(poolSizeBefore, DirectBufferPool.INSTANCE.getPoolSize()); + } /** @@ -168,4 +175,33 @@ } + /** + * Unit test to verify that a pool will reject a buffer not acquired from + * that pool. + */ + public void test_rejectBufferFromAnotherPool() throws InterruptedException { + + // A distinct pool with the same buffer capacity + final DirectBufferPool testPool = new DirectBufferPool("test", + 1/* poolCapacity */, DirectBufferPool.INSTANCE + .getBufferCapacity()); + + ByteBuffer b = null; + try { + b = DirectBufferPool.INSTANCE.acquire(); + try { + testPool.release(b); + fail("Release should not be permitted to a different pool. Expecting: " + + IllegalArgumentException.class); + } catch (IllegalArgumentException ex) { + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } + } finally { + if (b != null) + DirectBufferPool.INSTANCE.release(b); + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
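The BufferState wrapper pins hashCode() and equals() to object identity precisely so the acquired flag can be looked up without touching buffer contents. The same bookkeeping can be sketched with an IdentityHashMap; this is an illustration only, since the committed code instead keeps BufferState objects in a LinkedHashSet and scans it:

import java.nio.ByteBuffer;
import java.util.IdentityHashMap;
import java.util.Map;

public class AcquireStateSketch {

    /** TRUE := acquired, FALSE := available; keyed by buffer identity. */
    private final Map<ByteBuffer, Boolean> state =
            new IdentityHashMap<ByteBuffer, Boolean>();

    public synchronized void onAllocate(final ByteBuffer b) {
        state.put(b, Boolean.FALSE); // known to the pool, not yet acquired.
    }

    public synchronized void onAcquire(final ByteBuffer b) {
        state.put(b, Boolean.TRUE);
    }

    public synchronized void onRelease(final ByteBuffer b) {
        final Boolean acquired = state.get(b);
        if (acquired == null)
            throw new IllegalArgumentException(
                    "Buffer not allocated by this pool.");
        if (!acquired.booleanValue())
            throw new IllegalArgumentException("buffer already released.");
        state.put(b, Boolean.FALSE);
    }
}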
From: <mar...@us...> - 2011-05-27 15:04:09
Revision: 4558 http://bigdata.svn.sourceforge.net/bigdata/?rev=4558&view=rev Author: martyncutcher Date: 2011-05-27 15:04:03 +0000 (Fri, 27 May 2011) Log Message: ----------- Add checkdata method to WriteCache to confirm state of buffer on checksum failure and simple test Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-05-27 14:40:22 UTC (rev 4557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-05-27 15:04:03 UTC (rev 4558) @@ -838,7 +838,7 @@ if (chk != ChecksumUtility.threadChk.get().checksum(b, 0/* offset */, reclen)) { // Note: [offset] is a (possibly relative) file offset. - throw new ChecksumError("offset=" + offset); + throw new ChecksumError(checkdata()); } @@ -1077,6 +1077,59 @@ } /** + * Locks + * @return + * @throws InterruptedException + * @throws IllegalStateException + */ + public String checkdata() throws IllegalStateException, InterruptedException { + + if (!useChecksum) { + return "Unable to check since checksums are not enabled"; + } + + ByteBuffer tmp = acquire(); + try { + int nerrors = 0; + int nrecords = recordMap.size(); + + for (Entry<Long, RecordMetadata> ent : recordMap.entrySet()) { + RecordMetadata md = ent.getValue(); + + // length of the record w/o checksum field. + final int reclen = md.recordLength - 4; + + // the start of the record in writeCache. + final int pos = md.bufferOffset; + + final int chk = tmp.getInt(pos + reclen); + + // create a view with same offset, limit and position. + final ByteBuffer view = tmp.duplicate(); + + // adjust the view to just the record of interest. + view.limit(pos + reclen); + view.position(pos); + + final byte[] b = new byte[reclen]; + + final ByteBuffer dst = ByteBuffer.wrap(b); + + // copy the data into [dst] (and the backing byte[]). + dst.put(view); + if (chk != ChecksumUtility.threadChk.get().checksum(b, 0/* offset */, reclen)) { + log.error("Bad data for address: " + ent.getKey()); + nerrors++; + } + + } + return "WriteCache checkdata - records: " + nrecords + ", errors: " + nerrors; + } finally { + release(); + } + } + + /** * Write the data from the buffer onto the channel. This method provides a * uniform means to request that the buffer write itself onto the backing * channel, regardless of whether the channel is backed by a file, a socket, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2011-05-27 14:40:22 UTC (rev 4557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2011-05-27 15:04:03 UTC (rev 4558) @@ -44,6 +44,7 @@ import com.bigdata.io.TestCase3; import com.bigdata.io.writecache.WriteCache; import com.bigdata.rawstore.Bytes; +import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; /** @@ -85,7 +86,62 @@ * contents and metadata to the disk as necessary. 
*/ final private static String mode = "rw"; + + /** + * Confirm checksum errors + */ + public void test_writeCacheChecksums() { + + try { + final File file = File.createTempFile(getName(), ".tmp"); + + final boolean isHighlyAvailable = false; + + final ReopenFileChannel opener = new ReopenFileChannel(file, mode); + + final ByteBuffer buf = DirectBufferPool.INSTANCE.acquire(); + + try { + + // The buffer size must be at least 1k for these tests. + assertTrue(DirectBufferPool.INSTANCE.getBufferCapacity() >= Bytes.kilobyte32); + + WriteCache writeCache = new WriteCache.FileChannelWriteCache(0, buf, + true, isHighlyAvailable, false, opener); + + + long addr1 = 0; + long addr2 = 12800; + long addr3 = 24800; + ByteBuffer data1 = getRandomData(512); + int chk1 = ChecksumUtility.threadChk.get().checksum(data1, 0/* offset */, data1.limit()); + + writeCache.write(addr1, data1, chk1); + data1.flip(); + writeCache.write(addr2, data1, 23); // bad checksum + data1.flip(); + writeCache.write(addr3, data1, chk1); // bad checksum + + writeCache.read(addr1); + writeCache.read(addr3); + + try { + writeCache.read(addr2); + + fail("Expected ChecksumError"); + } catch (ChecksumError ce) { + System.out.println("Expected: " + ce.getMessage()); + } + + } finally { + DirectBufferPool.INSTANCE.release(buf); + } + } catch (Exception e) { + fail("Unexpected exception", e); + } + } + /** * Exercises most of the API. * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
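checkdata() re-runs, for every entry in the recordMap, the same verification that read() performs on a single record: the record occupies recordLength bytes of the cache, of which the last four are an int32 checksum over the preceding payload. A reduced sketch of that check follows; CRC32 stands in here for bigdata's ChecksumUtility, which is an assumption of the sketch:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class RecordChecksumSketch {

    static int checksum(final byte[] b, final int off, final int len) {
        final CRC32 crc = new CRC32();
        crc.update(b, off, len);
        return (int) crc.getValue();
    }

    /**
     * Verify the record at [pos] whose total length, including the
     * trailing 4-byte checksum, is [recordLength].
     */
    static boolean check(final ByteBuffer buf, final int pos,
            final int recordLength) {

        final int reclen = recordLength - 4; // length w/o checksum field.

        // View just the payload without disturbing [buf]'s own position.
        final ByteBuffer view = buf.duplicate();
        view.limit(pos + reclen);
        view.position(pos);

        final byte[] b = new byte[reclen];
        view.get(b);

        // The stored checksum trails the payload.
        final int expected = buf.getInt(pos + reclen);

        return expected == checksum(b, 0/* offset */, reclen);
    }
}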
From: <tho...@us...> - 2011-05-28 12:37:48
Revision: 4560 http://bigdata.svn.sourceforge.net/bigdata/?rev=4560&view=rev Author: thompsonbry Date: 2011-05-28 12:37:41 +0000 (Sat, 28 May 2011) Log Message: ----------- Made the logger in RootBlockView private. Modified TestRootBlockView to temporarily suppress log messages @ WARN since it was cluttering the CI log with 5000+ such messages in its text_ctor() method. Removed FIXME in TextTx describing a B+Tree concurrent modification problem which I believe was fixed by a set of changes incorporated into the last release. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockView.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestRootBlockView.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTx.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockView.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockView.java 2011-05-28 12:03:51 UTC (rev 4559) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RootBlockView.java 2011-05-28 12:37:41 UTC (rev 4560) @@ -54,7 +54,7 @@ /** * Logger. */ - public static final Logger log = Logger.getLogger(RootBlockView.class); + private static final Logger log = Logger.getLogger(RootBlockView.class); static final transient short SIZEOF_TIMESTAMP = Bytes.SIZEOF_LONG; static final transient short SIZEOF_MAGIC = Bytes.SIZEOF_INT; @@ -669,7 +669,7 @@ if (checker == null) { log.warn("Checksum will not be validated"); - + } // Check sum checker. Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestRootBlockView.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestRootBlockView.java 2011-05-28 12:03:51 UTC (rev 4559) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestRootBlockView.java 2011-05-28 12:37:41 UTC (rev 4560) @@ -33,6 +33,9 @@ import junit.framework.TestCase2; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + import com.bigdata.quorum.Quorum; import com.bigdata.rawstore.TestWormAddressManager; import com.bigdata.rawstore.WormAddressManager; @@ -50,6 +53,9 @@ */ public class TestRootBlockView extends TestCase2 { + private static final transient Logger log = Logger + .getLogger(TestRootBlockView.class); + /** * */ @@ -94,6 +100,16 @@ * Constructor correct acceptance stress test. */ public void test_ctor() { + + /* + * Note: This temporarily suppresses the WARN messages which are + * otherwise generated when we do not compute the checksum of the root + * block. Those messages completely clutter the CI log. + */ + final Logger log2 = Logger.getLogger(RootBlockView.class); + final Level c = log2.getLevel(); + try { + log2.setLevel(Level.ERROR); final Random r = new Random(); @@ -246,13 +262,12 @@ log.info("Ignoring expected exception: " + ex); } - + /* - * verify that we can read that root block anyway if we provide + * Verify that we can read that root block anyway if we provide * a [null] checksum utility. */ - - new RootBlockView(rootBlock0,modified,null/*checker*/); + new RootBlockView(rootBlock0, modified, null/* checker */); } @@ -305,8 +320,10 @@ } } - + } finally { + log2.setLevel(c); } + } /** * Correct rejection tests for the constructor. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTx.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTx.java 2011-05-28 12:03:51 UTC (rev 4559) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestTx.java 2011-05-28 12:37:41 UTC (rev 4560) @@ -45,7 +45,6 @@ import com.bigdata.btree.Tuple; import com.bigdata.btree.isolation.IsolatedFusedView; import com.bigdata.util.InnerCause; -import com.bigdata.util.InnerCause; /** * Test suite for fully-isolated read-write transactions. @@ -1071,16 +1070,6 @@ /** * Stress test for concurrent transactions against a single named index. - * - * FIXME This demonstrates a problem when there are a modest number (10s) of - * concurrent transactions with a moderately large write sets (1000s). The - * problem is not deterministic, but you can always demonstrate it if you - * raise the parameters high enough. The issue shows up as a concurrent - * modification of the unisolated B+Tree during validation. The problem - * appears to be unrelated to the one documented in - * {@link JournalTransactionService}, as that has to do with the atomicity - * of the LocalTx state changes but the issue here is concurrent - * modification to the B+Tree. */ public void testStress() throws InterruptedException, ExecutionException { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
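The suppression idiom in test_ctor() generalizes: capture the level, raise it, and restore it in a finally block so the change cannot leak into other tests even when an assertion fails. As a small reusable sketch (hypothetical helper, same log4j calls as above):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class QuietLoggerSketch {

    /**
     * Run [task] with [log] temporarily raised to ERROR. The saved level
     * may be null, meaning the logger inherits from its parent; setLevel
     * accepts null for exactly that case.
     */
    static void runQuietly(final Logger log, final Runnable task) {
        final Level saved = log.getLevel();
        log.setLevel(Level.ERROR);
        try {
            task.run();
        } finally {
            log.setLevel(saved); // restore, even on failure.
        }
    }
}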
From: <tho...@us...> - 2011-05-28 20:17:05
Revision: 4569 http://bigdata.svn.sourceforge.net/bigdata/?rev=4569&view=rev Author: thompsonbry Date: 2011-05-28 20:16:58 +0000 (Sat, 28 May 2011) Log Message: ----------- Bug fix to distributed transaction service snapshot introduced by the INT64_BRANCH Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestSnapshotHelper.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java 2011-05-28 18:46:31 UTC (rev 4568) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java 2011-05-28 20:16:58 UTC (rev 4569) @@ -308,7 +308,7 @@ try { // read most recent image. - final int entryCount = SnapshotHelper.read(commitTimeIndex, + final long entryCount = SnapshotHelper.read(commitTimeIndex, file); log.warn("Read snapshot: entryCount=" + entryCount + ", file=" @@ -459,7 +459,7 @@ */ public static class SnapshotHelper { - static public int read(CommitTimeIndex ndx, File file) + static public long read(CommitTimeIndex ndx, File file) throws IOException { final FileInputStream is = new FileInputStream(file); @@ -480,10 +480,10 @@ } - static public int read(CommitTimeIndex ndx, DataInputStream is) + static public long read(CommitTimeIndex ndx, DataInputStream is) throws IOException { - final int n = is.readInt(); + final long n = is.readLong(); for (int i = 0; i < n; i++) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestSnapshotHelper.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestSnapshotHelper.java 2011-05-28 18:46:31 UTC (rev 4568) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestSnapshotHelper.java 2011-05-28 20:16:58 UTC (rev 4569) @@ -62,19 +62,20 @@ final File testFile = File.createTempFile(getName(), ".snapshot"); - if (!testFile.delete()) { + try { + + if (!testFile.delete()) { - fail("Could not delete test file: " + testFile); + fail("Could not delete test file: " + testFile); + + } - } - - try { - // test empty snapshot. { // populate and write. { - CommitTimeIndex ndx = CommitTimeIndex.createTransient(); + + final CommitTimeIndex ndx = CommitTimeIndex.createTransient(); SnapshotHelper.write(ndx, testFile); } @@ -82,7 +83,7 @@ // read and verify. { - CommitTimeIndex ndx = CommitTimeIndex.createTransient(); + final CommitTimeIndex ndx = CommitTimeIndex.createTransient(); SnapshotHelper.read(ndx, testFile); @@ -99,23 +100,28 @@ // populate and write. { - CommitTimeIndex ndx = CommitTimeIndex.createTransient(); + final CommitTimeIndex ndx = CommitTimeIndex.createTransient(); ndx.add(10L); ndx.add(20L); - SnapshotHelper.write(ndx, testFile); + final long nwritten = SnapshotHelper.write(ndx, testFile); + + assertEquals(2L, nwritten); + } // read and verify. 
{ - CommitTimeIndex ndx = CommitTimeIndex.createTransient(); + final CommitTimeIndex ndx = CommitTimeIndex.createTransient(); - SnapshotHelper.read(ndx, testFile); + final long nread = SnapshotHelper.read(ndx, testFile); + + assertEquals(2L, nread); - assertEquals(new long[]{10,20},toArray(ndx)); + assertEquals(new long[] { 10, 20 }, toArray(ndx)); } @@ -137,7 +143,7 @@ * * @return The array. */ - long[] toArray(CommitTimeIndex ndx) { + long[] toArray(final CommitTimeIndex ndx) { synchronized(ndx) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
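The underlying invariant: the snapshot begins with an entry count, and the reader must consume it at exactly the width the writer produced. After the INT64_BRANCH the count is 64 bits, so read() now uses readLong() where it previously read an int. A standalone sketch of the (now symmetric) format, mirroring the {10L, 20L} round-trip in the test:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SnapshotFormatSketch {

    static byte[] write(final long[] commitTimes) throws IOException {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final DataOutputStream os = new DataOutputStream(baos);
        os.writeLong(commitTimes.length); // 64-bit entry count.
        for (long t : commitTimes)
            os.writeLong(t);
        os.flush();
        return baos.toByteArray();
    }

    static long[] read(final byte[] data) throws IOException {
        final DataInputStream is = new DataInputStream(
                new ByteArrayInputStream(data));
        final long n = is.readLong(); // must match the writer's width.
        final long[] a = new long[(int) n];
        for (int i = 0; i < n; i++)
            a[i] = is.readLong();
        return a;
    }

    public static void main(final String[] args) throws IOException {
        final long[] out = read(write(new long[] { 10L, 20L }));
        System.out.println(out.length + ": " + out[0] + "," + out[1]);
    }
}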
From: <tho...@us...> - 2011-06-01 16:50:53
Revision: 4592 http://bigdata.svn.sourceforge.net/bigdata/?rev=4592&view=rev Author: thompsonbry Date: 2011-06-01 16:50:47 +0000 (Wed, 01 Jun 2011) Log Message: ----------- Added CI check for unclosed TemporaryRawStores Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java 2011-06-01 16:41:13 UTC (rev 4591) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryRawStore.java 2011-06-01 16:50:47 UTC (rev 4592) @@ -32,6 +32,7 @@ import java.io.Serializable; import java.nio.ByteBuffer; import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.log4j.Logger; @@ -96,6 +97,20 @@ private final long createTime; /** + * The #of open {@link TemporaryRawStore}s (JVM wide). This is package + * private. It is used to chase down unit tests which are not closing() the + * store. + */ + final static AtomicInteger nopen = new AtomicInteger(); + + /** + * The #of closed {@link TemporaryRawStore}s (JVM wide). This is package + * private. It is used to chase down unit tests which are not + * {@link #close() closing} the store. + */ + final static AtomicInteger nclose = new AtomicInteger(); + + /** * Return an empty {@link File} created using the temporary file name * mechanism. The file name will begin with <code>bigdata</code> and end * with <code>.tmp</code>. The file is marked for eventual deletion. @@ -293,6 +308,8 @@ // Long.valueOf(Options.DEFAULT_MINIMUM_EXTENSION), md); + nopen.incrementAndGet(); + } /** @@ -354,6 +371,8 @@ buf.destroy(); + nclose.incrementAndGet(); + } finally { // if (writeCache != null) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java 2011-06-01 16:41:13 UTC (rev 4591) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestHelper.java 2011-06-01 16:50:47 UTC (rev 4592) @@ -109,10 +109,49 @@ } + checkTempStoresClosed(test, testClass); + // Also check the direct buffer pools. DirectBufferPoolTestHelper.checkBufferPools(test, testClass); } + /** + * Verify that any {@link TemporaryRawStore}s created by the test have been + * destroyed. + * <p> + * Note: This clears the counter as a side effect to prevent a cascade of + * tests from being failed. + * + * @param test + * The unit test instance. + * @param testClass + * The instance of the delegate test class for a proxy test + * suite. For example, TestWORMStrategy. + */ + private static void checkTempStoresClosed(final TestCase test, + final TestCase testClass) { + + final int nopen = TemporaryRawStore.nopen.getAndSet(0); + final int nclose = TemporaryRawStore.nclose.getAndSet(0); + + if (nopen != nclose) { + + /* + * At least one temporary store was opened which was never closed. + */ + + Assert.fail("Test did not close temp store(s)"// + + ": nopen=" + nopen // + + ", nclose=" + nclose// + + ", test=" + test.getClass() + "." + test.getName()// + + (testClass == null ? 
"" : ", testClass=" + + testClass.getClass().getName())// + ); + + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
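The detection pattern itself is small enough to state in full: a pair of JVM-wide counters bumped in the constructor and in close(), plus a tear-down hook that asserts they balance and resets them so one leaking test does not fail every test after it. A sketch with a hypothetical resource class:

import java.util.concurrent.atomic.AtomicInteger;

public class TrackedResourceSketch {

    // JVM-wide counters, as in TemporaryRawStore above.
    static final AtomicInteger nopen = new AtomicInteger();
    static final AtomicInteger nclose = new AtomicInteger();

    public TrackedResourceSketch() {
        nopen.incrementAndGet();
    }

    public void close() {
        nclose.incrementAndGet();
    }

    /** Invoked from the test harness tear-down. */
    static void checkAllClosed() {
        // Clear as a side effect to prevent cascading test failures.
        final int opened = nopen.getAndSet(0);
        final int closed = nclose.getAndSet(0);
        if (opened != closed)
            throw new AssertionError("Unclosed resource(s): nopen=" + opened
                    + ", nclose=" + closed);
    }
}

One caveat the counters share with the real class: they detect that some test leaked a store, not which store was leaked, so the failure points at the offending test and finding the open site is still a manual step.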
From: <tho...@us...> - 2011-06-01 19:09:48
Revision: 4595 http://bigdata.svn.sourceforge.net/bigdata/?rev=4595&view=rev Author: thompsonbry Date: 2011-06-01 19:09:40 +0000 (Wed, 01 Jun 2011) Log Message: ----------- Modified IndexSegmentPlan, IndexSegmentBuilder, etc. to permit the build of an index with an empty root leaf. Previously, the root leaf was not written out at all for this case. However, this was causing problems with the IndexSegmentTupleCursor which does not do top-down navigation and did not automatically provide a false empty root. https://sourceforge.net/apps/trac/bigdata/ticket/149 Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTreeUtilizationReport.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentPlan.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/Leaf.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestAll_IndexSegment.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithBlobCapacity.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentPlan.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilder_EmptyIndex.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTreeUtilizationReport.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTreeUtilizationReport.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BTreeUtilizationReport.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -70,7 +70,8 @@ nodeUtilization = (int) (nnodes == 0 ? 100 : (100L * numNonRootNodes) / (nnodes * (long) branchingFactor)); - leafUtilization = (int) ((100L * nentries) / (nleaves * (long) branchingFactor)); + leafUtilization = (int) (nleaves == 0 ? 0 : (100L * nentries) + / (nleaves * (long) branchingFactor)); totalUtilization = (nodeUtilization + leafUtilization) / 2; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -1718,6 +1718,20 @@ // // Flag used to flush the last leaf iff it is dirty. // boolean needsFlush = false; + + if (plan.nentries == 0) { + + /* + * A single empty root leaf. + */ + + leaf.reset(plan.numInNode[leaf.level][0]); + + flushNodeOrLeaf(leaf); + + return; + + } // For each leaf in the plan while tuples remain. for (int i = 0; i < plan.nleaves && entryIterator.hasNext(); i++) { @@ -2486,9 +2500,10 @@ * immediately above. * * Note: We only invoke flush() if a leaf has data so we should - * never be in a position of writing out an empty leaf. + * never be in a position of writing out an empty leaf (with the + * exception of a B+Tree which has no tuples). 
*/ - assert lastLeafData.getKeyCount() > 0 : "Last leaf is empty?"; + assert plan.nentries == 0 || lastLeafData.getKeyCount() > 0 : "Last leaf is empty?"; if (log.isDebugEnabled()) log.debug("updating last leaf"// Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -663,51 +663,52 @@ if (nentries < 0) throw new RootBlockException("nentries=" + nentries); - if (nentries == 0) { - - /* - * Empty index segment. - */ - - if (nleaves != 0) - throw new RootBlockException("empty index but nleaves=" - + nleaves); - - if (nnodes != 0) - throw new RootBlockException("empty index but nnodes=" - + nnodes); +// if (nentries == 0) { +// +// /* +// * Empty index segment. +// */ +// +// if (nleaves != 0) +// throw new RootBlockException("empty index but nleaves=" +// + nleaves); +// +// if (nnodes != 0) +// throw new RootBlockException("empty index but nnodes=" +// + nnodes); +// +// if (maxNodeOrLeafLength != 0) +// throw new RootBlockException( +// "empty index but maxNodeOrLeafLength=" +// + maxNodeOrLeafLength); +// +// if (extentLeaves != 0L) +// throw new RootBlockException("empty index but extentLeaves=" +// + extentLeaves); +// +// if (offsetLeaves != 0L) +// throw new RootBlockException("empty index but offsetLeaves=" +// + offsetLeaves); +// +// if (extentNodes != 0L) +// throw new RootBlockException("empty index but extentNodes=" +// + extentNodes); +// +// if (offsetNodes != 0L) +// throw new RootBlockException("empty index but offsetNodes=" +// + offsetNodes); +// +// if (addrFirstLeaf != 0L) +// throw new RootBlockException("empty index but addrFirstLeaf=" +// + addrFirstLeaf); +// +// if (addrLastLeaf != 0L) +// throw new RootBlockException("empty index but addrLastLeaf=" +// + addrLastLeaf); +// +// } else { + { - if (maxNodeOrLeafLength != 0) - throw new RootBlockException( - "empty index but maxNodeOrLeafLength=" - + maxNodeOrLeafLength); - - if (extentLeaves != 0L) - throw new RootBlockException("empty index but extentLeaves=" - + extentLeaves); - - if (offsetLeaves != 0L) - throw new RootBlockException("empty index but offsetLeaves=" - + offsetLeaves); - - if (extentNodes != 0L) - throw new RootBlockException("empty index but extentNodes=" - + extentNodes); - - if (offsetNodes != 0L) - throw new RootBlockException("empty index but offsetNodes=" - + offsetNodes); - - if (addrFirstLeaf != 0L) - throw new RootBlockException("empty index but addrFirstLeaf=" - + addrFirstLeaf); - - if (addrLastLeaf != 0L) - throw new RootBlockException("empty index but addrLastLeaf=" - + addrLastLeaf); - - } else { - if (nleaves <= 0) throw new RootBlockException("nleaves=" + nleaves); @@ -1015,7 +1016,7 @@ sb.append(", nnodes=" + nnodes); sb.append(", nentries=" + nentries); sb.append(", maxNodeOrLeafLength=" + maxNodeOrLeafLength); - sb.append(", leavesRegion={extent=" + extentLeaves+", offset="+offsetLeaves+"}, avgLeafSize="+(extentLeaves/nleaves)); + sb.append(", leavesRegion={extent=" + extentLeaves+", offset="+offsetLeaves+"}, avgLeafSize="+(nleaves==0?0:(extentLeaves/nleaves))); sb.append(", nodesRegion={extent=" + extentNodes+", offset="+offsetNodes+"}, avgNodeSize="+(nnodes==0?0:(extentNodes/nnodes))); sb.append(", 
blobsRegion={extent=" + extentBlobs+", offset="+offsetBlobs+"}"); sb.append(", addrRoot=" + am.toString(addrRoot)); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentPlan.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentPlan.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentPlan.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -156,11 +156,11 @@ if (log.isInfoEnabled()) log.info("Empty tree."); - nleaves = 0; + nleaves = 1; height = 0; - numInLeaf = new int[]{}; - numInNode = new int[][]{}; - numInLevel = new long[]{}; + numInLeaf = new int[]{0}; + numInNode = new int[][]{new int[]{0}}; + numInLevel = new long[]{1}; nnodes = 0; return; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/Leaf.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/Leaf.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/Leaf.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -1991,8 +1991,16 @@ final int nkeys = this.getKeyCount(); final int minKeys = this.minKeys(); final int maxKeys = this.maxKeys(); - - if ((btree.root != this) && (nkeys < minKeys)) { + + /* + * Since the index segment does not materialize the root when running a + * leaf cursor we can not rely on [btree.root != this]. + */ + final boolean isRoot = (btree.root == this) + || ((btree instanceof IndexSegment) && btree.getEntryCount() == 0); + + if (!isRoot + && (nkeys < minKeys)) { /* * Min keys failure. * @@ -2000,7 +2008,7 @@ */ out.println(indent(height) + "ERROR: too few keys: m=" + branchingFactor + ", minKeys=" + minKeys + ", nkeys=" - + nkeys + ", isLeaf=" + isLeaf()); + + nkeys + ", isLeaf=" + isLeaf() + ", isRoot=" + isRoot); ok = false; } @@ -2008,7 +2016,7 @@ // max keys failure. out.println(indent(height) + "ERROR: too many keys: m=" + branchingFactor + ", maxKeys=" + maxKeys + ", nkeys=" - + nkeys + ", isLeaf=" + isLeaf()); + + nkeys + ", isLeaf=" + isLeaf() + ", isRoot=" + isRoot); ok = false; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestAll_IndexSegment.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestAll_IndexSegment.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestAll_IndexSegment.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -72,6 +72,8 @@ suite.addTestSuite(TestIndexSegmentAddressManager.class); // test write and read back of the index segment metadata record. suite.addTestSuite(TestIndexSegmentCheckpoint.class); + // test with an empty B+Tree. + suite.addTestSuite(TestIndexSegmentBuilder_EmptyIndex.class); // test with small known examples in detail. suite.addTestSuite(TestIndexSegmentBuilderWithSmallTree.class); // and add in a stress test suite for those small examples. 
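The IndexSegmentPlan hunk above replaces the old degenerate plan for an empty input (nleaves = 0 and empty level arrays) with a plan that describes a single empty root leaf: nleaves = 1, height = 0, numInLeaf = {0}, numInLevel = {1}. For readers following the arithmetic, here is a minimal standalone sketch of the leaf-count rule; it is an illustration under hypothetical names, not the bigdata implementation:

// Sketch of the planning rule changed above (hypothetical names).
public class EmptyPlanSketch {

    /**
     * #of leaves for a plan with branching factor m and n tuples. After this
     * change an empty input still yields one (empty) root leaf rather than
     * zero leaves.
     */
    static int nleaves(final int m, final long n) {

        if (n == 0)
            return 1; // single empty root leaf.

        return (int) ((n + m - 1) / m); // ceil(n/m)

    }

    public static void main(final String[] args) {

        System.out.println(nleaves(3, 0));   // 1 : empty root leaf.
        System.out.println(nleaves(10, 10)); // 1 : everything in the root leaf.
        System.out.println(nleaves(3, 20));  // 7 : multiple leaves.

    }

}

Always emitting at least one leaf is what lets the IndexSegmentTupleCursor, which does not navigate top-down, stop special-casing a segment whose root was never written.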
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsFromByteArray.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -384,8 +384,8 @@ final Random r = new Random(); - // #of - final int limit = 1000; + // #of iterations + final long limit = 1000000; // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. final int len = r.nextInt(Bytes.kilobyte32 * 8) + 1; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithBlobCapacity.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithBlobCapacity.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithBlobCapacity.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -81,6 +81,10 @@ } } + protected boolean useRawRecords() { + return true; + } + protected IndexSegmentCheckpoint doBuildAndDiscardCache(final BTree btree, final int m) throws IOException, Exception { @@ -108,8 +112,4 @@ } - protected boolean useRawRecords() { - return true; - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -26,8 +26,6 @@ import java.io.File; import java.io.IOException; -import junit.framework.TestSuite; - import org.apache.log4j.Level; import com.bigdata.LRUNexus; @@ -43,12 +41,6 @@ public class TestIndexSegmentBuilderWithSmallTree extends AbstractIndexSegmentTestCase { - private File outFile; - - private File tmpDir; - - private boolean bufferNodes; - public TestIndexSegmentBuilderWithSmallTree() { } @@ -56,6 +48,12 @@ super(name); } + private File outFile; + + private File tmpDir; + + private boolean bufferNodes; + public void setUp() throws Exception { super.setUp(); Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilder_EmptyIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilder_EmptyIndex.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilder_EmptyIndex.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -0,0 +1,194 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jun 1, 2011 + */ + +package com.bigdata.btree; + +import java.io.File; +import java.io.IOException; + +import com.bigdata.LRUNexus; + +/** + * Test suite for building an {@link IndexSegment} from an empty {@link BTree}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestIndexSegmentBuilder_EmptyIndex extends + AbstractIndexSegmentTestCase { + + /** + * + */ + public TestIndexSegmentBuilder_EmptyIndex() { + } + + /** + * @param name + */ + public TestIndexSegmentBuilder_EmptyIndex(String name) { + super(name); + } + + private File outFile; + + private File tmpDir; + + private boolean bufferNodes; + + public void setUp() throws Exception { + + super.setUp(); + + // random choice. + bufferNodes = r.nextBoolean(); + + outFile = new File(getName() + ".seg"); + + if (outFile.exists() && !outFile.delete()) { + + throw new RuntimeException("Could not delete file: " + outFile); + + } + + tmpDir = outFile.getAbsoluteFile().getParentFile(); + + } + + public void tearDown() throws Exception { + + if (outFile != null && outFile.exists() && !outFile.delete()) { + + log.warn("Could not delete file: " + outFile); + + } + + super.tearDown(); + + // clear references. + outFile = null; + tmpDir = null; + + } + + /** + * Test ability to build an index segment from an empty {@link BTree}. + */ + public void test_buildOrder3_emptyIndex() throws Exception { + + final BTree btree = getBTree(3); + + final IndexSegmentCheckpoint checkpoint = doBuildAndDiscardCache(btree, + 3/* m */); + + /* + * Verify can load the index file and that the metadata associated with + * the index file is correct (we are only checking those aspects that + * are easily defined by the test case and not, for example, those + * aspects that depend on the specifics of the length of serialized + * nodes or leaves). + */ + + final IndexSegmentStore segStore = new IndexSegmentStore(outFile); + + assertEquals(checkpoint.commitTime, segStore.getCheckpoint().commitTime); + assertEquals(0, segStore.getCheckpoint().height); + assertEquals(1, segStore.getCheckpoint().nleaves); + assertEquals(0, segStore.getCheckpoint().nnodes); + assertEquals(0, segStore.getCheckpoint().nentries); + + final IndexSegment seg = segStore.loadIndexSegment(); + + try { + + assertEquals(3, seg.getBranchingFactor()); + assertEquals(0, seg.getHeight()); + assertEquals(1, seg.getLeafCount()); + assertEquals(0, seg.getNodeCount()); + assertEquals(0, seg.getEntryCount()); + + testForwardScan(seg); + testReverseScan(seg); + + // test index segment structure. + dumpIndexSegment(seg); + + /* + * Test the tree in detail. + */ + { + + final Leaf a = (Leaf) seg.getRoot(); + + assertKeys(new int[] {}, a); + + // Note: values are verified by testing the total order. + + } + + /* + * Verify the total index order. + */ + assertSameBTree(btree, seg); + + } finally { + + // close so we can delete the backing store. 
+ seg.close(); + + } + + } + + protected IndexSegmentCheckpoint doBuildAndDiscardCache(final BTree btree, + final int m) throws IOException, Exception { + + final long commitTime = System.currentTimeMillis(); + + final IndexSegmentCheckpoint checkpoint = IndexSegmentBuilder + .newInstance(outFile, tmpDir, btree.getEntryCount(), + btree.rangeIterator(), m, btree.getIndexMetadata(), + commitTime, true/* compactingMerge */, bufferNodes) + .call(); + + if (LRUNexus.INSTANCE != null) { + + /* + * Clear the records for the index segment from the cache so we will + * read directly from the file. This is necessary to ensure that the + * data on the file is good rather than just the data in the cache. + */ + + LRUNexus.INSTANCE.deleteCache(checkpoint.segmentUUID); + + } + + return checkpoint; + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilder_EmptyIndex.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentPlan.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentPlan.java 2011-06-01 17:55:48 UTC (rev 4594) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestIndexSegmentPlan.java 2011-06-01 19:09:40 UTC (rev 4595) @@ -27,52 +27,10 @@ package com.bigdata.btree; - /** * Test suite for efficient post-order rebuild of an index in an external index * segment. * - * @todo verify post-conditions for files (temp file is deleted, perhaps the - * index segment is read only). - * - * @todo try building large indices, exporting them into index segments, and - * then verifying that the index segments have the correct data. We can - * run a variety of index stress tests to build the index, sweep in data - * from the file system, etc., and then generate the corresponding index - * segment and validate it against the in memory {@link BTree}. - * - * @todo The notion of merging multiple index segments requires a notion of - * which index segments are more recent or alternatively which values are - * more recent so that we can reconcile values for the same key. this is - * linked to how we will handle transactional isolation. - * - * @todo Handle "delete" markers. For full transactional isolation we need to - * keep delete markers around until there are no more live transactions - * that could read the index entry. This suggests that we probably want to - * use the transaction timestamp rather than a version counter. Consider - * that a read by tx1 needs to check the index on the journal and then - * each index segment in turn in reverse historical order until an entry - * (potentially a delete marker) is found that is equal to or less than - * the timestamp of the committed state from which tx1 was born. This - * means that an index miss must test the journal and all live segments - * for that index (hence the use of bloom filters to filter out index - * misses). It also suggests that we should keep the timestamp as part of - * the key, except in the ground state index on the journal where the - * timestamp is the timestamp of the last commit of the journal. This - * probably will also address VLR TX that would span a freeze of the - * journal. We expunge the isolated index into a segment and do a merge - * when the transaction finally commits. 
We wind up doing the same
- * validation and merge steps as when the isolation occurs within a more
- * limited buffer, but in more of a batch fashion. This might work nicely
- * if we buffer the isolatation index out to a certain size in memory and
- * then start to spill it onto the journal. If fact, the hard reference
- * queue already does this so we can just test to see if (a) anything has
- * been written out from the isolation index; and (b) whether or not the
- * journal was frozen since the isolation index was created.
- * 
- * Should the merge down should impose the transaction commit timestamp on the
- * items in the index?
- * 
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  * @version $Id$
  */
@@ -368,6 +326,48 @@
 
     /**
      * Tests {@link IndexSegmentPlan} for a tree with a branching factor of
+     * (m=10) and (n=10) entries (everything fits into the root leaf)
+     */
+    public void test_plan_m10_n10_everythingInTheRootLeaf() {
+
+        IndexSegmentPlan plan = new IndexSegmentPlan(10,10);
+
+        assertEquals("m",10,plan.m);
+        assertEquals("(m+1/2)",5,plan.m2);
+        assertEquals("nentries",10,plan.nentries);
+        assertEquals("nleaves",1,plan.nleaves);
+        assertEquals("nnodes",0,plan.nnodes);
+        assertEquals("height",0,plan.height);
+        assertEquals("numInLeaf[]",new int[]{10},plan.numInLeaf);
+        assertEquals("numInLevel[]",new long[]{1},plan.numInLevel);
+        assertEquals("numInNode[][]",plan.height+1,plan.numInNode.length);
+        assertEquals("numInNode[0][]",new int[]{10},plan.numInNode[0]);
+
+    }
+
+    /**
+     * Tests {@link IndexSegmentPlan} for a tree with a branching factor of
+     * (m=3) and (n=0) entries.
+     */
+    public void test_plan_m3_n0_emptyRootLeaf() {
+
+        final IndexSegmentPlan plan = new IndexSegmentPlan(3, 0);
+
+        assertEquals("m",3,plan.m);
+        assertEquals("(m+1/2)",2,plan.m2);
+        assertEquals("nentries",0,plan.nentries);
+        assertEquals("nleaves",1,plan.nleaves);
+        assertEquals("nnodes",0,plan.nnodes);
+        assertEquals("height",0,plan.height);
+        assertEquals("numInLeaf[]",new int[]{0},plan.numInLeaf);
+        assertEquals("numInLevel[]",new long[]{1},plan.numInLevel);
+        assertEquals("numInNode[][]",plan.height+1,plan.numInNode.length);
+        assertEquals("numInNode[0][]",new int[]{0},plan.numInNode[0]);
+
+    }
+
+    /**
      * Tests {@link IndexSegmentPlan} for a tree with a branching factor of
      * (m=3) and (n=20) entries.
      */
     public void test_plan_m3_n20() {
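A related detail from the BTreeUtilizationReport hunk at the top of this revision: the leaf utilization division is now guarded so that a checkpoint with nleaves == 0 cannot raise an ArithmeticException, mirroring the nnodes == 0 guard that was already present. A minimal sketch of the guarded arithmetic, again with hypothetical names:

// Sketch of the guarded utilization arithmetic (hypothetical names).
public class UtilizationSketch {

    static int leafUtilization(final long nentries, final long nleaves,
            final int branchingFactor) {

        // Guard the division: an index with no leaves reports 0% rather
        // than throwing ArithmeticException.
        return (int) (nleaves == 0 ? 0 : (100L * nentries)
                / (nleaves * (long) branchingFactor));

    }

    public static void main(final String[] args) {

        System.out.println(leafUtilization(0, 0, 3));   // 0, no exception.
        System.out.println(leafUtilization(10, 1, 10)); // 100 : full root leaf.

    }

}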
From: <tho...@us...> - 2011-06-02 13:10:27
|
Revision: 4601 http://bigdata.svn.sourceforge.net/bigdata/?rev=4601&view=rev Author: thompsonbry Date: 2011-06-02 13:10:20 +0000 (Thu, 02 Jun 2011) Log Message: ----------- Code cleanup and javadoc Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-02 12:52:57 UTC (rev 4600) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-02 13:10:20 UTC (rev 4601) @@ -1076,13 +1076,16 @@ } - /** - * Locks - * @return - * @throws InterruptedException - * @throws IllegalStateException - */ - public String checkdata() throws IllegalStateException, InterruptedException { + /** + * Debug routine logs @ ERROR additional information when a checksum error + * has been encountered. + * + * @return An informative error message. + * + * @throws InterruptedException + * @throws IllegalStateException + */ + private String checkdata() throws IllegalStateException, InterruptedException { if (!useChecksum) { return "Unable to check since checksums are not enabled"; @@ -1094,8 +1097,9 @@ int nrecords = recordMap.size(); for (Entry<Long, RecordMetadata> ent : recordMap.entrySet()) { - RecordMetadata md = ent.getValue(); + final RecordMetadata md = ent.getValue(); + // length of the record w/o checksum field. final int reclen = md.recordLength - 4; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-06-02 12:52:57 UTC (rev 4600) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2011-06-02 13:10:20 UTC (rev 4601) @@ -2271,13 +2271,13 @@ } /* - * Overriden methods for registering or dropping indices. + * Overridden methods for registering or dropping indices. */ /** * Delegates to the {@link AbstractTask}. */ - public void dropIndex(String name) { + public void dropIndex(final String name) { AbstractTask.this.dropIndex(name); @@ -2656,7 +2656,7 @@ } @SuppressWarnings("unchecked") - public ReadOnlyJournal(AbstractJournal source) { + public ReadOnlyJournal(final AbstractJournal source) { if (source == null) throw new IllegalArgumentException(); @@ -2666,7 +2666,7 @@ /* * Setup a locator for resources. Resources that correspond to * indices declared by the task are accessible via the task itself. - * Other resources are assessible via the locator on the underlying + * Other resources are accessible via the locator on the underlying * journal. When the journal is part of a federation, that locator * will be the federation's locator. */ @@ -2679,7 +2679,7 @@ } /* - * Index access methods (overriden or disallowed depending on what they + * Index access methods (overridden or disallowed depending on what they * do). 
*/

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java	2011-06-02 12:52:57 UTC (rev 4600)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java	2011-06-02 13:10:20 UTC (rev 4601)
@@ -50,14 +50,13 @@
  */
 public class TemporaryStoreFactory {
 
-    protected static final transient Logger log = Logger
+    private static final transient Logger log = Logger
             .getLogger(TemporaryStoreFactory.class);
 
     /**
      * Configuration options for the {@link TemporaryStoreFactory}.
      * 
      * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
-     * @version $Id$
      */
     public interface Options {
 
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java	2011-06-02 12:52:57 UTC (rev 4600)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java	2011-06-02 13:10:20 UTC (rev 4601)
@@ -107,15 +107,14 @@
             // The buffer size must be at least 1k for these tests.
             assertTrue(DirectBufferPool.INSTANCE.getBufferCapacity() >= Bytes.kilobyte32);
 
-            WriteCache writeCache = new WriteCache.FileChannelWriteCache(0, buf,
+            final WriteCache writeCache = new WriteCache.FileChannelWriteCache(0, buf,
                     true, isHighlyAvailable, false, opener);
-
-            long addr1 = 0;
-            long addr2 = 12800;
-            long addr3 = 24800;
-            ByteBuffer data1 = getRandomData(512);
-            int chk1 = ChecksumUtility.threadChk.get().checksum(data1, 0/* offset */, data1.limit());
 
+            final long addr1 = 0;
+            final long addr2 = 12800;
+            final long addr3 = 24800;
+            final ByteBuffer data1 = getRandomData(512);
+            final int chk1 = ChecksumUtility.threadChk.get().checksum(data1, 0/* offset */, data1.limit());
 
             writeCache.write(addr1, data1, chk1);
 
             data1.flip();
 
@@ -131,8 +130,9 @@
                 fail("Expected ChecksumError");
 
             } catch (ChecksumError ce) {
 
-                System.out.println("Expected: " + ce.getMessage());
-            }
+                if (log.isInfoEnabled())
+                    log.info("Expected: " + ce.getMessage());
+            }
 
         } finally {
 
             DirectBufferPool.INSTANCE.release(buf);
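The checkdata() helper documented above depends on each cached record ending in a 4-byte checksum, which is why it recomputes checksums over recordLength - 4 bytes. The following standalone sketch illustrates that record layout; the names are hypothetical and java.util.zip.CRC32 merely stands in for the store's own ChecksumUtility:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

// Sketch of a record whose last 4 bytes are its checksum (hypothetical names).
public class ChecksummedRecordSketch {

    static ByteBuffer writeRecord(final byte[] payload) {

        final ByteBuffer b = ByteBuffer.allocate(payload.length + 4);

        b.put(payload);

        final CRC32 chk = new CRC32();
        chk.update(payload, 0, payload.length);

        b.putInt((int) chk.getValue()); // trailing 4-byte checksum field.

        b.flip();

        return b;

    }

    static boolean verify(final ByteBuffer record) {

        // length of the record w/o the checksum field.
        final int reclen = record.remaining() - 4;

        final byte[] payload = new byte[reclen];
        record.get(payload);

        final CRC32 chk = new CRC32();
        chk.update(payload, 0, reclen);

        return record.getInt() == (int) chk.getValue();

    }

    public static void main(final String[] args) {

        System.out.println(verify(writeRecord("hello".getBytes()))); // true

    }

}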
From: <mrp...@us...> - 2011-06-06 17:37:49
|
Revision: 4634 http://bigdata.svn.sourceforge.net/bigdata/?rev=4634&view=rev Author: mrpersonick Date: 2011-06-06 17:37:43 +0000 (Mon, 06 Jun 2011) Log Message: ----------- fixed a test case Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestPrefixSearch.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-06-06 16:01:07 UTC (rev 4633) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-06-06 17:37:43 UTC (rev 4634) @@ -920,6 +920,24 @@ } + public Hiterator search(final String query, final String languageCode, + final double minCosine, final boolean prefixMatch) { + + return search( // + query,// + languageCode,// + prefixMatch,// + minCosine, // minCosine + 1.0d, // maxCosine + 1, // minRank + 10000, // maxRank + false, // matchAllTerms + this.timeout,// + TimeUnit.MILLISECONDS// + ); + + } + /** * Performs a full text search against indexed documents returning a hit * list using the configured default timeout. @@ -1263,7 +1281,20 @@ } + /** + * Used to support test cases. + */ public int count(final String query, final String languageCode, + final boolean prefixMatch) { + + return count(query, languageCode, prefixMatch, 0.0d, 1.0d, 1, 10000, + false, this.timeout,// + TimeUnit.MILLISECONDS); + + } + + + public int count(final String query, final String languageCode, final boolean prefixMatch, final double minCosine, final double maxCosine, final int minRank, final int maxRank, final boolean matchAllTerms, @@ -1456,6 +1487,12 @@ Hit[] a = hits.values().toArray(new Hit[nhits]); Arrays.sort(a); + + if (log.isDebugEnabled()) { + log.debug("before min/max cosine/rank pruning:"); + for (Hit h : a) + log.debug(h); + } /* * If maxCosine is specified, prune the hits that are above the max @@ -1541,6 +1578,9 @@ final int newMax = maxRank-minRank+1; + if (log.isDebugEnabled()) + log.debug("new max rank: " + newMax); + /* * If maxRank is specified, prune the hits that rank lower than the max */ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestPrefixSearch.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestPrefixSearch.java 2011-06-06 16:01:07 UTC (rev 4633) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestPrefixSearch.java 2011-06-06 17:37:43 UTC (rev 4634) @@ -107,7 +107,8 @@ if (log.isInfoEnabled()) log.info("hits:" + itr); - assertEquals(2, itr.size()); + assertEquals(2, ndx.count("The quick brown dog", + languageCode, false/* prefixMatch */)); assertTrue(itr.hasNext()); @@ -134,7 +135,8 @@ if(log.isInfoEnabled()) log.info("hits:" + itr); - assertEquals(2, itr.size()); + assertEquals(2, ndx.count("The qui bro do", + languageCode, true/*prefixMatch*/)); assertTrue(itr.hasNext()); @@ -162,7 +164,8 @@ if(log.isInfoEnabled()) log.info("hits:" + itr); - assertEquals(2, itr.size()); + assertEquals(2, ndx + .count("brown", languageCode, false/* prefixMatch */)); } @@ -176,7 +179,8 @@ if(log.isInfoEnabled()) log.info("hits:" + itr); - assertEquals(2, itr.size()); + assertEquals(2, ndx + .count("brown", languageCode, true/* prefixMatch */)); } @@ -190,7 +194,8 @@ if(log.isInfoEnabled()) 
log.info("hits:" + itr); - assertEquals(2, itr.size()); + assertEquals(2, ndx + .count("bro", languageCode, true/* prefixMatch */)); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2011-06-07 09:13:34
|
Revision: 4636 http://bigdata.svn.sourceforge.net/bigdata/?rev=4636&view=rev Author: martyncutcher Date: 2011-06-07 09:13:25 +0000 (Tue, 07 Jun 2011) Log Message: ----------- Implement IBufferAccess interface to isolate management of direct ByteBuffers Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/raba/codec/TestCanonicalHuffmanRabaCoder.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/AbstractJournalTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/ProxyTestCase.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/StressTestGroupCommit.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestCompactJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/journal/TestConcurrentJournal.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestReceiveBuffer.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsApplication.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCacheServiceLifetime.java Property Changed: ---------------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2011-06-06 21:27:16 UTC 
(rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/BytesUtil.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -1195,7 +1195,7 @@ return (ret & bitmasks[l]) << shift; } - public static long tstGetBits64(final byte[] a, final int off, final int len) { + public static long altGetBits64(final byte[] a, final int off, final int len) { if (a == null || off < 0 || len < 0 || len > 64) throw new IllegalArgumentException(); @@ -1209,7 +1209,7 @@ */ // byte in which the bit range begins. - int bi = byteIndexForBit(off); + int bi = off/8; // start bit offset int bo = off % 8; // bits remaining in current byte @@ -1236,7 +1236,7 @@ /* * Is there any advantage in 32-bit math over 64-bit? */ - public static int tstGetBits32(final byte[] a, final int off, final int len) { + public static int altGetBits32(final byte[] a, final int off, final int len) { if (a == null || off < 0 || len < 0 || len > 64) throw new IllegalArgumentException(); @@ -1250,7 +1250,7 @@ */ // byte in which the bit range begins. - int bi = byteIndexForBit(off); + int bi = off/8; // start bit offset int bo = off % 8; // bits remaining in current byte @@ -1287,7 +1287,21 @@ return ret; } + + /** + * Some benchmarks seem to indicate that altGetBits32 is faster than getBits + * for smaller byte counts. OTOH the cost of the redirection may outweigh + * any benefit. + */ + public static int optGetBits(final byte[] a, final int off, final int len) { + if (len <= 16) { + return altGetBits32(a, off, len); + } else { + return getBits(a, off, len); + } + } + /** * Return the n-bit integer corresponding to the inclusive bit range of the * byte[]. Bit ZERO (0) is the Most Significant Bit (MSB). Bit positions Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -10,6 +10,7 @@ import com.bigdata.btree.IndexSegment.ImmutableNodeFactory.ImmutableLeaf; import com.bigdata.btree.data.ILeafData; import com.bigdata.io.DirectBufferPool; +import com.bigdata.io.IBufferAccess; /** * A fast iterator based on multi-block IO for the {@link IndexSegment}. This @@ -75,7 +76,7 @@ /** * The buffer. */ - private volatile ByteBuffer buffer; + private volatile IBufferAccess buffer; /** * The inclusive lower bound -or- <code>null</code> if there is no lower @@ -274,13 +275,13 @@ throw new RuntimeException(e); } } - return buffer; + return buffer.buffer(); } private void releaseBuffer() { if (buffer != null) { try { - pool.release(buffer); + buffer.release(); } catch (InterruptedException e) { // Propagate interrupt. Thread.currentThread().interrupt(); @@ -373,7 +374,7 @@ // acquire the buffer from the pool. acquireBuffer(); // Read the first block. - nextBlock(firstLeafAddr, buffer); + nextBlock(firstLeafAddr, buffer.buffer()); // Extract the first leaf. final ImmutableLeaf leaf = getLeaf(firstLeafAddr); // Return the first leaf. @@ -408,7 +409,7 @@ } if (offset + nbytes > blockOffset + blockLength) { // read the next block. - nextBlock(nextLeafAddr, buffer); + nextBlock(nextLeafAddr, buffer.buffer()); } } // extract the next leaf. @@ -444,7 +445,7 @@ final int offsetWithinBuffer = (int)(offset - blockOffset); // read only view of the leaf in the buffer. 
- final ByteBuffer tmp = buffer.asReadOnlyBuffer(); + final ByteBuffer tmp = buffer.buffer().asReadOnlyBuffer(); tmp.limit(offsetWithinBuffer + nbytes); tmp.position(offsetWithinBuffer); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentStore.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/IndexSegmentStore.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -47,6 +47,7 @@ import com.bigdata.counters.OneShotInstrument; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.io.SerializerUtil; import com.bigdata.journal.AbstractJournal; @@ -120,7 +121,7 @@ * the use of this buffer means that reading a node that has fallen off of * the queue does not require any IO. */ - private volatile ByteBuffer buf_nodes; + private volatile IBufferAccess buf_nodes; /** * The random access file used to read the index segment. This is @@ -704,7 +705,7 @@ try { // release the buffer back to the pool. - DirectBufferPool.INSTANCE.release(buf_nodes); + buf_nodes.release(); } catch (Throwable t) { @@ -1020,7 +1021,7 @@ final ByteBuffer tmp; synchronized(this) { - tmp = buf_nodes.asReadOnlyBuffer(); + tmp = buf_nodes.buffer().asReadOnlyBuffer(); } @@ -1351,13 +1352,14 @@ + ", #bytes=" + checkpoint.extentNodes + ", file=" + file); // #of bytes to read. - buf_nodes.limit((int)checkpoint.extentNodes); + final ByteBuffer tmp = buf_nodes.buffer(); + tmp.limit((int)checkpoint.extentNodes); // attempt to read the nodes into the buffer. - FileChannelUtility.readAll(opener, buf_nodes, + FileChannelUtility.readAll(opener, tmp, checkpoint.offsetNodes); - buf_nodes.flip(); + tmp.flip(); } catch (Throwable t1) { @@ -1374,7 +1376,7 @@ try { // release buffer back to the pool. - DirectBufferPool.INSTANCE.release(buf_nodes); + buf_nodes.release(); } catch (Throwable t) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -22,6 +22,7 @@ import com.bigdata.ha.pipeline.HASendService; import com.bigdata.ha.pipeline.HAReceiveService.IHAReceiveCallback; import com.bigdata.io.DirectBufferPool; +import com.bigdata.io.IBufferAccess; import com.bigdata.journal.ha.HAWriteMessage; import com.bigdata.quorum.QuorumMember; import com.bigdata.quorum.QuorumStateChangeListener; @@ -158,7 +159,7 @@ * that do not relay will have a <code>null</code> at their * corresponding index. */ - private ByteBuffer receiveBuffer; + private IBufferAccess receiveBuffer; /** * Cached metadata about the downstream service. @@ -362,7 +363,7 @@ /* * Release the buffer back to the pool. */ - DirectBufferPool.INSTANCE.release(receiveBuffer); + receiveBuffer.release(); } catch (InterruptedException e) { throw new RuntimeException(e); } finally { @@ -510,7 +511,7 @@ * if this is the leader. 
*/ protected ByteBuffer getReceiveBuffer() { - return receiveBuffer; + return receiveBuffer.buffer(); } public HASendService getHASendService() { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -70,28 +70,17 @@ * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> */ - private static class BufferState { + private class BufferState implements IBufferAccess { /** * The buffer instance. */ - private final ByteBuffer buf; - - /** - * <code>true</code> iff the buffer is currently acquired. - */ - private boolean acquired; - -// /** -// * The #of times this buffer has been acquired. -// */ -// private long nacquired = 0L; + private ByteBuffer buf; - BufferState(final ByteBuffer buf, final boolean acquired) { + BufferState(final ByteBuffer buf) { if (buf == null) throw new IllegalArgumentException(); this.buf = buf; - this.acquired = acquired; } /** @@ -130,8 +119,37 @@ */ throw new AssertionError(); } + + // Implement IDirectBuffer methods + public ByteBuffer buffer() { + return buf; + } + + public void release() throws InterruptedException { + release(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + } + + public void release(long timeout, TimeUnit units) throws InterruptedException { + if (buf != null) { + DirectBufferPool.this.release(buf, timeout, units); + buf = null; + } else { + throw new IllegalStateException("Buffer has already been released"); + } + } + protected void finalize() { + if (buf != null) { + try { + DirectBufferPool.this.release(buf); + } catch (InterruptedException e) { + // ignore + } + log.warn("Buffer released on finalize"); + } + } + } /** @@ -145,20 +163,9 @@ * Note: This is NOT a weak reference collection since the JVM will leak * native memory. */ - final private BlockingQueue<BufferState> pool; + final private BlockingQueue<ByteBuffer> pool; /** - * Used to recognize {@link ByteBuffer}s allocated by this pool so that we - * can refuse offered buffers that were allocated elsewhere (a paranoia - * feature which could be dropped). - * <p> - * Note: {@link LinkedHashSet} is used here for its fast iterator semantics - * since we need to do a linear scan of this collection in - * {@link #getBufferState(ByteBuffer)}. - */ - final private LinkedHashSet<BufferState> allocated; - - /** * The number {@link ByteBuffer}s allocated (must use {@link #lock} for * updates or reads to be atomic). This counter is incremented each time a * buffer is allocated. Since we do not free buffers when they are released @@ -426,11 +433,8 @@ this.bufferCapacity = bufferCapacity; - // Note: This is required in order to detect double-opens. - this.allocated = new LinkedHashSet<BufferState>(); + this.pool = new LinkedBlockingQueue<ByteBuffer>(poolCapacity); - this.pool = new LinkedBlockingQueue<BufferState>(poolCapacity); - pools.add(this); } @@ -456,7 +460,7 @@ * @throws OutOfMemoryError * if there is not enough free memory to fulfill the request. 
*/ - public ByteBuffer acquire() throws InterruptedException { + public IBufferAccess acquire() throws InterruptedException { try { @@ -493,7 +497,7 @@ * @throws InterruptedException * @throws TimeoutException */ - public ByteBuffer acquire(final long timeout, final TimeUnit unit) + public IBufferAccess acquire(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException { if(log.isInfoEnabled()) @@ -510,17 +514,13 @@ } // the head of the pool must exist. - final BufferState state = pool.take(); + final ByteBuffer buf = pool.take(); - if (state.acquired) - throw new RuntimeException("Buffer already acquired"); - - state.acquired = true; acquired++; totalAcquireCount.increment(); // limit -> capacity; pos-> 0; mark cleared. - state.buf.clear(); + buf.clear(); if (log.isTraceEnabled()) { final Throwable t = new RuntimeException( @@ -528,7 +528,7 @@ log.trace(t, t); } - return state.buf; + return new BufferState(buf); } finally { @@ -553,7 +553,7 @@ * if the buffer has already been released. * @throws InterruptedException */ - public void release(final ByteBuffer b) throws InterruptedException { + final protected void release(final ByteBuffer b) throws InterruptedException { if (!release(b, Long.MAX_VALUE, TimeUnit.MILLISECONDS)) { @@ -578,7 +578,7 @@ * if the buffer has already been released. * @throws InterruptedException */ - public boolean release(final ByteBuffer b, final long timeout, + final private boolean release(final ByteBuffer b, final long timeout, final TimeUnit units) throws InterruptedException { if(log.isInfoEnabled()) @@ -590,20 +590,10 @@ lock.lock(); try { - - final BufferState state = getBufferState(b); - - // Check for double-release! - if (!state.acquired) { - log.error("Buffer already released."); - throw new IllegalArgumentException("buffer already released."); - } - // add to the pool. - if(!pool.offer(state, timeout, units)) + if(!pool.offer(b, timeout, units)) return false; - state.acquired = false; acquired--; totalReleaseCount.increment(); @@ -677,14 +667,9 @@ // update the pool size. size++; - // wrap with state metadata. - final BufferState state = new BufferState(b, false/* acquired */); - // add to the set of known buffers - allocated.add(state); - // add to the pool. - pool.add(state); + pool.add(b); /* * There is now a buffer in the pool and the caller will get it @@ -738,45 +723,6 @@ } /** - * Note: There is really no reason why we could not accept "donated" direct - * {@link ByteBuffer}s as long as they conform with the constraints on the - * {@link DirectBufferPool}. - * - * @param b - * The buffer. - */ - private BufferState getBufferState(final ByteBuffer b) { - - assert lock.isHeldByCurrentThread(); - - if (b == null) - throw new IllegalArgumentException("null reference"); - - if (b.capacity() != bufferCapacity) - throw new IllegalArgumentException("wrong capacity"); - - if(!b.isDirect()) - throw new IllegalArgumentException("not direct"); - - /* - * Linear scan for a BufferState object having that ByteBuffer - * reference. - */ - for (BufferState x : allocated) { - - if (x.buf == b) { - - return x; - - } - - } - - throw new IllegalArgumentException("Buffer not allocated by this pool."); - - } - - /** * Return the {@link CounterSet} for the {@link DirectBufferPool}. * * @return The counters. 
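The reworked pool above drops the old scheme, in which callers held raw ByteBuffers and release() had to scan an "allocated" set to validate them, in favor of a handle (BufferState implementing IBufferAccess) that can release its buffer exactly once. A minimal standalone sketch of the handle pattern, with hypothetical names; the real pool adds timeouts, counters and a finalizer safety net:

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of a pool handing out single-release handles (hypothetical names).
public class HandlePoolSketch {

    private final BlockingQueue<ByteBuffer> pool = new LinkedBlockingQueue<ByteBuffer>();

    public HandlePoolSketch(final int n, final int capacity) {

        for (int i = 0; i < n; i++)
            pool.add(ByteBuffer.allocateDirect(capacity));

    }

    public Handle acquire() throws InterruptedException {

        return new Handle(pool.take());

    }

    /** Plays the role of IBufferAccess. */
    public class Handle {

        private ByteBuffer buf;

        private Handle(final ByteBuffer buf) {
            this.buf = buf;
        }

        public ByteBuffer buffer() {
            return buf;
        }

        public void release() {

            if (buf == null) // double-release is detected locally.
                throw new IllegalStateException("Buffer has already been released");

            pool.add(buf);

            buf = null; // drop the reference so the handle cannot leak it.

        }

    }

    public static void main(final String[] args) throws InterruptedException {

        final HandlePoolSketch p = new HandlePoolSketch(1, 1024);

        final Handle h = p.acquire();

        h.buffer().putInt(42);

        h.release(); // ok.

        try {
            h.release();
        } catch (IllegalStateException expected) {
            System.out.println("double release rejected");
        }

    }

}

Localizing the double-release check in the handle is what allows the pool to discard the LinkedHashSet of allocated buffers and the linear scan in getBufferState().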
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -384,7 +384,7 @@ * context. Last element in the list is the {@link ByteBuffer} against * which allocations are currently being made. */ - private final LinkedList<ByteBuffer> nativeBuffers = new LinkedList<ByteBuffer>(); + private final LinkedList<IBufferAccess> directBuffers = new LinkedList<IBufferAccess>(); /** * The set of native {@link ByteBuffer}s in use by this allocation @@ -407,7 +407,7 @@ return getClass().getName() + "{key=" + key + ",#allocations=" + allocations.size() + ",#nativeBuffers=" - + nativeBuffers.size() + "}"; + + directBuffers.size() + "}"; } @@ -432,9 +432,9 @@ while (nbytes > 0) { - ByteBuffer nativeBuffer = nativeBuffers.peekLast(); + IBufferAccess directBuffer = directBuffers.peekLast(); - if (nativeBuffer == null) { + if (directBuffer == null) { if (!open.get()) { /* @@ -448,11 +448,12 @@ throw new IllegalStateException(); } - nativeBuffers.add(nativeBuffer = directBufferPool + directBuffers.add(directBuffer = directBufferPool .acquire()); } + final ByteBuffer nativeBuffer = directBuffer.buffer(); final int remaining = nativeBuffer.remaining(); final int allocSize = Math.min(remaining, nbytes); @@ -523,12 +524,12 @@ allocations.clear(); // release backing buffers. - final Iterator<ByteBuffer> bitr = nativeBuffers.iterator(); + final Iterator<IBufferAccess> bitr = directBuffers.iterator(); while(bitr.hasNext()) { - final ByteBuffer b = bitr.next(); + final IBufferAccess b = bitr.next(); while (true) { try { - directBufferPool.release(b); + b.release(); break; } catch (InterruptedException e) { interrupted = true; Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -0,0 +1,37 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.io; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; + +public interface IBufferAccess { + // return the byte buffer + public ByteBuffer buffer(); + + // release the ByteBuffer, returning to owning pool + public void release() throws InterruptedException; + public void release(long time, TimeUnit unit) throws InterruptedException; +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -33,6 +33,7 @@ import com.bigdata.counters.CounterSet; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.rwstore.RWStore; @@ -66,7 +67,7 @@ * into a single IO. */ // private final ByteBuffer m_data; - private final AtomicReference<ByteBuffer> m_data = new AtomicReference<ByteBuffer>(); + private final AtomicReference<IBufferAccess> m_data = new AtomicReference<IBufferAccess>(); /** * The offset on the backing channel at which the data in {@link #m_data} @@ -120,7 +121,7 @@ // synchronized public void release() throws InterruptedException { - final ByteBuffer tmp = m_data.get(); + final IBufferAccess tmp = m_data.get(); if (tmp == null) { @@ -131,7 +132,7 @@ if (m_data.compareAndSet(tmp/* expected */, null/* update */)) { - DirectBufferPool.INSTANCE.release(tmp); + tmp.release(); } @@ -161,7 +162,7 @@ final int slot_len = m_store.getSlotSize(data_len); int nwrites = 0; - final ByteBuffer m_data = this.m_data.get(); + final ByteBuffer m_data = this.m_data.get().buffer(); if (slot_len > m_data.remaining()) { /* * There is not enough room in [m_data] to absorb the caller's data @@ -211,7 +212,7 @@ public int flush(final IReopenChannel<FileChannel> opener) throws IOException { - final ByteBuffer m_data = this.m_data.get(); + final ByteBuffer m_data = this.m_data.get().buffer(); if (m_data.position() == 0) { // NOP. @@ -238,7 +239,7 @@ synchronized public void reset() { - final ByteBuffer m_data = this.m_data.get(); + final ByteBuffer m_data = this.m_data.get().buffer(); // reset the buffer state. 
m_data.position(0); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -53,6 +53,7 @@ import com.bigdata.counters.Instrument; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.journal.AbstractBufferStrategy; import com.bigdata.journal.StoreTypeEnum; @@ -121,7 +122,7 @@ * Note: This is an {@link AtomicReference} since we want to clear this * field in {@link #close()}. */ - final private AtomicReference<ByteBuffer> buf; + final private AtomicReference<IBufferAccess> buf; /** * The read lock allows concurrent {@link #acquire()}s while the write lock @@ -167,7 +168,7 @@ // latch.inc(); - final ByteBuffer tmp = buf.get(); + final IBufferAccess tmp = buf.get(); if (tmp == null) { @@ -178,7 +179,7 @@ } // Note: The ReadLock is still held! - return tmp; + return tmp.buffer(); } catch (Throwable t) { @@ -216,7 +217,7 @@ */ ByteBuffer peek() { - final ByteBuffer b = buf.get(); + final ByteBuffer b = buf.get().buffer(); return b == null ? null : b.asReadOnlyBuffer(); @@ -387,7 +388,7 @@ * * @throws InterruptedException */ - public WriteCache(ByteBuffer buf, final boolean scatteredWrites, final boolean useChecksum, + public WriteCache(IBufferAccess buf, final boolean scatteredWrites, final boolean useChecksum, final boolean isHighlyAvailable, final boolean bufferHasData) throws InterruptedException { if (bufferHasData && buf == null) @@ -421,17 +422,17 @@ } // save reference to the write cache. - this.buf = new AtomicReference<ByteBuffer>(buf); + this.buf = new AtomicReference<IBufferAccess>(buf); // the capacity of the buffer in bytes. - this.capacity = buf.capacity(); + this.capacity = buf.buffer().capacity(); /* * Discard anything in the buffer, resetting the position to zero, the * mark to zero, and the limit to the capacity. */ if (!bufferHasData) { - buf.clear(); + buf.buffer().clear(); } /* @@ -533,7 +534,7 @@ */ final int remaining() { - final int remaining = capacity - buf.get().position(); + final int remaining = capacity - buf.get().buffer().position(); return remaining; @@ -544,7 +545,7 @@ */ public final int bytesWritten() { - return buf.get().position(); + return buf.get().buffer().position(); } @@ -1011,7 +1012,7 @@ try { - final ByteBuffer tmp = this.buf.get(); + final ByteBuffer tmp = this.buf.get().buffer(); if (tmp == null) throw new IllegalStateException(); @@ -1196,7 +1197,7 @@ // // wait until there are no readers using the buffer. // latch.await(); - final ByteBuffer tmp = buf.get(); + final ByteBuffer tmp = buf.get().buffer(); if (tmp == null) { @@ -1246,7 +1247,7 @@ */ // position := 0; limit := capacity. 
- final ByteBuffer tmp = buf.get(); + final IBufferAccess tmp = buf.get(); if (tmp == null) { @@ -1259,13 +1260,13 @@ try { - _resetState(tmp); + _resetState(tmp.buffer()); } finally { if (releaseBuffer) { - DirectBufferPool.INSTANCE.release(tmp); + tmp.release(); } @@ -1572,7 +1573,7 @@ * * @throws InterruptedException */ - public FileChannelWriteCache(final long baseOffset, final ByteBuffer buf, final boolean useChecksum, + public FileChannelWriteCache(final long baseOffset, final IBufferAccess buf, final boolean useChecksum, final boolean isHighlyAvailable, final boolean bufferHasData, final IReopenChannel<FileChannel> opener) throws InterruptedException { @@ -1659,7 +1660,7 @@ * * @throws InterruptedException */ - public FileChannelScatteredWriteCache(final ByteBuffer buf, final boolean useChecksum, + public FileChannelScatteredWriteCache(final IBufferAccess buf, final boolean useChecksum, final boolean isHighlyAvailable, final boolean bufferHasData, final IReopenChannel<FileChannel> opener, final BufferedWrite bufferedWrite) throws InterruptedException { @@ -2243,7 +2244,7 @@ writeLock.lockInterruptibly(); try { - resetRecordMapFromBuffer(buf.get(), recordMap); + resetRecordMapFromBuffer(buf.get().buffer(), recordMap); } finally { writeLock.unlock(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -60,6 +60,7 @@ import com.bigdata.ha.HAPipelineGlue; import com.bigdata.ha.QuorumPipeline; import com.bigdata.io.DirectBufferPool; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.io.writecache.WriteCache.WriteCacheCounters; import com.bigdata.journal.AbstractBufferStrategy; @@ -833,7 +834,7 @@ * * @throws InterruptedException */ - abstract public WriteCache newWriteCache(ByteBuffer buf, + abstract public WriteCache newWriteCache(IBufferAccess buf, boolean useChecksum, boolean bufferHasData, IReopenChannel<? 
extends Channel> opener) throws InterruptedException; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -1363,8 +1363,11 @@ } - nclose.incrementAndGet(); - + int cl = nclose.incrementAndGet(); + if (cl > nopen.get()) { + throw new IllegalStateException("More calls to close than open!"); + } + } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -45,6 +45,7 @@ import com.bigdata.counters.OneShotInstrument; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.journal.WORMStrategy.StoreCounters; import com.bigdata.rawstore.Bytes; @@ -281,7 +282,7 @@ * condition without the write cache as measured by * {@link AbstractMRMWTestCase}. */ - private ByteBuffer buf; + private IBufferAccess buf; /** * An index into the write cache used for read through on the cache. The @@ -323,7 +324,7 @@ * * @param capacity */ - public WriteCache(final ByteBuffer writeCache) { + public WriteCache(final IBufferAccess writeCache) { if (writeCache == null) throw new IllegalArgumentException(); @@ -332,13 +333,13 @@ this.buf = writeCache; // the capacity of the buffer in bytes. - final int capacity = writeCache.capacity(); + final int capacity = writeCache.buffer().capacity(); /* * Discard anything in the buffer, resetting the position to zero, * the mark to zero, and the limit to the capacity. */ - writeCache.clear(); + writeCache.buffer().clear(); /* * An estimate of the #of records that might fit within the write @@ -358,7 +359,7 @@ */ final int position() { - return buf.position(); + return buf.buffer().position(); } @@ -367,25 +368,25 @@ */ final int capacity() { - return buf.capacity(); + return buf.buffer().capacity(); } void flush() { // #of bytes to write on the disk. - final int nbytes = buf.position(); + final int nbytes = buf.buffer().position(); if (nbytes == 0) return; // limit := position; position := 0; - buf.flip(); + buf.buffer().flip(); // write the data on the disk file. - writeOnDisk(buf, writeCacheOffset, true/*append*/); + writeOnDisk(buf.buffer(), writeCacheOffset, true/*append*/); // position := 0; limit := capacity. - buf.clear(); + buf.buffer().clear(); // clear the index since all records were flushed to disk. writeCacheIndex.clear(); @@ -404,10 +405,10 @@ void write(final long addr, final ByteBuffer data) { // the position() at which the record is cached. - final int position = buf.position(); + final int position = buf.buffer().position(); // copy the record into the cache. - buf.put(data); + buf.buffer().put(data); // add the record to the write cache index for read(addr). 
writeCacheIndex.put(Long.valueOf(addr), Integer.valueOf(position)); @@ -456,7 +457,7 @@ final int pos = writeCachePosition; // create a view with same offset, limit and position. - final ByteBuffer tmp = buf.duplicate(); + final ByteBuffer tmp = buf.buffer().duplicate(); // adjust the view to just the record of interest. tmp.limit(pos + nbytes); @@ -1305,7 +1306,7 @@ if (fileMetadata.writeCacheEnabled && !fileMetadata.readOnly && fileMetadata.closeTime == 0L) { - final ByteBuffer tmp; + final IBufferAccess tmp; try { /* * Note: a timeout here is not such a good idea. It could be @@ -1318,7 +1319,7 @@ } if (log.isInfoEnabled()) - log.info("Enabling writeCache: capacity=" + tmp.capacity()); + log.info("Enabling writeCache: capacity=" + tmp.buffer().capacity()); writeCache = new WriteCache(tmp); @@ -2531,14 +2532,14 @@ synchronized private final void releaseWriteCache() { - final ByteBuffer tmp = writeCache == null ? null : writeCache.buf; + final IBufferAccess tmp = writeCache == null ? null : writeCache.buf; if (tmp == null) return; try { - DirectBufferPool.INSTANCE.release(tmp); + tmp.release(); } catch (InterruptedException e) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -30,6 +30,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import com.bigdata.io.IBufferAccess; import com.bigdata.journal.ha.HAWriteMessage; import com.bigdata.quorum.Quorum; @@ -48,7 +49,7 @@ * @throws InterruptedException * @throws IOException */ - void writeRawBuffer(HAWriteMessage msg, ByteBuffer b) throws IOException, + void writeRawBuffer(HAWriteMessage msg, IBufferAccess b) throws IOException, InterruptedException; /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -34,6 +34,7 @@ import com.bigdata.counters.CounterSet; import com.bigdata.ha.QuorumRead; +import com.bigdata.io.IBufferAccess; import com.bigdata.journal.ha.HAWriteMessage; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.quorum.Quorum; @@ -592,7 +593,7 @@ * IHABufferStrategy */ - public void writeRawBuffer(final HAWriteMessage msg, final ByteBuffer b) + public void writeRawBuffer(final HAWriteMessage msg, final IBufferAccess b) throws IOException, InterruptedException { m_store.writeRawBuffer(msg, b); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -44,6 +44,7 @@ import com.bigdata.counters.striped.StripedCounters; import com.bigdata.ha.QuorumRead; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import 
com.bigdata.io.IReopenChannel; import com.bigdata.io.writecache.WriteCache; import com.bigdata.io.writecache.WriteCacheService; @@ -898,7 +899,7 @@ fileMetadata.writeCacheBufferCount, useChecksums, extent, opener, quorum) { @Override - public WriteCache newWriteCache(final ByteBuffer buf, + public WriteCache newWriteCache(final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<? extends Channel> opener) @@ -935,7 +936,7 @@ */ private class WriteCacheImpl extends WriteCache.FileChannelWriteCache { - public WriteCacheImpl(final long baseOffset, final ByteBuffer buf, + public WriteCacheImpl(final long baseOffset, final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<FileChannel> opener) @@ -2244,7 +2245,7 @@ // NOP } - public void writeRawBuffer(final HAWriteMessage msg, final ByteBuffer b) + public void writeRawBuffer(final HAWriteMessage msg, final IBufferAccess b) throws IOException, InterruptedException { writeCacheService.newWriteCache(b, useChecksums, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -58,6 +58,7 @@ import com.bigdata.counters.striped.StripedCounters; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileChannelUtility; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.io.writecache.BufferedWrite; import com.bigdata.io.writecache.IBufferedWriter; @@ -515,7 +516,7 @@ private TreeMap<Integer, Integer> m_lockAddresses = null; class WriteCacheImpl extends WriteCache.FileChannelScatteredWriteCache { - public WriteCacheImpl(final ByteBuffer buf, + public WriteCacheImpl(final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<FileChannel> opener) @@ -663,7 +664,7 @@ m_reopener, m_quorum) { @SuppressWarnings("unchecked") - public WriteCache newWriteCache(final ByteBuffer buf, + public WriteCache newWriteCache(final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<? 
extends Channel> opener) @@ -4441,7 +4442,7 @@ } - public void writeRawBuffer(final HAWriteMessage msg, final ByteBuffer b) + public void writeRawBuffer(final HAWriteMessage msg, final IBufferAccess b) throws IOException, InterruptedException { m_writeCache.newWriteCache(b, true/* useChecksums */, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -31,6 +31,7 @@ import org.apache.log4j.Logger; +import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; import com.bigdata.io.writecache.WriteCache; import com.bigdata.io.writecache.WriteCacheService; @@ -59,7 +60,7 @@ * Provide default FileChannelScatteredWriteCache */ @Override - public WriteCache newWriteCache(final ByteBuffer buf, + public WriteCache newWriteCache(final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<? extends Channel> opener) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -38,6 +38,7 @@ import com.bigdata.counters.ICounterSetAccess; import com.bigdata.counters.OneShotInstrument; import com.bigdata.io.DirectBufferPool; +import com.bigdata.io.IBufferAccess; /** * The MemoryManager manages an off-heap Direct {@link ByteBuffer}. It uses the @@ -66,7 +67,7 @@ * The set of direct {@link ByteBuffer} which are currently being managed by * this {@link MemoryManager} instance. */ - private final ByteBuffer[] m_resources; + private final IBufferAccess[] m_resources; /** * The lock used to serialize all allocation/deallocation requests. This is @@ -160,7 +161,7 @@ m_pool = pool; - m_resources = new ByteBuffer[sectors]; + m_resources = new IBufferAccess[sectors]; m_sectorSize = pool.getBufferCapacity(); @@ -182,10 +183,10 @@ private void releaseDirectBuffers() { // release to pool. for (int i = 0; i < m_resources.length; i++) { - final ByteBuffer buf = m_resources[i]; + final IBufferAccess buf = m_resources[i]; if (buf != null) { try { - DirectBufferPool.INSTANCE.release(buf); + buf.release(); } catch (InterruptedException e) { log.error("Unable to release direct buffers", e); } finally { @@ -335,7 +336,7 @@ */ // Allocate new buffer (blocking request). - final ByteBuffer nbuf; + final IBufferAccess nbuf; try { if (blocks) { nbuf = m_pool.acquire(); @@ -629,7 +630,7 @@ final long paddr = sector.getPhysicalAddress(offset); // Duplicate the buffer to avoid side effects to position and limit. 
- final ByteBuffer ret = m_resources[sector.m_index].duplicate(); + final ByteBuffer ret = m_resources[sector.m_index].buffer().duplicate(); final int bufferAddr = (int) (paddr - (sector.m_index * m_sectorSize)); Added: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsApplication.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsApplication.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsApplication.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -0,0 +1,219 @@ +package com.bigdata.btree; + +import java.io.IOException; +import java.util.Random; + +/** + * Rather than run in a JUnit, the performance tests are best run + * standalone. + * + * @author Martyn Cutcher + * + */ +public class TestGetBitsApplication { + + public static void test_perf_getBits7() throws IOException { + doTestBits32("test_perf_getBits7", 7); + } + public static void test_perf_getBits10() throws IOException { + doTestBits32("test_perf_getBits10", 10); + } + public static void test_perf_getBits21() throws IOException { + doTestBits32("test_perf_getBits21", 21); + } + public static void test_perf_getBits47() throws IOException { + doTestBits64("test_perf_getBits47", 47); + } + + static void doTestBits32(final String msg, final int bits) throws IOException { + + final Random r = new Random(); + + // #of + final int limit = 1000000000; // 100000000; + + // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. + final int len = 124500; // + r.nextInt(Bytes.kilobyte32 * 8) + 1; + final int bitlen = len << 3; + // Fill array with random data. + final byte[] b = new byte[len]; + r.nextBytes(b); + + int[] sliceOffsets = new int[] { + 1245, + 12450, + 102000, + 80000, + 120000 + }; + + int runs = 0; + long start = System.currentTimeMillis(); + for (int i = 0; i < limit; i++) { + + // start of the bit slice. + final int sliceBitOff = sliceOffsets[i%5]; // r.nextInt(bitlen - 64); + + final int bitsremaining = bitlen - sliceBitOff; + + // allow any slice of between 1 and 32 bits length. + final int sliceBitLen = bits; // r.nextInt(Math.min(64, bitsremaining)) + 1; + assert sliceBitLen >= 1 && sliceBitLen <= bits; + + BytesUtil.getBits(b, sliceBitOff, sliceBitLen); + + runs++; + } + System.out.println(msg + " completed: " + runs + ", in " + (System.currentTimeMillis()-start) + "ms"); + } + + public static void test_perf_tstGetBits7() throws IOException { + doAltTestBits32("test_perf_tstGetBits7", 7); + } + public static void test_perf_tstGetBits10() throws IOException { + doAltTestBits32("test_perf_tstGetBits10", 10); + } + public static void test_perf_tstGetBits21() throws IOException { + doAltTestBits32("test_perf_tstGetBits21", 21); + } + public static void test_perf_tstGetBits47() throws IOException { + doAltTestBits64("test_perf_tstGetBits47", 47); + } + static void doAltTestBits32(final String msg, final int bits) throws IOException { + + final Random r = new Random(); + + // #of + final int limit = 1000000000; // 100000000; + + // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. + final int len = 124500; // + r.nextInt(Bytes.kilobyte32 * 8) + 1; + final int bitlen = len << 3; + // Fill array with random data. 
+ final byte[] b = new byte[len]; + r.nextBytes(b); + + int[] sliceOffsets = new int[] { + 1245, + 12450, + 102000, + 80000, + 120000 + }; + + int runs = 0; + long start = System.currentTimeMillis(); + for (int i = 0; i < limit; i++) { + + // start of the bit slice. + final int sliceBitOff = sliceOffsets[i%5]; // r.nextInt(bitlen - 64); + + final int bitsremaining = bitlen - sliceBitOff; + + // allow any slice of between 1 and 32 bits length. + final int sliceBitLen = bits; // r.nextInt(Math.min(64, bitsremaining)) + 1; + assert sliceBitLen >= 1 && sliceBitLen <= 32; + + BytesUtil.altGetBits32(b, sliceBitOff, sliceBitLen); + + runs++; + } + System.out.println(msg + " completed: " + runs + ", in " + (System.currentTimeMillis()-start) + "ms"); + } + static void doAltTestBits64(final String msg, final int bits) throws IOException { + + final Random r = new Random(); + + // #of + final int limit = 500000000; // 100000000; + + // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. + final int len = 124500; // + r.nextInt(Bytes.kilobyte32 * 8) + 1; + final int bitlen = len << 3; + // Fill array with random data. + final byte[] b = new byte[len]; + r.nextBytes(b); + + int[] sliceOffsets = new int[] { + 1245, + 12450, + 102000, + 80000, + 120000 + }; + + int runs = 0; + long start = System.currentTimeMillis(); + for (int i = 0; i < limit; i++) { + + // start of the bit slice. + final int sliceBitOff = sliceOffsets[i%5]; // r.nextInt(bitlen - 64); + + final int bitsremaining = bitlen - sliceBitOff; + + // allow any slice of between 1 and 32 bits length. + final int sliceBitLen = bits; // r.nextInt(Math.min(64, bitsremaining)) + 1; + assert sliceBitLen >= 1 && sliceBitLen <= 32; + + BytesUtil.altGetBits64(b, sliceBitOff, sliceBitLen); + + runs++; + } + System.out.println(msg + " completed: " + runs + ", in " + (System.currentTimeMillis()-start) + "ms"); + } + + static void doTestBits64(final String msg, final int bits) throws IOException { + + final Random r = new Random(); + + // #of + final int limit = 500000000; // 100000000; + + // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. + final int len = 124500; // + r.nextInt(Bytes.kilobyte32 * 8) + 1; + final int bitlen = len << 3; + // Fill array with random data. + final byte[] b = new byte[len]; + r.nextBytes(b); + + int[] sliceOffsets = new int[] { + 1245, + 12450, + 102000, + 80000, + 120000 + }; + + int runs = 0; + long start = System.currentTimeMillis(); + for (int i = 0; i < limit; i++) { + + // start of the bit slice. + final int sliceBitOff = sliceOffsets[i%5]; // r.nextInt(bitlen - 64); + + final int bitsremaining = bitlen - sliceBitOff; + + // allow any slice of between 1 and 32 bits length. 
+ final int sliceBitLen = bits; // r.nextInt(Math.min(64, bitsremaining)) + 1; + assert sliceBitLen >= 1 && sliceBitLen <= 32; + + BytesUtil.getBits64(b, sliceBitOff, sliceBitLen); + + runs++; + } + System.out.println(msg + " completed: " + runs + ", in " + (System.currentTimeMillis()-start) + "ms"); + } + + public static void main(String[] args) throws IOException { + test_perf_tstGetBits7(); + test_perf_getBits7(); + test_perf_tstGetBits10(); + test_perf_getBits10(); + test_perf_tstGetBits21(); + test_perf_getBits21(); + test_perf_tstGetBits47(); + test_perf_getBits47(); + + System.out.println("All done!"); + } +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/TestGetBitsApplication.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/raba/codec/TestCanonicalHuffmanRabaCoder.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/raba/codec/TestCanonicalHuffmanRabaCoder.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/btree/raba/codec/TestCanonicalHuffmanRabaCoder.java 2011-06-07 09:13:25 UTC (rev 4636) @@ -49,6 +49,7 @@ import com.bigdata.btree.raba.ReadOnlyValuesRaba; import com.bigdata.btree.raba.codec.CanonicalHuffmanRabaCoder.AbstractCodingSetup; import com.bigdata.btree.raba.codec.CanonicalHuffmanRabaCoder.RabaCodingSetup; +import com.bigdata.rawstore.Bytes; /** * Test suite for the {@link CanonicalHuffmanRabaCoder}. @@ -583,5 +584,115 @@ } } + + /** + * A stress test for compatibility with {@link InputBitStream}. An array is + * filled with random bits and the behavior of {@link InputBitStream} and + * {@link BytesUtil#getBits(byte[], int, int)} is compared on a number of + * randomly selected bit slices. + * + * TODO Could be a performance comparison. + * + * @throws IOException + */ + public void test_stress_InputBitStream_compatible() throws IOException { + + final Random r = new Random(); + // #of + final int limit = 1000; + + // Note: length is guaranteed to be LT int32 bits so [int] index is Ok. + final int len = r.nextInt(Bytes.kilobyte32 * 8) + 1; + final int bitlen = len << 3; + // Fill array with random data. + final byte[] b = new byte[len]; + r.nextBytes(b); + + // wrap with InputBitStream. + final InputBitStream ibs = new InputBitStream(b); + + for (int i = 0; i < limit; i++) { + + // start of the bit slice. + final int sliceBitOff = r.nextInt(bitlen - 32); + + final int bitsremaining = bitlen - sliceBitOff; + + // allow any slice of between 1 and 32 bits length. + final int sliceBitLen = r.nextInt(Math.min(32, bitsremaining)) + 1; + assert sliceBitLen >= 1 && sliceBitLen <= 32; + + // position the stream. + ibs.position(sliceBitOff); + + final int v1 = ibs.readInt(sliceBitLen); + + final int v2 = BytesUtil.getBits(b, sliceBitOff, sliceBitLen); + + if (v1 != v2) { + fail("Expected=" + v1 + ", actual=" + v2 + ", trial=" + i + + ", bitSlice(off=" + sliceBitOff + ", len=" + + sliceBitLen + ")" + ", arrayLen=" + b.length); + } + + } + + } + + public void test_confirm_InputBitStream_compatible() throws IOException { + + final byte[] tbuf = new byte[] { + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA, + (byte) 0xAA + }; + + // wrap with InputBitStream. 
+ final InputBitStream ibs = new InputBitStream(tbuf); + + // 1010 + assertTrue(compare(ibs, tbuf, 0, 4) == 0xA); + // 1010 1010 + assertTrue(compare(ibs, tbuf, 0, 8) == 0xAA); + // 0101 + assertTrue(compare(ibs, tbuf, 1, 4) == 0x5); + // 01 0101 + assertTrue(compare(ibs, tbuf, 1, 6) == 0x15); + // 1010 1010 + assertTrue(compare(ibs, tbuf, 0, 32) == 0xAAAAAAAA); + assertTrue(compare(ibs, tbuf, 1, 32) == 0x55555555); + + // Now try some 64bit comparisons + assertTrue(compare64(ibs, tbuf, 0, 48) == 0xAAAAAAAAAAAAL); + assertTrue(compare64(ibs, tbuf, 1, 48) == 0x555555555555L); + + } + + int compare(InputBitStream ibs, byte[] buf, int offset, int bits) throws IOException { + ibs.position(offset); + int v1 = ibs.readInt(bits); + int v2 = BytesUtil.getBits(buf, offset, bits); + + assertTrue(v1 == v2); + + return v1; + + } + + long compare64(InputBitStream ibs, byte[] buf, int offset, int bits) throws IOException { + ibs.position(offset); + long v1 = ibs.readLong(bits); + long v2 = BytesUtil.getBits64(buf, offset, bits); + + assertTrue(v1 == v2); + + return v1; + + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java 2011-06-06 21:27:16 UTC (rev 4635) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java 2011-06-07 09:13:25 UT... [truncated message content] |
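The compatibility tests above pin down the bit-order contract shared by InputBitStream and BytesUtil.getBits: a slice of 1 to 32 bits is read most-significant-bit first, starting at an arbitrary bit offset into the byte array. As a reading aid, here is a minimal re-derivation of that contract (an illustrative sketch, not the bigdata BytesUtil implementation; the class and method names are invented):

public class GetBitsSketch {
    /**
     * Extract bitLen bits (1..32) starting at absolute bit offset bitOff,
     * reading most-significant-bit first within each byte, which is the
     * order InputBitStream uses.
     */
    public static int getBits(final byte[] b, final int bitOff, final int bitLen) {
        if (bitLen < 1 || bitLen > 32)
            throw new IllegalArgumentException();
        int v = 0;
        for (int i = 0; i < bitLen; i++) {
            final int bit = bitOff + i;
            final int byteIndex = bit >>> 3;  // which byte holds this bit
            final int shift = 7 - (bit & 7);  // MSB-first within the byte
            v = (v << 1) | ((b[byteIndex] >>> shift) & 1);
        }
        return v;
    }
}

Against the 0xAA vectors in the test above this reproduces the expected values, e.g. getBits(tbuf, 0, 4) == 0xA and getBits(tbuf, 1, 6) == 0x15.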
From: <mar...@us...> - 2011-06-09 15:01:17
Revision: 4653 http://bigdata.svn.sourceforge.net/bigdata/?rev=4653&view=rev Author: martyncutcher Date: 2011-06-09 15:01:10 +0000 (Thu, 09 Jun 2011) Log Message: ----------- make release methods private on DirectBufferPool Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 14:38:02 UTC (rev 4652) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 15:01:10 UTC (rev 4653) @@ -165,7 +165,6 @@ * native memory. */ final private BlockingQueue<ByteBuffer> pool; - final private ArrayList<ByteBuffer> acquiredBuffers; /** * The number {@link ByteBuffer}s allocated (must use {@link #lock} for @@ -437,8 +436,6 @@ this.pool = new LinkedBlockingQueue<ByteBuffer>(poolCapacity); - this.acquiredBuffers = new ArrayList<ByteBuffer>(); - pools.add(this); } @@ -519,7 +516,6 @@ // the head of the pool must exist. final ByteBuffer buf = pool.take(); - acquiredBuffers.add(buf); acquired++; totalAcquireCount.increment(); @@ -558,7 +554,7 @@ * if the buffer has already been released. * @throws InterruptedException */ - final protected void release(final ByteBuffer b) throws InterruptedException { + final private void release(final ByteBuffer b) throws InterruptedException { if (!release(b, Long.MAX_VALUE, TimeUnit.MILLISECONDS)) { @@ -596,10 +592,6 @@ try { // add to the pool. - if (!acquiredBuffers.contains(b)) - throw new IllegalArgumentException("Buffer not managed by this pool or already released"); - acquiredBuffers.remove(b); - if(!pool.offer(b, timeout, units)) return false; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java 2011-06-09 14:38:02 UTC (rev 4652) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java 2011-06-09 15:01:10 UTC (rev 4653) @@ -293,7 +293,7 @@ * released. */ // Note: package private for the unit tests. - final /*private*/ ByteBuffer nativeBuffer; + final /*private*/ IBufferAccess nativeBuffer; /** * A {@link ByteBuffer#slice()} onto the allocated region of the @@ -325,7 +325,7 @@ } private Allocation(final AllocationContext allocationContext, - final ByteBuffer nativeBuffer, final ByteBuffer allocatedSlice) { + final IBufferAccess nativeBuffer, final ByteBuffer allocatedSlice) { if (allocationContext == null) throw new IllegalArgumentException(); @@ -465,7 +465,7 @@ nativeBuffer.limit(limit); // create the slice. - final Allocation a = new Allocation(this, nativeBuffer, + final Allocation a = new Allocation(this, directBuffer, nativeBuffer.slice()); // restore limit to the remaining capacity. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-06-09 14:38:02 UTC (rev 4652) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPool.java 2011-06-09 15:01:10 UTC (rev 4653) @@ -175,33 +175,4 @@ } - /** - * Unit test to verify that a pool will reject a buffer not acquired from - * that pool. - */ - public void test_rejectBufferFromAnotherPool() throws InterruptedException { - - // A distinct pool with the same buffer capacity - final DirectBufferPool testPool = new DirectBufferPool("test", - 1/* poolCapacity */, DirectBufferPool.INSTANCE - .getBufferCapacity()); - - IBufferAccess b = null; - try { - b = DirectBufferPool.INSTANCE.acquire(); - try { - // Only possible to test since in same package - testPool.release(b.buffer()); - fail("Release should not be permitted to a different pool. Expecting: " - + IllegalArgumentException.class); - } catch (IllegalArgumentException ex) { - if (log.isInfoEnabled()) - log.info("Ignoring expected exception: " + ex); - } - } finally { - b.release(); - } - - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java 2011-06-09 14:38:02 UTC (rev 4652) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java 2011-06-09 15:01:10 UTC (rev 4653) @@ -230,10 +230,10 @@ assertTrue(x0.nativeBuffer == x1.nativeBuffer); // the position was advanced by the #of bytes allocated. - assertEquals(allocSize * 2, x0.nativeBuffer.position()); + assertEquals(allocSize * 2, x0.nativeBuffer.buffer().position()); // the limit on the native byte buffer has not been changed. - assertEquals(x0.nativeBuffer.capacity(), x0.nativeBuffer.limit()); + assertEquals(x0.nativeBuffer.buffer().capacity(), x0.nativeBuffer.buffer().limit()); } finally { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
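With release(ByteBuffer) made private, the only supported lifecycle is: acquire an IBufferAccess handle, use buffer() while the handle is held, and give the buffer back through the handle. A minimal sketch of that discipline, assuming only the acquire()/buffer()/release() signatures visible in these diffs (the enclosing class and method are invented for illustration):

import java.nio.ByteBuffer;

import com.bigdata.io.DirectBufferPool;
import com.bigdata.io.IBufferAccess;

public class PoolUsageSketch {
    public static void useScratchBuffer() throws InterruptedException {
        final IBufferAccess access = DirectBufferPool.INSTANCE.acquire();
        try {
            // Valid only while the handle is held; do not cache this reference.
            final ByteBuffer b = access.buffer();
            b.putLong(0L); // ... any use of the scratch buffer ...
        } finally {
            // The only route back to the pool now that
            // DirectBufferPool#release(ByteBuffer) is private.
            access.release();
        }
    }
}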
From: <tho...@us...> - 2011-06-09 15:15:17
Revision: 4655 http://bigdata.svn.sourceforge.net/bigdata/?rev=4655&view=rev Author: thompsonbry Date: 2011-06-09 15:15:11 +0000 (Thu, 09 Jun 2011) Log Message: ----------- Passing changes back to martyn Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 15:02:21 UTC (rev 4654) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 15:15:11 UTC (rev 4655) @@ -1,9 +1,7 @@ package com.bigdata.io; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.concurrent.BlockingQueue; @@ -74,14 +72,18 @@ private class BufferState implements IBufferAccess { /** - * The buffer instance. + * The buffer instance. This is guarded by the monitor of the + * {@link BufferState} object. */ private ByteBuffer buf; BufferState(final ByteBuffer buf) { + if (buf == null) throw new IllegalArgumentException(); + this.buf = buf; + } /** @@ -123,34 +125,39 @@ // Implement IDirectBuffer methods public ByteBuffer buffer() { - return buf; + synchronized(this) { + return buf; + } } public void release() throws InterruptedException { release(Long.MAX_VALUE, TimeUnit.MILLISECONDS); } - public void release(long timeout, TimeUnit units) throws InterruptedException { - if (buf != null) { - DirectBufferPool.this.release(buf, timeout, units); - buf = null; - } else { - throw new IllegalStateException("Buffer has already been released"); - } + public void release(long timeout, TimeUnit units) + throws InterruptedException { + + synchronized (this) { + if (buf == null) { + throw new IllegalStateException(); + } + DirectBufferPool.this.release(buf, timeout, units); + buf = null; + } + } protected void finalize() { if (buf != null) { try { + log.error("Buffer release on finalize"); DirectBufferPool.this.release(buf); } catch (InterruptedException e) { // ignore } - log.warn("Buffer released on finalize"); } } - } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java 2011-06-09 15:02:21 UTC (rev 4654) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/IBufferAccess.java 2011-06-09 15:15:11 UTC (rev 4655) @@ -27,11 +27,45 @@ import java.nio.ByteBuffer; import java.util.concurrent.TimeUnit; +/** + * Interface for access to and release of a direct {@link ByteBuffer} managed by + * the {@link DirectBufferPool}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ public interface IBufferAccess { - // return the byte buffer - public ByteBuffer buffer(); - - // release the ByteBuffer, returning to owning pool - public void release() throws InterruptedException; - public void release(long time, TimeUnit unit) throws InterruptedException; + + /** + * Return the direct {@link ByteBuffer}. 
+ * <p> + * <strong>Caution:</strong> DO NOT hold onto a reference to the returned + * {@link ByteBuffer} without also retaining the {@link IBufferAccess} + * object. This can cause the backing {@link ByteBuffer} to be returned to + * the pool, after which it may be handed off to another thread leading to + * data corruption through concurrent modification to the backing bytes! + * + * @throws IllegalStateException + * if the buffer has been released. + */ + public ByteBuffer buffer(); + + /** + * Release the {@link ByteBuffer}, returning to owning pool. + * + * @throws IllegalStateException + * if the buffer has been released. + */ + public void release() throws InterruptedException; + + /** + * Release the {@link ByteBuffer}, returning to owning pool. + * + * @param time + * @param unit + * @throws IllegalStateException + * if the buffer has been released. + */ + public void release(long time, TimeUnit unit) throws InterruptedException; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java 2011-06-09 15:02:21 UTC (rev 4654) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/TestDirectBufferPoolAllocator.java 2011-06-09 15:15:11 UTC (rev 4655) @@ -56,8 +56,18 @@ super(name); } - final DirectBufferPool pool = DirectBufferPool.INSTANCE; + private final DirectBufferPool pool = DirectBufferPool.INSTANCE; + @Override + protected void tearDown() throws Exception { + + // Verify that all allocated buffers were released. + DirectBufferPoolTestHelper.checkBufferPools(this); + + super.tearDown(); + + } + /** * Opens and closes the allocator. */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2011-06-09 16:50:55
Revision: 4659 http://bigdata.svn.sourceforge.net/bigdata/?rev=4659&view=rev Author: thompsonbry Date: 2011-06-09 16:50:47 +0000 (Thu, 09 Jun 2011) Log Message: ----------- Working w/ Martyn on finalization logic and release of buffers to the direct buffer pool. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestReceiveBuffer.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 16:05:46 UTC (rev 4658) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPool.java 2011-06-09 16:50:47 UTC (rev 4659) @@ -160,28 +160,45 @@ } - protected void finalize() { - if (buf != null) { - try { - if (DEBUG) { - /* - * Note: This code path WILL NOT return the buffer to - * the pool. This is deliberate. When DEBUG is true we - * do not permit a buffer which was not correctly - * release to be reused. - */ - log.error( - "Buffer release on finalize: AllocationStack", - allocationStack); - } else { - log.error("Buffer release on finalize."); - DirectBufferPool.this.release(buf); - } - } catch (InterruptedException e) { - // ignore - } - } - } + protected void finalize() throws Throwable { + /* + * Ultra paranoid block designed to ensure that we do not double + * release the ByteBuffer to the owning pool via the action of + * another finalized. + */ + final ByteBuffer buf; + synchronized(this) { + buf = this.buf; + this.buf = null; + } + if (buf == null) + return; + if (DEBUG) { + /* + * Note: This code path WILL NOT return the buffer to the pool. + * This is deliberate. When DEBUG is true we do not permit a + * buffer which was not correctly release to be reused. + * + * Note: A common cause of this is that the caller is holding + * onto the acquired ByteBuffer object rather than the + * IBufferAccess object. This permits the IBufferAccess + * reference to be finalized. When the IBufferAccess object is + * finalized, it will attempt to release the buffer (except in + * DEBUG mode). However, if the caller is holding onto the + * ByteBuffer then this is an error which can rapidly lead to + * corrupt data through concurrent modification (what happens is + * that the ByteBuffer is handed out to another thread in + * response to another acquire() and we now have two threads + * using the same ByteBuffer, each of which believes that they + * "own" the reference). + */ + log.error("Buffer release on finalize: AllocationStack", + allocationStack); + } else { + log.error("Buffer release on finalize."); + DirectBufferPool.this.release(buf); + } + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2011-06-09 16:05:46 UTC (rev 4658) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java 2011-06-09 16:50:47 UTC (rev 4659) @@ -236,7 +236,7 @@ /** * Optional {@link WriteCache}. 
*/ - private WriteCache writeCache = null; + final private WriteCache writeCache; /** * The next offset at which data in the {@link #writeCache} will be written Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java 2011-06-09 16:05:46 UTC (rev 4658) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/journal/TemporaryStoreFactory.java 2011-06-09 16:50:47 UTC (rev 4659) @@ -40,6 +40,7 @@ import com.bigdata.cache.ConcurrentWeakValueCache; import com.bigdata.rawstore.Bytes; import com.bigdata.rawstore.WormAddressManager; +import com.bigdata.util.InnerCause; /** * Helper class for {@link IIndexStore#getTempStore()}. This class is very light @@ -302,6 +303,8 @@ */ synchronized public void closeAll() { + boolean interrupted = false; + final Iterator<Map.Entry<UUID,WeakReference<TemporaryStore>>> itr = stores.entryIterator(); while(itr.hasNext()) { @@ -318,14 +321,24 @@ } // close the temporary store (it will be deleted synchronously). - if(store.isOpen()) { + if (store.isOpen()) { - store.close(); - + try { + store.close(); + } catch (Throwable t) { + if (InnerCause.isInnerCause(t, InterruptedException.class)) + interrupted = true; + } + } - + } + if (interrupted) { + // Propagate the interrupt. + Thread.currentThread().interrupt(); + } + } - + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestReceiveBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestReceiveBuffer.java 2011-06-09 16:05:46 UTC (rev 4658) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/service/TestReceiveBuffer.java 2011-06-09 16:50:47 UTC (rev 4659) @@ -85,11 +85,13 @@ final UUID allowedUUID = UUID.randomUUID(); - final IBufferAccess allowedBufferdb = DirectBufferPool.INSTANCE.acquire(1, - TimeUnit.SECONDS); - final ByteBuffer allowedBuffer = allowedBufferdb.buffer(); + final IBufferAccess allowedBufferdb = DirectBufferPool.INSTANCE + .acquire(); + try { + final ByteBuffer allowedBuffer = allowedBufferdb.buffer(); + // populate with some random data. fillBufferWithRandomData(allowedBuffer); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
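The revised finalizer boils down to a claim-once idiom: the guarded reference is nulled under the monitor, and only the caller that observed a non-null value performs the release, so a finalize() racing a release() cannot hand the same buffer to the pool twice. A self-contained sketch of the idiom (PooledBuffer and its queue-backed pool are illustrative stand-ins, not the DirectBufferPool internals):

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;

public class PooledBuffer {
    private final BlockingQueue<ByteBuffer> pool; // the owning pool
    private ByteBuffer buf; // guarded by this; null once released

    public PooledBuffer(final BlockingQueue<ByteBuffer> pool, final ByteBuffer buf) {
        this.pool = pool;
        this.buf = buf;
    }

    public void release() {
        final ByteBuffer b;
        synchronized (this) {
            b = buf;
            buf = null; // claim the reference exactly once
        }
        if (b != null) {
            pool.offer(b); // act outside the monitor, at most once
        }
    }

    @Override
    protected void finalize() throws Throwable {
        release(); // idempotent: a second call finds buf == null and is a no-op
        super.finalize();
    }
}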
From: <mar...@us...> - 2011-06-13 13:07:36
Revision: 4698 http://bigdata.svn.sourceforge.net/bigdata/?rev=4698&view=rev Author: martyncutcher Date: 2011-06-13 13:07:25 +0000 (Mon, 13 Jun 2011) Log Message: ----------- Correct TestWriteCache to correctly process distinct IBufferAccess objects that duplicate data rather than mock up WriteCache objects with directly shared ByteBuffers. Also fix restoreRecordMap to ensure buffer limit is correctly set before processing Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-10 23:20:14 UTC (rev 4697) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2011-06-13 13:07:25 UTC (rev 4698) @@ -1761,10 +1761,16 @@ */ public void resetRecordMapFromBuffer(final ByteBuffer buf, final Map<Long, RecordMetadata> recordMap) { recordMap.clear(); + final int sp = buf.position(); + int pos = 0; + buf.limit(sp); while (pos < buf.limit()) { buf.position(pos); long addr = buf.getLong(); + if (addr == 0L) { // end of content + break; + } int sze = buf.getInt(); if (sze == 0 /* deleted */) { recordMap.remove(addr); // should only happen if previous write already made to the buffer @@ -1775,7 +1781,7 @@ pos += 12 + sze; // hop over buffer info (addr + sze) and then // data } - } + } } @@ -2244,7 +2250,7 @@ writeLock.lockInterruptibly(); try { - resetRecordMapFromBuffer(buf.get().buffer(), recordMap); + resetRecordMapFromBuffer(buf.get().buffer().duplicate(), recordMap); } finally { writeLock.unlock(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2011-06-10 23:20:14 UTC (rev 4697) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2011-06-13 13:07:25 UTC (rev 4698) @@ -928,31 +928,33 @@ int chk2 = ChecksumUtility.threadChk.get().checksum(data2, 0/* offset */, data2.limit()); WriteCache cache1 = new WriteCache.FileChannelScatteredWriteCache(buf, true, true, false, opener, null); - WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache(buf2, true, true, + WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache(buf, true, true, false, opener, null); // write first data buffer cache1.write(addr1, data1, chk1); data1.flip(); - buf2.buffer().limit(buf.buffer().position()); - buf2.buffer().position(0); + syncBuffers(buf, buf2); cache2.resetRecordMapFromBuffer(); + assertEquals(cache1.read(addr1), data1); assertEquals(cache2.read(addr1), data1); // now simulate removal/delete cache1.clearAddrMap(addr1); - buf2.buffer().limit(buf.buffer().position()); - buf2.buffer().position(0); + + syncBuffers(buf, buf2); + cache2.resetRecordMapFromBuffer(); + + assertTrue(cache1.read(addr1) == null); assertTrue(cache2.read(addr1) == null); - assertTrue(cache1.read(addr1) == null); // now write second data buffer cache1.write(addr1, data2, chk2); data2.flip(); - buf2.buffer().limit(buf.buffer().position()); - buf2.buffer().position(0); + // buf2.buffer().limit(buf.buffer().position()); + 
syncBuffers(buf, buf2); cache2.resetRecordMapFromBuffer(); assertEquals(cache2.read(addr1), data2); assertEquals(cache1.read(addr1), data2); @@ -964,6 +966,23 @@ opener.destroy(); } } + + // ensure dst buffer is copy of src + private void syncBuffers(final IBufferAccess src, final IBufferAccess dst) { + final ByteBuffer sb = src.buffer(); + final ByteBuffer db = dst.buffer(); + int sp = sb.position(); + int sl = sb.limit(); + sb.position(0); + db.position(0); + sb.limit(sp); + db.limit(sp); + db.put(sb); + sb.position(sp); + db.position(sp); + sb.limit(sl); + db.limit(sl); + } /* * Now generate randomviews, first an ordered view of 10000 random lengths This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
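The resetRecordMapFromBuffer fix is easier to follow with the buffer framing spelled out: the cache content is a run of [8-byte addr][4-byte size][payload] frames up to the current position, an addr of 0L terminates the content, and a size of 0 marks a deletion of an earlier write. A hedged reconstruction of that scan, simplified to map addresses to payload offsets (the real code stores RecordMetadata objects):

import java.nio.ByteBuffer;
import java.util.Map;

public class RecordMapSketch {
    public static void rebuild(final ByteBuffer buf, final Map<Long, Integer> recordMap) {
        recordMap.clear();
        final int end = buf.position(); // content written so far bounds the scan
        int pos = 0;
        while (pos + 12 <= end) {       // room for the 8+4 byte frame header
            buf.position(pos);
            final long addr = buf.getLong();
            if (addr == 0L)
                break;                  // end-of-content sentinel
            final int size = buf.getInt();
            if (size == 0) {
                recordMap.remove(addr); // zero size marks a deleted record
            } else {
                recordMap.put(addr, pos + 12); // payload offset in the buffer
            }
            pos += 12 + size;           // hop over header plus payload
        }
    }
}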
From: <tho...@us...> - 2011-06-13 15:03:50
Revision: 4699 http://bigdata.svn.sourceforge.net/bigdata/?rev=4699&view=rev Author: thompsonbry Date: 2011-06-13 15:03:43 +0000 (Mon, 13 Jun 2011) Log Message: ----------- - Bug fix to TokenBuffer. It was failing to normalize the last document indexed in each batch with the result that the local term weights for that document would be zero such that the document could not be discovered by search. In practical terms, this would be the last literal in each incremental batch for a bulk data load. The batch size is normally 10k, 100k, or 10M depending on the application. This was showing up as a test failure for TestPrefixSearch#test_prefixSearch() - Fix unit test (TestSearchRestartSafe). The test was written to the old Hiterator, which had all the results and would report the #of results, but would only visit those results whose cosine was in the allowed range. The new Hiterator return only reports the results which are in the permitted min/max rank and cosine range. - Deprecated methods on FullTextIndex which are only used by the test suite. All of these changes are already captured in the TERMS branch. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TokenBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestSearchRestartSafe.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-06-13 13:07:25 UTC (rev 4698) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2011-06-13 15:03:43 UTC (rev 4699) @@ -894,14 +894,14 @@ * @return A {@link Iterator} which may be used to traverse the search * results in order of decreasing relevance to the query. * - * @see Options#INDEXER_TIMEOUT + * @deprecated Only used by the test suite. */ public Hiterator search(final String query, final String languageCode) { return search(query, languageCode, false/* prefixMatch */); } - + /** @deprecated Only used by the test suite. */ public Hiterator search(final String query, final String languageCode, final boolean prefixMatch) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TokenBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TokenBuffer.java 2011-06-13 13:07:25 UTC (rev 4698) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/search/TokenBuffer.java 2011-06-13 15:03:43 UTC (rev 4699) @@ -280,6 +280,9 @@ log.info("count=" + count + ", ndocs=" + ndocs + ", nfields=" + nfields + ", nterms=" + nterms); + // Normalize the last document/field in the buffer + buffer[count - 1].normalize(); + /* * Generate keys[] and vals[]. 
*/ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestSearchRestartSafe.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestSearchRestartSafe.java 2011-06-13 13:07:25 UTC (rev 4698) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/search/TestSearchRestartSafe.java 2011-06-13 15:03:43 UTC (rev 4699) @@ -130,7 +130,7 @@ final Hiterator<?> itr = ndx.search(text, languageCode); - assertEquals(2, itr.size()); + assertEquals(1, itr.size());// Note: 2nd result pruned by cosine. assertTrue(itr.hasNext()); @@ -138,9 +138,9 @@ System.err.println("hit1:" + hit1); - /* - * Note: with cosine computation only the first hit is visited. - */ +// /* +// * Note: with cosine computation only the first hit is visited. +// */ assertFalse(itr.hasNext()); @@ -159,7 +159,7 @@ final Hiterator<?> itr = ndx.search(text, languageCode); - assertEquals(2, itr.size()); + assertEquals(1, itr.size()); // Note: 2nd result pruned by cosine. assertTrue(itr.hasNext()); @@ -167,9 +167,9 @@ System.err.println("hit1:" + hit1); - /* - * Note: with cosine computation only the first hit is visited. - */ +// /* +// * Note: with cosine computation only the first hit is visited. +// */ assertFalse(itr.hasNext()); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
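The TokenBuffer fix matters because search ranking assumes per-document normalization: local term weights are scaled to unit vector length so that cosine scores are comparable, and, per the log message above, a document flushed without that step keeps zero local weights and can never match a query. A generic sketch of the normalization step under that assumption (illustrative; not the TokenBuffer code):

import java.util.Map;

public class NormalizeSketch {
    public static void normalize(final Map<String, Double> localWeights) {
        double sumOfSquares = 0d;
        for (final double w : localWeights.values())
            sumOfSquares += w * w;
        final double magnitude = Math.sqrt(sumOfSquares);
        if (magnitude == 0d)
            return; // nothing to scale for an empty document
        for (final Map.Entry<String, Double> e : localWeights.entrySet())
            e.setValue(e.getValue() / magnitude); // unit-length term vector
    }
}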
From: <tho...@us...> - 2011-06-22 21:14:09
Revision: 4774 http://bigdata.svn.sourceforge.net/bigdata/?rev=4774&view=rev Author: thompsonbry Date: 2011-06-22 21:14:02 +0000 (Wed, 22 Jun 2011) Log Message: ----------- Bug fix for scale-out to the sparse row store's timestamp chooser. It needs to use the IJournal interface rather than casting to AbstractJournal. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/sparse/TimestampChooser.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/sparse/TestAll.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/sparse/TimestampChooser.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/sparse/TimestampChooser.java 2011-06-22 20:02:14 UTC (rev 4773) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/sparse/TimestampChooser.java 2011-06-22 21:14:02 UTC (rev 4774) @@ -31,7 +31,7 @@ import com.bigdata.btree.BTree; import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; -import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IJournal; import com.bigdata.journal.ITimestampService; import com.bigdata.journal.TemporaryRawStore; import com.bigdata.sparse.TPS.TPV; @@ -96,14 +96,13 @@ return MillisecondTimestampFactory.nextMillis(); } - + /* - * The backing store will be some kind of AbstractJournal - either - * a Journal or a ManagedJournal. + * The backing store will be some kind of IJournal - a Journal, a + * ManagedJournal, an IsolatedActionJournal, etc. */ - final AbstractJournal journal = (AbstractJournal) mutableBTree - .getStore(); + final IJournal journal = (IJournal) mutableBTree.getStore(); /* * This will be locally unique for a Journal and federation-wide Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/sparse/TestAll.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/sparse/TestAll.java 2011-06-22 20:02:14 UTC (rev 4773) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/sparse/TestAll.java 2011-06-22 21:14:02 UTC (rev 4774) @@ -68,7 +68,7 @@ public static Test suite() { - TestSuite suite = new TestSuite("Sparse Row Store"); + final TestSuite suite = new TestSuite("Sparse Row Store"); // value encoding and decoding. suite.addTestSuite(TestValueType.class); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
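The one-line repair is the classic program-to-the-interface fix: in scale-out the BTree's backing store may be a delegating journal (the diff's comment lists IsolatedActionJournal among others) that implements IJournal without extending AbstractJournal, so the downcast must name the interface. A generic, self-contained illustration of the failure mode (Store, ConcreteStore and DelegatingStore are invented names, not bigdata classes):

public class CastDemo {
    interface Store {
        long commitCounter();
    }

    static class ConcreteStore implements Store {
        public long commitCounter() { return 1L; }
    }

    // Wraps a Store; implements the interface but does NOT extend ConcreteStore.
    static class DelegatingStore implements Store {
        private final Store delegate;
        DelegatingStore(final Store delegate) { this.delegate = delegate; }
        public long commitCounter() { return delegate.commitCounter(); }
    }

    static long use(final Object store) {
        // Casting to ConcreteStore would throw ClassCastException for a
        // DelegatingStore; casting to the interface accepts both.
        return ((Store) store).commitCounter();
    }

    public static void main(final String[] args) {
        System.out.println(use(new DelegatingStore(new ConcreteStore())));
    }
}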