From: <tho...@us...> - 2011-05-11 14:26:41
Revision: 4482
          http://bigdata.svn.sourceforge.net/bigdata/?rev=4482&view=rev
Author:   thompsonbry
Date:     2011-05-11 14:26:34 +0000 (Wed, 11 May 2011)

Log Message:
-----------
Bug fix to the update of the pointers in the parent directory page's buddy
hash table when a child is split. Update to the worksheet for this unit
test of the htree split rule.

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/htree.xls
    branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java
    branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/architecture/htree.xls
===================================================================
(Binary files differ)

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java	2011-05-11 03:20:08 UTC (rev 4481)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/HTree.java	2011-05-11 14:26:34 UTC (rev 4482)
@@ -602,11 +602,15 @@
         if (oldBucket.parent != parent.self) // must be same Reference.
             throw new IllegalStateException();
 
+        if (log.isDebugEnabled())
+            log.debug("parent=" + parent.toShortString() + ", buddyOffset="
+                    + buddyOffset + ", child=" + oldBucket);
+
         final int oldDepth = oldBucket.globalDepth;
         final int newDepth = oldDepth + 1;
 
         // Allocate a new bucket page (globalDepth is increased by one).
-        final BucketPage newBucket = new BucketPage(this, oldBucket.globalDepth + 1);
+        final BucketPage newBucket = new BucketPage(this, newDepth);
 
         assert newBucket.isDirty();
 
@@ -614,201 +618,256 @@
         newBucket.parent = (Reference) parent.self;
 
         // Increase global depth on the old page also.
-        oldBucket.globalDepth++;
+        oldBucket.globalDepth = newDepth;
 
         nleaves++; // One more bucket page in the hash tree.
 
-        // #of slots on the bucket page (invariant given addressBits).
-        final int slotsOnPage = (1 << addressBits);
-
         /*
          * Update pointers in buddy hash table in the parent.
          *
-         * Note: The upper 1/2 of the pointers in the buddy hash table in the
-         * parent are rewritten to point to the new child.
+         * There will be [npointers] slots in the appropriate buddy hash table
+         * in the parent directory which point to the old bucket page. The upper
+         * 1/2 of those pointers will be modified to point to the new bucket
+         * page. The lower 1/2 of the pointers will be unchanged.
          */
         {
 
             // #of address slots in the parent buddy hash table.
             final int slotsPerBuddy = (1 << parent.globalDepth);
 
-            // Must be at least two slots per buddy for us to redistribute
-            // pointers.
+            // #of pointers in the parent buddy hash table to the old bucket.
+            final int npointers = 1 << (parent.globalDepth - oldDepth);
+
+            // Must be at least two slots since we will change at least one.
             assert slotsPerBuddy > 1 : "slotsPerBuddy=" + slotsPerBuddy;
+
+            // Must be at least two pointers since we will change at least one.
+            assert npointers > 1 : "npointers=" + npointers;
 
-            // The first slot in the upper 1/2 of the buddy hash table.
-            final int firstSlot = buddyOffset + (slotsPerBuddy >> 1);
+            // The first slot in the buddy hash table in the parent.
+            final int firstSlot = buddyOffset;
 
-            // The last slot in the buddy hash table.
+            // The last slot in the buddy hash table in the parent.
             final int lastSlot = buddyOffset + slotsPerBuddy;
-
+
+            /*
+             * Count pointers to the old bucket page. There should be
+             * [npointers] of them and they should be contiguous.
+             *
+             * Note: We can test References here rather than comparing addresses
+             * because we know that the parent and the old bucket are both
+             * mutable. This means that their childRef is defined and their
+             * storage address is NULL.
+             *
+             * TODO This logic should be in DirectoryPage#dump()
+             */
+            int firstPointer = -1;
+            int nfound = 0;
+            boolean discontiguous = false;
             for (int i = firstSlot; i < lastSlot; i++) {
+                if (parent.childRefs[i] == oldBucket.self) {
+                    if (firstPointer == -1)
+                        firstPointer = i;
+                    nfound++;
+                    if (((MutableDirectoryPageData) parent.data).childAddr[i] != IRawStore.NULL) {
+                        throw new RuntimeException(
+                                "Child address should be NULL since child is dirty");
+                    }
+                } else {
+                    if (firstPointer != -1 && nfound != npointers) {
+                        discontiguous = true;
+                    }
+                }
+            }
+            if (firstPointer == -1)
+                throw new RuntimeException("No pointers to child");
+            if (nfound != npointers)
+                throw new RuntimeException("Expected " + npointers
+                        + " pointers to child, but found=" + nfound);
+            if (discontiguous)
+                throw new RuntimeException(
+                        "Pointers to child are discontiguous in parent's buddy hash table.");
 
+            // Update the upper 1/2 of the pointers to the new bucket.
+            for (int i = firstPointer + (npointers >> 1); i < npointers; i++) {
+
+                if (parent.childRefs[i] != oldBucket.self)
+                    throw new RuntimeException("Does not point to old child.");
+
                 // update the references to the new bucket.
                 parent.childRefs[i] = (Reference) newBucket.self;
 
-                // clear address since newBucket is dirty.
-                ((MutableDirectoryPageData) parent.data).childAddr[i] = 0L;
-
             }
 
         }
 
-        /*
-         * Redistribute the buddy buckets.
-         *
-         * Note: We are not changing the #of buddy buckets, just their size and
-         * the page on which they are found. Any tuples in a source bucket will
-         * wind up in the same bucket afterwards, but the page and offset on the
-         * page of the buddy bucket may have been changed.
-         *
-         * TODO So we could proceed backwards, moving the upper half of the
-         * buddy buckets to the new bucket page first and then spreading out the
-         * lower half of the source page among the new bucket boundaries on the
-         * page. Again, we have to move backwards through the buddy buckets on
-         * the source page to avoid overwrites of data which has not yet been
-         * copied.
-         */
-        {
+        // redistribute buddy buckets between old and new pages.
+        redistributeBuddyBuckets(oldDepth, newDepth, oldBucket, newBucket);
 
-            // #of address slots in each old buddy hash bucket.
-            final int slotsPerOldBuddy = (1 << oldDepth);
+        // TODO assert invariants?
+
+    }
 
-            // #of address slots in each new buddy hash bucket.
-            final int slotsPerNewBuddy = (1 << newDepth);
+    /**
+     * Redistribute the buddy buckets.
+     * <p>
+     * Note: We are not changing the #of buddy buckets, just their size and
+     * the page on which they are found. Any tuples in a source bucket will
+     * wind up in the same bucket afterwards, but the page and offset on the
+     * page of the buddy bucket may have been changed.
+     * <p>
+     * We proceed backwards, moving the upper half of the buddy buckets to
+     * the new bucket page first and then spreading out the lower half of
+     * the source page among the new bucket boundaries on the page.
+     */
+    private void redistributeBuddyBuckets(final int oldDepth,
+            final int newDepth, final BucketPage oldBucket,
+            final BucketPage newBucket) {
 
-            // #of buddy tables on the old bucket page.
-            final int oldBuddyCount = (slotsOnPage) / slotsPerOldBuddy;
+        // #of slots on the bucket page (invariant given addressBits).
+        final int slotsOnPage = (1 << addressBits);
 
-            // #of buddy tables on the bucket pages after the split.
-            final int newBuddyCount = (slotsOnPage) / slotsPerNewBuddy;
+        // #of address slots in each old buddy hash bucket.
+        final int slotsPerOldBuddy = (1 << oldDepth);
 
-            final BucketPage srcPage = oldBucket;
-            final MutableKeyBuffer srcKeys = (MutableKeyBuffer)oldBucket.getKeys();
-            final MutableValueBuffer srcVals = (MutableValueBuffer)oldBucket.getValues();
+        // #of address slots in each new buddy hash bucket.
+        final int slotsPerNewBuddy = (1 << newDepth);
 
-            /*
-             * Move top 1/2 of the buddy buckets from the child to the new page.
-             */
-            {
+        // #of buddy tables on the old bucket page.
+        final int oldBuddyCount = (slotsOnPage) / slotsPerOldBuddy;
 
-                // target is the new page.
-                final BucketPage dstPage = newBucket;
-                final MutableKeyBuffer dstKeys = (MutableKeyBuffer)dstPage.getKeys();
-                final MutableValueBuffer dstVals = (MutableValueBuffer)dstPage.getValues();
-
-                // index (vs offset) of first buddy in upper half of src page.
-                final int firstSrcBuddyIndex = (oldBuddyCount >> 1);
+        // #of buddy tables on the bucket pages after the split.
+        final int newBuddyCount = (slotsOnPage) / slotsPerNewBuddy;
 
-                // exclusive upper bound for index (vs offset) of last buddy in
-                // upper half of src page.
-                final int lastSrcBuddyIndex = oldBuddyCount;
+        final BucketPage srcPage = oldBucket;
+        final MutableKeyBuffer srcKeys = (MutableKeyBuffer) oldBucket.getKeys();
+        final MutableValueBuffer srcVals = (MutableValueBuffer) oldBucket
+                .getValues();
 
-                // exclusive upper bound for index (vs offset) of last buddy in
-                // upper half of target page.
-                final int lastDstBuddyIndex = newBuddyCount;
+        /*
+         * Move top 1/2 of the buddy buckets from the child to the new page.
+         */
+        {
 
-                // work backwards over buddy buckets to avoid stomping data!
-                for (int srcBuddyIndex = lastSrcBuddyIndex - 1, dstBuddyIndex = lastDstBuddyIndex - 1; //
-                srcBuddyIndex >= firstSrcBuddyIndex; //
-                srcBuddyIndex--, dstBuddyIndex--//
-                ) {
+            // target is the new page.
+            final BucketPage dstPage = newBucket;
+            final MutableKeyBuffer dstKeys = (MutableKeyBuffer) dstPage
+                    .getKeys();
+            final MutableValueBuffer dstVals = (MutableValueBuffer) dstPage
+                    .getValues();
 
-                    final int firstSrcSlot = srcBuddyIndex * slotsPerOldBuddy;
+            // index (vs offset) of first buddy in upper half of src page.
+            final int firstSrcBuddyIndex = (oldBuddyCount >> 1);
 
-                    final int lastSrcSlot = (srcBuddyIndex + 1)
-                            * slotsPerOldBuddy;
+            // exclusive upper bound for index (vs offset) of last buddy in
+            // upper half of src page.
+            final int lastSrcBuddyIndex = oldBuddyCount;
 
-                    final int firstDstSlot = dstBuddyIndex * slotsPerNewBuddy;
+            // exclusive upper bound for index (vs offset) of last buddy in
+            // upper half of target page.
+            final int lastDstBuddyIndex = newBuddyCount;
 
-                    for (int srcSlot = firstSrcSlot, dstSlot = firstDstSlot; srcSlot < lastSrcSlot; srcSlot++, dstSlot++) {
+            // work backwards over buddy buckets to avoid stomping data!
+            for (int srcBuddyIndex = lastSrcBuddyIndex - 1, dstBuddyIndex = lastDstBuddyIndex - 1; //
+            srcBuddyIndex >= firstSrcBuddyIndex; //
+            srcBuddyIndex--, dstBuddyIndex--//
+            ) {
 
-                        if (log.isTraceEnabled())
-                            log.trace("moving: page(" + srcPage.toShortString()
-                                    + "=>" + dstPage.toShortString() + ")"
-                                    + ", buddyIndex(" + srcBuddyIndex + "=>"
-                                    + dstBuddyIndex + ")" + ", slot(" + srcSlot
-                                    + "=>" + dstSlot + ")");
-
-                        // Move the tuple at that slot TODO move metadata also.
-                        dstKeys.keys[dstSlot] = srcKeys.keys[srcSlot];
-                        dstVals.values[dstSlot] = srcVals.values[srcSlot];
-                        srcKeys.keys[srcSlot] = null;
-                        srcVals.values[srcSlot] = null;
+                final int firstSrcSlot = srcBuddyIndex * slotsPerOldBuddy;
 
-                    }
+                final int lastSrcSlot = (srcBuddyIndex + 1) * slotsPerOldBuddy;
 
+                final int firstDstSlot = dstBuddyIndex * slotsPerNewBuddy;
+
+                for (int srcSlot = firstSrcSlot, dstSlot = firstDstSlot; srcSlot < lastSrcSlot; srcSlot++, dstSlot++) {
+
+                    if (log.isTraceEnabled())
+                        log.trace("moving: page(" + srcPage.toShortString()
+                                + "=>" + dstPage.toShortString() + ")"
+                                + ", buddyIndex(" + srcBuddyIndex + "=>"
+                                + dstBuddyIndex + ")" + ", slot(" + srcSlot
+                                + "=>" + dstSlot + ")");
+
+                    // Move the tuple at that slot TODO move metadata also.
+                    dstKeys.keys[dstSlot] = srcKeys.keys[srcSlot];
+                    dstVals.values[dstSlot] = srcVals.values[srcSlot];
+                    srcKeys.keys[srcSlot] = null;
+                    srcVals.values[srcSlot] = null;
+
+                }
 
             }
 
-            /*
-             * Reposition the bottom 1/2 of the buddy buckets on the old page.
-             */
-            {
+        }
 
-                // target is the old page.
-                final BucketPage dstPage = oldBucket;
-                final MutableKeyBuffer dstKeys = (MutableKeyBuffer)dstPage.getKeys();
-                final MutableValueBuffer dstVals = (MutableValueBuffer)dstPage.getValues();
-
-                // index (vs offset) of first buddy in lower half of src page.
-                final int firstSrcBuddyIndex = 0;
+        /*
+         * Reposition the bottom 1/2 of the buddy buckets on the old page.
+         *
+         * Again, we have to move backwards through the buddy buckets on the
+         * source page to avoid overwrites of data which has not yet been
+         * copied. Also, notice that the buddy bucket at index ZERO does not
+         * move - it is already in place even though it's size has doubled.
+         */
+        {
 
-                // exclusive upper bound for index (vs offset) of last buddy in
-                // lower half of src page.
+            // target is the old page.
+            final BucketPage dstPage = oldBucket;
+            final MutableKeyBuffer dstKeys = (MutableKeyBuffer) dstPage
+                    .getKeys();
+            final MutableValueBuffer dstVals = (MutableValueBuffer) dstPage
+                    .getValues();
 
-                final int lastSrcBuddyIndex = (oldBuddyCount >> 1);
+            // index (vs offset) of first buddy in lower half of src page.
+            final int firstSrcBuddyIndex = 0;
 
-                // exclusive upper bound for index (vs offset) of last buddy in
-                // upper half of target page (which is also the source page).
-                final int lastDstBuddyIndex = newBuddyCount;
-
-                /*
-                 * Work backwards over buddy buckets to avoid stomping data!
-                 *
-                 * Note: The slots for first buddy in the lower half of the
-                 * source page DO NOT MOVE. The offset of that buddy in the
-                 * first page remains unchanged. Only the size of the buddy is
-                 * changed (it is doubled, just like all the other buddies on
-                 * the page).
-                 */
-                for (int srcBuddyIndex = lastSrcBuddyIndex - 1, dstBuddyIndex = lastDstBuddyIndex - 1; //
-                srcBuddyIndex > firstSrcBuddyIndex; //
-                srcBuddyIndex--, dstBuddyIndex--//
-                ) {
+            // exclusive upper bound for index (vs offset) of last buddy in
+            // lower half of src page.
+            final int lastSrcBuddyIndex = (oldBuddyCount >> 1);
 
-                    final int firstSrcSlot = srcBuddyIndex * slotsPerOldBuddy;
+            // exclusive upper bound for index (vs offset) of last buddy in
+            // upper half of target page (which is also the source page).
+            final int lastDstBuddyIndex = newBuddyCount;
 
-                    final int lastSrcSlot = (srcBuddyIndex + 1)
-                            * slotsPerOldBuddy;
+            /*
+             * Work backwards over buddy buckets to avoid stomping data!
+             *
+             * Note: The slots for first buddy in the lower half of the source
+             * page DO NOT MOVE. The offset of that buddy in the first page
+             * remains unchanged. Only the size of the buddy is changed (it is
+             * doubled, just like all the other buddies on the page).
+             */
+            for (int srcBuddyIndex = lastSrcBuddyIndex - 1, dstBuddyIndex = lastDstBuddyIndex - 1; //
+            srcBuddyIndex > firstSrcBuddyIndex; // DO NOT move 1st buddy bucket!
+            srcBuddyIndex--, dstBuddyIndex--//
+            ) {
 
-                    final int firstDstSlot = dstBuddyIndex * slotsPerNewBuddy;
+                final int firstSrcSlot = srcBuddyIndex * slotsPerOldBuddy;
 
-                    for (int srcSlot = firstSrcSlot, dstSlot = firstDstSlot; srcSlot < lastSrcSlot; srcSlot++, dstSlot++) {
+                final int lastSrcSlot = (srcBuddyIndex + 1) * slotsPerOldBuddy;
 
-                        if (log.isTraceEnabled())
-                            log.trace("moving: page(" + srcPage.toShortString()
-                                    + "=>" + dstPage.toShortString() + ")"
-                                    + ", buddyIndex(" + srcBuddyIndex + "=>"
-                                    + dstBuddyIndex + ")" + ", slot(" + srcSlot
-                                    + "=>" + dstSlot + ")");
+                final int firstDstSlot = dstBuddyIndex * slotsPerNewBuddy;
 
-                        // Move the tuple at that slot TODO move metadata also.
-                        dstKeys.keys[dstSlot] = srcKeys.keys[srcSlot];
-                        dstVals.values[dstSlot] = srcVals.values[srcSlot];
-                        srcKeys.keys[srcSlot] = null;
-                        srcVals.values[srcSlot] = null;
+                for (int srcSlot = firstSrcSlot, dstSlot = firstDstSlot; srcSlot < lastSrcSlot; srcSlot++, dstSlot++) {
 
-                    }
+                    if (log.isTraceEnabled())
+                        log.trace("moving: page(" + srcPage.toShortString()
+                                + "=>" + dstPage.toShortString() + ")"
+                                + ", buddyIndex(" + srcBuddyIndex + "=>"
+                                + dstBuddyIndex + ")" + ", slot(" + srcSlot
+                                + "=>" + dstSlot + ")");
 
+                    // Move the tuple at that slot TODO move metadata also.
+                    dstKeys.keys[dstSlot] = srcKeys.keys[srcSlot];
+                    dstVals.values[dstSlot] = srcVals.values[srcSlot];
+                    srcKeys.keys[srcSlot] = null;
+                    srcVals.values[srcSlot] = null;
+
+                }
 
             }
 
         }
 
-        // TODO assert invariants?
-
     }
 
     /**
@@ -897,7 +956,7 @@
 
     /**
      * The size of the address space (in bits) for each buddy hash table on
-     * a directory page. The legal range is <code>[0:addressBits]</code>.
+     * a directory page. The legal range is <code>[0:addressBits-1]</code>.
      * <p>
      * When the global depth is increased, the hash table requires twice as
     * many slots on the page. This forces the split of the directory page
@@ -907,8 +966,9 @@
     * minimum global depth is ZERO (0), at which point the buddy hash table
     * has a single slot.
     * <p>
-    * The global depth of a directory page is just the local depth of the
-    * directory page in its parent.
+    * The global depth of a child page is just the local depth of the
+    * directory page in its parent. The global depth of the child page
+    * is often called its <em>local depth</em>.
     *
     * TODO Since the root directory does not have a parent, its global
    * depth is recorded in the checkpoint record [actually, the global
@@ -1959,6 +2019,20 @@
 
     }
 
+    /*
+     * TODO We should dump each bucket page once. This could be done either
+     * by dumping each buddy bucket on the page separately or by skipping
+     * through the directory page until we get to the next bucket page and
+     * then dumping that.
+     *
+     * TODO The directory page validation should include checks on the
+     * bucket references and addresses. For a given buddy hash table, the
+     * reference and address should pairs should be consistent if either the
+     * reference or the address appears in another slot of that table. Also,
+     * there can not be "gaps" between observations of a reference to a
+     * given bucket - once you see another bucket reference a previously
+     * observed reference can not then appear.
+     */
     @Override
     protected boolean dump(Level level, PrintStream out, int height,
             boolean recursive, boolean materialize) {

Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java	2011-05-11 03:20:08 UTC (rev 4481)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/htree/TestHTree.java	2011-05-11 14:26:34 UTC (rev 4482)
@@ -27,11 +27,13 @@
 
 package com.bigdata.htree;
 
+import junit.framework.TestCase2;
+
 import org.apache.log4j.Level;
 
-import junit.framework.TestCase2;
-
 import com.bigdata.btree.AbstractBTreeTestCase;
+import com.bigdata.htree.HTree.BucketPage;
+import com.bigdata.htree.HTree.DirectoryPage;
 import com.bigdata.rawstore.IRawStore;
 import com.bigdata.rawstore.SimpleMemoryRawStore;
 
@@ -40,6 +42,10 @@
  *
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  * @version $Id$
+ *
+ *          TODO Unit test for the code path where the sole buddy bucket on a
+ *          page consists solely of duplicate keys thus forcing the size of the
+ *          bucket page to double rather than splitting the page.
  */
 public class TestHTree extends TestCase2 {
 
@@ -146,13 +152,14 @@
 
     }
 
     /**
-     * Simple test of basic CRUD operations using an address space with only TWO
-     * (2) bits (insert, contains, remove, etc).
+     * Test of basic insert, lookup, and split page operations (including when a
+     * new directory page must be introduced) using an address space with only
+     * TWO (2) bits.
      *
-     * TODO Verify that we can store keys having more than 2 bits (or 4 bits in
-     * a 4-bit address space) through a deeper hash tree.
+     * @see bigdata/src/architecture/htree.xls
      *
-     * TODO Do a worksheet example for this test case.
+     * TODO Verify that we can store keys having more than 2 bits (or 4
+     * bits in a 4-bit address space) through a deeper hash tree.
      */
     public void test_example_addressBits2_01() {
 
@@ -168,11 +175,22 @@
         assertTrue("store", store == htree.getStore());
         assertEquals("addressBits", addressBits, htree.getAddressBits());
 
+        final DirectoryPage root = htree.getRoot();
+        assertEquals(4, root.childRefs.length);
+        final BucketPage a = (BucketPage) root.childRefs[0].get();
+        assertTrue(a == (BucketPage) root.childRefs[1].get());
+        assertTrue(a == (BucketPage) root.childRefs[2].get());
+        assertTrue(a == (BucketPage) root.childRefs[3].get());
+        assertEquals(2, root.getGlobalDepth());// starts at max.
+        assertEquals(0, a.getGlobalDepth());// starts at min.
+
         // verify preconditions.
assertEquals("nnodes", 1, htree.getNodeCount()); assertEquals("nleaves", 1, htree.getLeafCount()); assertEquals("nentries", 0, htree.getEntryCount()); htree.dump(Level.ALL, System.err, true/* materialize */); + assertEquals(2, root.getGlobalDepth()); + assertEquals(0, a.getGlobalDepth()); assertFalse(htree.contains(new byte[] { 0x01 })); assertFalse(htree.contains(new byte[] { 0x02 })); assertEquals(null,htree.lookupFirst(new byte[] { 0x01 })); @@ -182,12 +200,17 @@ AbstractBTreeTestCase.assertSameIterator(// new byte[][] {}, htree.lookupAll(new byte[] { 0x02 })); - // insert a tuple and verify post-conditions. + /* + * 1. Insert a tuple and verify post-conditions. The tuple goes into + * an empty buddy bucket with a capacity of one. + */ htree.insert(new byte[] { 0x01 }, new byte[] { 0x01 }); assertEquals("nnodes", 1, htree.getNodeCount()); assertEquals("nleaves", 1, htree.getLeafCount()); assertEquals("nentries", 1, htree.getEntryCount()); htree.dump(Level.ALL, System.err, true/* materialize */); + assertEquals(2, root.getGlobalDepth()); + assertEquals(0, a.getGlobalDepth()); assertTrue(htree.contains(new byte[] { 0x01 })); assertFalse(htree.contains(new byte[] { 0x02 })); assertEquals(new byte[] { 0x01 }, htree @@ -200,12 +223,13 @@ new byte[][] {}, htree.lookupAll(new byte[] { 0x02 })); /* - * Insert a duplicate key. Since the localDepth of the bucket page - * is zero, each buddy bucket on the page can only accept one entry - * and this will force a split of the buddy bucket. That means that - * a new bucket page will be allocated, the pointers in the parent - * will be updated to link in the new buck page, and the buddy - * buckets will be redistributed among the old and new bucket page. + * 2. Insert a duplicate key. Since the localDepth of the bucket + * page is zero, each buddy bucket on the page can only accept one + * entry and this will force a split of the buddy bucket. That means + * that a new bucket page will be allocated, the pointers in the + * parent will be updated to link in the new buck page, and the + * buddy buckets will be redistributed among the old and new bucket + * page. * * Note: We do not know the order of the tuples in the bucket so * lookupFirst() is difficult to test when there are tuples for the @@ -216,6 +240,16 @@ assertEquals("nleaves", 2, htree.getLeafCount()); assertEquals("nentries", 2, htree.getEntryCount()); htree.dump(Level.ALL, System.err, true/* materialize */); + assertTrue(root == htree.getRoot()); + assertEquals(4, root.childRefs.length); + final BucketPage b = (BucketPage) root.childRefs[2].get(); + assertTrue(a == (BucketPage) root.childRefs[0].get()); + assertTrue(a == (BucketPage) root.childRefs[1].get()); + assertTrue(b == (BucketPage) root.childRefs[2].get()); + assertTrue(b == (BucketPage) root.childRefs[3].get()); + assertEquals(2, root.getGlobalDepth()); + assertEquals(1, a.getGlobalDepth());// localDepth has increased. + assertEquals(1, b.getGlobalDepth());// localDepth is same as [a]. assertTrue(htree.contains(new byte[] { 0x01 })); assertFalse(htree.contains(new byte[] { 0x02 })); assertEquals(new byte[] { 0x01 }, htree @@ -227,8 +261,43 @@ htree.lookupAll(new byte[] { 0x01 })); AbstractBTreeTestCase.assertSameIterator(// new byte[][] {}, htree.lookupAll(new byte[] { 0x02 })); + + /* + * 3. Insert another duplicate key. This forces another split. + * + * TODO Could check the #of entries on a bucket page and even in + * each bucket of the bucket pages. 
+         */
+        htree.insert(new byte[] { 0x01 }, new byte[] { 0x01 });
+        assertEquals("nnodes", 1, htree.getNodeCount());
+        assertEquals("nleaves", 3, htree.getLeafCount());
+        assertEquals("nentries", 3, htree.getEntryCount());
+        htree.dump(Level.ALL, System.err, true/* materialize */);
+        assertTrue(root == htree.getRoot());
+        assertEquals(4, root.childRefs.length);
+        final BucketPage c = (BucketPage) root.childRefs[1].get();
+        assertTrue(a == (BucketPage) root.childRefs[0].get());
+        assertTrue(c == (BucketPage) root.childRefs[1].get());
+        assertTrue(b == (BucketPage) root.childRefs[2].get());
+        assertTrue(b == (BucketPage) root.childRefs[3].get());
+        assertEquals(2, root.getGlobalDepth());
+        assertEquals(2, a.getGlobalDepth());// localDepth has increased.
+        assertEquals(1, b.getGlobalDepth());//
+        assertEquals(2, c.getGlobalDepth());// localDepth is same as [a].
+        assertTrue(htree.contains(new byte[] { 0x01 }));
+        assertFalse(htree.contains(new byte[] { 0x02 }));
+        assertEquals(new byte[] { 0x01 }, htree
+                .lookupFirst(new byte[] { 0x01 }));
+        assertNull(htree.lookupFirst(new byte[] { 0x02 }));
+        AbstractBTreeTestCase.assertSameIterator(
+                //
+                new byte[][] { new byte[] { 0x01 }, new byte[] { 0x01 },
+                        new byte[] { 0x01 } }, htree
+                        .lookupAll(new byte[] { 0x01 }));
+        AbstractBTreeTestCase.assertSameIterator(//
+                new byte[][] {}, htree.lookupAll(new byte[] { 0x02 }));
 
-        // TODO REMOVE.
+        // TODO REMOVE (or test suite for remove).
 
         // TODO Continue progression here?
 
@@ -245,7 +314,7 @@
      *
      * @see bigdata/src/architecture/htree.xls
      */
-    public void test_example_addressBits4_01() {
+    public void test_example_addressBits2_02() {
 
         fail("write test");
 
@@ -256,7 +325,7 @@
      * or directory page because nothing has been inserted into that part of
     * the address space.
     */
-    public void test_example_addressBits4_elidedPages() {
+    public void test_example_addressBits2_elidedPages() {
 
         fail("write test");

This was sent by the SourceForge.net collaborative development platform,
the world's largest Open Source development site.
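For readers following the split rule that this revision fixes, here is a minimal standalone sketch of the pointer-update behaviour described in the log message and in the new comments: when a child bucket page at local depth d is split under a parent directory page with global depth g, the parent's buddy hash table holds npointers = 1 << (g - d) contiguous pointers to that child, and the upper half of those pointers is redirected to the newly allocated page. This is not the committed HTree code; the class and method names (BuddyPointerSplitSketch, repointUpperHalf) are invented for illustration, and plain int arrays stand in for the childRefs / childAddr structures.

    import java.util.Arrays;

    // Illustrative sketch only (assumed simplification of the HTree split rule).
    public class BuddyPointerSplitSketch {

        /**
         * Redirect the upper half of the contiguous pointers to {@code oldChild}
         * so that they reference {@code newChild}.
         *
         * @param buddyTable  one buddy hash table of the parent (child page ids)
         * @param globalDepth global depth of the parent directory page
         * @param childDepth  local depth of the child before the split
         */
        static void repointUpperHalf(final int[] buddyTable, final int globalDepth,
                final int childDepth, final int oldChild, final int newChild) {

            // #of pointers in this buddy hash table to the old child.
            final int npointers = 1 << (globalDepth - childDepth);

            // Locate the first of the contiguous pointers to the old child.
            int firstPointer = -1;
            for (int i = 0; i < buddyTable.length; i++) {
                if (buddyTable[i] == oldChild) {
                    firstPointer = i;
                    break;
                }
            }
            if (firstPointer == -1)
                throw new IllegalStateException("No pointers to child");

            // The upper 1/2 of the pointers now reference the new page.
            for (int i = firstPointer + (npointers >> 1); i < firstPointer + npointers; i++) {
                buddyTable[i] = newChild;
            }
        }

        public static void main(String[] args) {
            // Parent with globalDepth=2 (4 slots); child at depth 0 -> 4 pointers.
            final int[] table = { 1, 1, 1, 1 };
            repointUpperHalf(table, 2, 0, 1, 2);
            System.out.println(Arrays.toString(table)); // prints [1, 1, 2, 2]
        }
    }

With addressBits = 2 this mirrors what test_example_addressBits2_01 asserts after the first split: slots 0 and 1 of the root still reference the original bucket page while slots 2 and 3 reference the new one.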