From: <tho...@us...> - 2012-02-06 15:06:22
Revision: 5965
          http://bigdata.svn.sourceforge.net/bigdata/?rev=5965&view=rev
Author:   thompsonbry
Date:     2012-02-06 15:06:13 +0000 (Mon, 06 Feb 2012)

Log Message:
-----------
Added txLog @ INFO for AbstractJournal open and close events.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java

Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-02-04 15:20:09 UTC (rev 5964)
+++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-02-06 15:06:13 UTC (rev 5965)
@@ -1083,7 +1083,12 @@
 
             // report event.
             ResourceManager.openJournal(getFile() == null ? null : getFile().toString(), size(), getBufferStrategy()
                     .getBufferMode());
-
+
+            if (txLog.isInfoEnabled())
+                txLog.info("OPEN-JOURNAL: uuid=" + getUUID() + ", file="
+                        + getFile() + ", bufferMode="
+                        + getBufferStrategy().getBufferMode());
+
         } finally {
 
             lock.unlock();
@@ -1344,8 +1349,8 @@
 
         assertOpen();
 
-        if (log.isInfoEnabled())
-            log.info("file=" + getFile());
+//        if (log.isInfoEnabled())
+//            log.info("file=" + getFile());
 
         _bufferStrategy.close();
 
@@ -1356,6 +1361,10 @@
 
         // report event.
         ResourceManager.closeJournal(getFile() == null ? null : getFile().toString());
 
+        if (txLog.isInfoEnabled())
+            txLog.info("CLOSE-JOURNAL: uuid=" + getUUID() + ", file="
+                    + getFile());
+
         if (LRUNexus.INSTANCE != null) {
 
             try {

Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-02-04 15:20:09 UTC (rev 5964)
+++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2012-02-06 15:06:13 UTC (rev 5965)
@@ -1084,6 +1084,11 @@
             ResourceManager.openJournal(getFile() == null ? null : getFile().toString(), size(), getBufferStrategy()
                     .getBufferMode());
+            if (txLog.isInfoEnabled())
+                txLog.info("OPEN-JOURNAL: uuid=" + getUUID() + ", file="
+                        + getFile() + ", bufferMode="
+                        + getBufferStrategy().getBufferMode());
+
         } finally {
 
             lock.unlock();
@@ -1344,8 +1349,8 @@
 
        assertOpen();
 
-        if (log.isInfoEnabled())
-            log.info("file=" + getFile());
+//        if (log.isInfoEnabled())
+//            log.info("file=" + getFile());
 
        _bufferStrategy.close();
 
@@ -1356,6 +1361,10 @@
 
        // report event.
        ResourceManager.closeJournal(getFile() == null ? null : getFile().toString());
 
+        if (txLog.isInfoEnabled())
+            txLog.info("CLOSE-JOURNAL: uuid=" + getUUID() + ", file="
+                    + getFile());
+
        if (LRUNexus.INSTANCE != null) {
 
            try {
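
The added log statements use the standard guarded-logging idiom so the message string is only built when INFO is actually enabled. A minimal standalone sketch of that idiom, assuming log4j 1.2 on the classpath; the logger category name used here is a guess, not necessarily the category declared for txLog in AbstractJournal:

    import org.apache.log4j.Logger;

    public class GuardedTxLogExample {

        // Hypothetical category name; check AbstractJournal for the real txLog declaration.
        private static final Logger txLog = Logger.getLogger("com.bigdata.txLog");

        public static void main(final String[] args) {
            final String file = "/var/data/example.jnl"; // placeholder value only

            // The guard avoids the cost of building the message when INFO is disabled;
            // the commit wraps the OPEN-JOURNAL / CLOSE-JOURNAL events the same way.
            if (txLog.isInfoEnabled())
                txLog.info("OPEN-JOURNAL: file=" + file);
        }
    }

Enabling the corresponding category at INFO in the log4j configuration will then surface the OPEN-JOURNAL and CLOSE-JOURNAL events at journal open and close.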
From: <tho...@us...> - 2012-02-07 15:20:04
Revision: 5974 http://bigdata.svn.sourceforge.net/bigdata/?rev=5974&view=rev Author: thompsonbry Date: 2012-02-07 15:19:53 +0000 (Tue, 07 Feb 2012) Log Message: ----------- Bug fix to NOT recycle the root node or leaf if it is unchanged from the last commit point. https://sourceforge.net/apps/trac/bigdata/ticket/473 (PhysicalAddressResolutionException after reopen using RWStore and recycler) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 14:34:58 UTC (rev 5973) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 15:19:53 UTC (rev 5974) @@ -963,18 +963,9 @@ /* * The bloom filter is enabled, is loaded and is dirty, so write * it on the store now. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ - -// /* -// * TODO The code to recycle the old checkpoint addr, the old -// * root addr, and the old bloom filter has been disabled in -// * writeCheckpoint2 and AbstractBTree#insert pending the -// * resolution of ticket #440. This is being done to minimize -// * the likelyhood that the underlying bug for that ticket -// * can be tripped by the code. -// * -// * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 -// */ recycle(filter.getAddr()); filter.write(store); @@ -997,20 +988,23 @@ } /* - * TODO The code to recycle the old checkpoint addr, the old - * root addr, and the old bloom filter has been disabled in - * writeCheckpoint2 and AbstractBTree#insert pending the - * resolution of ticket #440. This is being done to minimize - * the likelyhood that the underlying bug for that ticket - * can be tripped by the code. + * Recycle the old checkpoint record. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ - // delete old checkpoint data recycle(checkpoint != null ? checkpoint.addrCheckpoint : IRawStore.NULL); - // delete old root data if changed - recycle(checkpoint != null ? checkpoint.getRootAddr() : IRawStore.NULL); + /* + * Delete old root data iff changed. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/473 + * (PhysicalAddressResolutionException after reopen using RWStore and + * recycler) + */ + if (checkpoint != null && getRoot() != null + && checkpoint.getRootAddr() != getRoot().identity) { + recycle(checkpoint != null ? checkpoint.getRootAddr() : IRawStore.NULL); + } // create new checkpoint record. checkpoint = metadata.newCheckpoint(this); Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 14:34:58 UTC (rev 5973) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 15:19:53 UTC (rev 5974) @@ -963,18 +963,9 @@ /* * The bloom filter is enabled, is loaded and is dirty, so write * it on the store now. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ - -// /* -// * TODO The code to recycle the old checkpoint addr, the old -// * root addr, and the old bloom filter has been disabled in -// * writeCheckpoint2 and AbstractBTree#insert pending the -// * resolution of ticket #440. This is being done to minimize -// * the likelyhood that the underlying bug for that ticket -// * can be tripped by the code. -// * -// * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 -// */ recycle(filter.getAddr()); filter.write(store); @@ -997,20 +988,23 @@ } /* - * TODO The code to recycle the old checkpoint addr, the old - * root addr, and the old bloom filter has been disabled in - * writeCheckpoint2 and AbstractBTree#insert pending the - * resolution of ticket #440. This is being done to minimize - * the likelyhood that the underlying bug for that ticket - * can be tripped by the code. + * Recycle the old checkpoint record. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ - // delete old checkpoint data recycle(checkpoint != null ? checkpoint.addrCheckpoint : IRawStore.NULL); - // delete old root data if changed - recycle(checkpoint != null ? checkpoint.getRootAddr() : IRawStore.NULL); + /* + * Delete old root data iff changed. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/473 + * (PhysicalAddressResolutionException after reopen using RWStore and + * recycler) + */ + if (checkpoint != null && getRoot() != null + && checkpoint.getRootAddr() != getRoot().identity) { + recycle(checkpoint != null ? checkpoint.getRootAddr() : IRawStore.NULL); + } // create new checkpoint record. checkpoint = metadata.newCheckpoint(this); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
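
The essence of the fix is that writeCheckpoint2() must not free the old root address when the root is unchanged, because the new checkpoint would then point at a deleted record. A simplified standalone sketch of that rule (all names and structure here are illustrative, not the bigdata implementation):

    class CheckpointRecycleSketch {

        static final long NULL = 0L;

        long lastCheckpointAddr = NULL; // address of the previous checkpoint record
        long lastRootAddr = NULL;       // root address recorded by that checkpoint
        long currentRootAddr = NULL;    // root address as of this commit

        /** Free a record on the backing store (no-op for NULL). */
        void recycle(final long addr) {
            if (addr != NULL) {
                // store.delete(addr) in a real implementation
            }
        }

        void writeCheckpoint() {

            // The old checkpoint record is always superseded and may be freed.
            recycle(lastCheckpointAddr);

            // Free the old root only if it actually changed; otherwise the new
            // checkpoint still references it and freeing it leaves a dangling
            // address (the PhysicalAddressResolutionException of ticket 473).
            if (lastRootAddr != currentRootAddr) {
                recycle(lastRootAddr);
            }

            // ... write the new checkpoint record and remember its address ...
        }
    }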
From: <tho...@us...> - 2012-02-07 16:56:37
Revision: 5977 http://bigdata.svn.sourceforge.net/bigdata/?rev=5977&view=rev Author: thompsonbry Date: 2012-02-07 16:56:25 +0000 (Tue, 07 Feb 2012) Log Message: ----------- Enables IndexMetadata recycling (this code path is nearly never used as only scale-out tends to update the IndexMetadata object after the index has been created). Adds TestBTreeRecycle with a test suite focused on the behavior of writeCheckpoint2(). This covers Checkpoint, root addr, IndexMetadata, and bloom filter recycling. There are two versions of the test. One with and one without a bloom filter. The test suite does not examine how nodes and leaves are recycled in a large index, just an index having a single root leaf. @see http://sourceforge.net/apps/trac/bigdata/ticket/473 (PhysicalAddressResolutionException after reopen using RWStore and recycler) @see http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 15:36:26 UTC (rev 5976) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -974,14 +974,19 @@ } - // TODO: Ensure indexMetadata is recycled - if (metadata.getMetadataAddr() == 0L) { /* * The index metadata has been modified so we write out a new * metadata record on the store. */ + + if (checkpoint != null) { + + // Recycle the old IndexMetadata record (if any). + recycle(checkpoint.getMetadataAddr()); + + } metadata.write(store); Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java 2012-02-07 15:36:26 UTC (rev 5976) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -127,6 +127,8 @@ suite.addTestSuite(TestReopen.class); // test of storing null values under a key with persistence. suite.addTestSuite(TestNullValues.class); + // test recycling of checkpoint, root block, etc. + suite.addTestSuite(TestBTreeRecycle.class); /* * test of transient BTree's (no backing store). Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -0,0 +1,622 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. 
+ +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Feb 7, 2012 + */ + +package com.bigdata.btree; + +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.rawstore.IRawStore; +import com.bigdata.rawstore.SimpleMemoryRawStore; + +/** + * Test suite for recycling of B+Tree records. + * <p> + * Note: Due to the pattern by which a {@link BTree} is created, it is always + * loaded from an existing checkpoint. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/473"> + * PhysicalAddressResolutionException after reopen using RWStore and + * recycler</a> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestBTreeRecycle extends TestCase2 { + + /** + * + */ + public TestBTreeRecycle() { + } + + /** + * @param name + */ + public TestBTreeRecycle(String name) { + super(name); + } + +// /** +// * Helper class overrides {@link #recycle(long)} in order to observe +// * recycling events. +// */ +// private static class RecycleListenerBTree extends BTree { +// +// public RecycleListenerBTree(IRawStore store, Checkpoint checkpoint, +// IndexMetadata metadata, boolean readOnly) { +// super(store, checkpoint, metadata, readOnly); +// } +// +// @Override +// protected int recycle(final long addr) { +// return super.recycle(addr); +// } +// +// } + + /** + * Helper class overrides {@link IRawStore#delete(long)} to notice delete + * events. + */ + private static class RawStoreDeleteListener extends SimpleMemoryRawStore + { + + /** + * A set of addresses which SHOULD be deleted. The basic pattern is that + * you add addresses to this collection before the operation which will + * cause those addresses to be recycled. You then do that operation. You + * then verify that the collection is empty. Addresses which are NOT + * found in this collection when a {@link #delete(long)} is observed + * will result in a thrown exception. + */ + private final Set<Long> addrs = new HashSet<Long>(); + + /** + * Add an address which should be deleted to the set of such addresses. + * + * @param addr + * An address which should be deleted. + */ + public void expectDelete(final long addr) { + + if (addr == IRawStore.NULL) + fail("Not allowed to expect a NULL address"); + + if (!addrs.add(addr)) { + + fail("Address already in expectedDelete set: " + addr); + + } + + } + + /** + * Assert that all addresses which should have been deleted were in fact + * deleted. 
+ */ + public void assertDeleteSetEmpty() { + + if (!addrs.isEmpty()) + fail("expectedDeleteAddrs is not empty: " + addrs); + + } + + @Override + public void delete(final long addr) { + + if(addr == IRawStore.NULL) { + + fail("Not allowed to delete a NULL address"); + + } + + if (!addrs.remove(addr)) { + + fail("Not expecting delete: addr=" + addr); + + } + + super.delete(addr); + + } + + } + + /** + * Unit test examines case of a {@link BTree} without a bloom filter. + */ + public void test_writeCheckpoint_btree() { + + final byte[] key0 = new byte[] { 1, 2, 3 }; + final byte[] val0 = new byte[] { 1, 2, 3 }; + + Checkpoint lastCheckpoint = null; + + final RawStoreDeleteListener store = new RawStoreDeleteListener(); + + try { + + final BTree btree; + { + + final IndexMetadata md = new IndexMetadata(getName(), + UUID.randomUUID()); + + md.setBranchingFactor(3); + + btree = BTree.create(store, md); + + } + + // Get the current checkpoint record. + lastCheckpoint = btree.getCheckpoint(); + + /* + * Initial checkpoint required because the btree root did not exist + * when we loaded the B+Tree from the disk. Therefore it is dirty + * and will be written out now. + */ + { + + // BTree is dirty. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Attempting to write a checkpoint on a clean BTree should return + * the old checkpoint reference. + */ + { + + final Checkpoint checkpoint3 = btree.writeCheckpoint2(); + + assertTrue(checkpoint3 == lastCheckpoint); + + } + + /* + * Make the counter dirty. Verify that the BTree needs a checkpoint + * and verify that the checkpoint recycles only the correct records. + */ + { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + // Make the counter dirty. + btree.getCounter().incrementAndGet(); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Test where just the root has changed (insert or delete tuple). + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + btree.insert(key0, val0); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Root has new address. 
+ assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Unit test where the {@link IndexMetadata} has changed. + * + * Note: There are not a lot of ways in which you are allowed to + * change the IndexMetadata once the index has been created. This + * picks one of them. + */ + if(true) { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + final IndexMetadata md = btree.getIndexMetadata().clone(); + + md.setIndexSegmentBranchingFactor(40); + + btree.setIndexMetadata(md); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be deleted. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // IndexMetadata record should be deleted. + store.expectDelete(lastCheckpoint.getMetadataAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // IndexMetadata has new address. + assertNotSame(newCheckpoint.getMetadataAddr(), + lastCheckpoint.getMetadataAddr()); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + } finally { + + store.destroy(); + + } + + } + + /** + * Unit test examines case of a {@link BTree} with a bloom filter. + */ + public void test_writeCheckpoint_btree_bloomFilter() { + + final byte[] key0 = new byte[] { 1, 2, 3 }; + final byte[] key1 = new byte[] { 1, 2, 3, 4 }; + final byte[] val0 = new byte[] { 1, 2, 3 }; + final byte[] val1 = new byte[] { 1, 2, 3, 4 }; + + Checkpoint lastCheckpoint = null; + + final RawStoreDeleteListener store = new RawStoreDeleteListener(); + + try { + + final BTree btree; + { + + final IndexMetadata md = new IndexMetadata(getName(), + UUID.randomUUID()); + + md.setBranchingFactor(3); + + // enable bloom filter for this version of the test. + md.setBloomFilterFactory(BloomFilterFactory.DEFAULT); + + btree = BTree.create(store, md); + + } + + // Get the current checkpoint record. + lastCheckpoint = btree.getCheckpoint(); + + /* + * Initial checkpoint required because the btree root did not exist + * when we loaded the B+Tree from the disk. Therefore it is dirty + * and will be written out now. + */ + { + + // BTree is dirty. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Attempting to write a checkpoint on a clean BTree should return + * the old checkpoint reference. 
+ */ + { + + final Checkpoint checkpoint3 = btree.writeCheckpoint2(); + + assertTrue(checkpoint3 == lastCheckpoint); + + } + + /* + * Make the counter dirty. Verify that the BTree needs a checkpoint + * and verify that the checkpoint recycles only the correct records. + */ + { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + // Make the counter dirty. + btree.getCounter().incrementAndGet(); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Test where just the root has changed (insert or delete tuple). + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + btree.insert(key0, val0); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // The bloom filter is NULL until written on. + assertEquals(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + // Bloom filter is non-null + assertNotSame(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + } + + /* + * Test where just the root has changed again (insert or delete tuple). + * + * Note: This time the bloom filter address in the checkpoint is non-null. + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + // Bloom filter is non-null + assertNotSame(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + btree.insert(key1, val1); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // Should recycle the old bloom filter. + store.expectDelete(lastCheckpoint.getBloomFilterAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Bloom filter has new address. + assertNotSame(newCheckpoint.getBloomFilterAddr(), + lastCheckpoint.getBloomFilterAddr()); + + // Verify that a new checkpoint was written. 
+ assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Unit test where the {@link IndexMetadata} has changed. + * + * Note: There are not a lot of ways in which you are allowed to + * change the IndexMetadata once the index has been created. This + * picks one of them. + */ + if(true) { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + final IndexMetadata md = btree.getIndexMetadata().clone(); + + md.setIndexSegmentBranchingFactor(40); + + btree.setIndexMetadata(md); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be deleted. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // IndexMetadata record should be deleted. + store.expectDelete(lastCheckpoint.getMetadataAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // IndexMetadata has new address. + assertNotSame(newCheckpoint.getMetadataAddr(), + lastCheckpoint.getMetadataAddr()); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + } finally { + + store.destroy(); + + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 15:36:26 UTC (rev 5976) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/BTree.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -973,20 +973,25 @@ } } - - // TODO: Ensure indexMetadata is recycled - + if (metadata.getMetadataAddr() == 0L) { /* * The index metadata has been modified so we write out a new * metadata record on the store. */ + + if (checkpoint != null) { + + // Recycle the old IndexMetadata record (if any). + recycle(checkpoint.getMetadataAddr()); + + } metadata.write(store); } - + /* * Recycle the old checkpoint record. * Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java 2012-02-07 15:36:26 UTC (rev 5976) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestAll_BTreeBasics.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -127,6 +127,8 @@ suite.addTestSuite(TestReopen.class); // test of storing null values under a key with persistence. suite.addTestSuite(TestNullValues.class); + // test recycling of checkpoint, root block, etc. + suite.addTestSuite(TestBTreeRecycle.class); /* * test of transient BTree's (no backing store). 
Added: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java (rev 0) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java 2012-02-07 16:56:25 UTC (rev 5977) @@ -0,0 +1,622 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Feb 7, 2012 + */ + +package com.bigdata.btree; + +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.rawstore.IRawStore; +import com.bigdata.rawstore.SimpleMemoryRawStore; + +/** + * Test suite for recycling of B+Tree records. + * <p> + * Note: Due to the pattern by which a {@link BTree} is created, it is always + * loaded from an existing checkpoint. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/473"> + * PhysicalAddressResolutionException after reopen using RWStore and + * recycler</a> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestBTreeRecycle extends TestCase2 { + + /** + * + */ + public TestBTreeRecycle() { + } + + /** + * @param name + */ + public TestBTreeRecycle(String name) { + super(name); + } + +// /** +// * Helper class overrides {@link #recycle(long)} in order to observe +// * recycling events. +// */ +// private static class RecycleListenerBTree extends BTree { +// +// public RecycleListenerBTree(IRawStore store, Checkpoint checkpoint, +// IndexMetadata metadata, boolean readOnly) { +// super(store, checkpoint, metadata, readOnly); +// } +// +// @Override +// protected int recycle(final long addr) { +// return super.recycle(addr); +// } +// +// } + + /** + * Helper class overrides {@link IRawStore#delete(long)} to notice delete + * events. + */ + private static class RawStoreDeleteListener extends SimpleMemoryRawStore + { + + /** + * A set of addresses which SHOULD be deleted. The basic pattern is that + * you add addresses to this collection before the operation which will + * cause those addresses to be recycled. You then do that operation. You + * then verify that the collection is empty. Addresses which are NOT + * found in this collection when a {@link #delete(long)} is observed + * will result in a thrown exception. + */ + private final Set<Long> addrs = new HashSet<Long>(); + + /** + * Add an address which should be deleted to the set of such addresses. + * + * @param addr + * An address which should be deleted. 
+ */ + public void expectDelete(final long addr) { + + if (addr == IRawStore.NULL) + fail("Not allowed to expect a NULL address"); + + if (!addrs.add(addr)) { + + fail("Address already in expectedDelete set: " + addr); + + } + + } + + /** + * Assert that all addresses which should have been deleted were in fact + * deleted. + */ + public void assertDeleteSetEmpty() { + + if (!addrs.isEmpty()) + fail("expectedDeleteAddrs is not empty: " + addrs); + + } + + @Override + public void delete(final long addr) { + + if(addr == IRawStore.NULL) { + + fail("Not allowed to delete a NULL address"); + + } + + if (!addrs.remove(addr)) { + + fail("Not expecting delete: addr=" + addr); + + } + + super.delete(addr); + + } + + } + + /** + * Unit test examines case of a {@link BTree} without a bloom filter. + */ + public void test_writeCheckpoint_btree() { + + final byte[] key0 = new byte[] { 1, 2, 3 }; + final byte[] val0 = new byte[] { 1, 2, 3 }; + + Checkpoint lastCheckpoint = null; + + final RawStoreDeleteListener store = new RawStoreDeleteListener(); + + try { + + final BTree btree; + { + + final IndexMetadata md = new IndexMetadata(getName(), + UUID.randomUUID()); + + md.setBranchingFactor(3); + + btree = BTree.create(store, md); + + } + + // Get the current checkpoint record. + lastCheckpoint = btree.getCheckpoint(); + + /* + * Initial checkpoint required because the btree root did not exist + * when we loaded the B+Tree from the disk. Therefore it is dirty + * and will be written out now. + */ + { + + // BTree is dirty. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Attempting to write a checkpoint on a clean BTree should return + * the old checkpoint reference. + */ + { + + final Checkpoint checkpoint3 = btree.writeCheckpoint2(); + + assertTrue(checkpoint3 == lastCheckpoint); + + } + + /* + * Make the counter dirty. Verify that the BTree needs a checkpoint + * and verify that the checkpoint recycles only the correct records. + */ + { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + // Make the counter dirty. + btree.getCounter().incrementAndGet(); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Test where just the root has changed (insert or delete tuple). + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + btree.insert(key0, val0); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. 
+ store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Unit test where the {@link IndexMetadata} has changed. + * + * Note: There are not a lot of ways in which you are allowed to + * change the IndexMetadata once the index has been created. This + * picks one of them. + */ + if(true) { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + final IndexMetadata md = btree.getIndexMetadata().clone(); + + md.setIndexSegmentBranchingFactor(40); + + btree.setIndexMetadata(md); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be deleted. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // IndexMetadata record should be deleted. + store.expectDelete(lastCheckpoint.getMetadataAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // IndexMetadata has new address. + assertNotSame(newCheckpoint.getMetadataAddr(), + lastCheckpoint.getMetadataAddr()); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + } finally { + + store.destroy(); + + } + + } + + /** + * Unit test examines case of a {@link BTree} with a bloom filter. + */ + public void test_writeCheckpoint_btree_bloomFilter() { + + final byte[] key0 = new byte[] { 1, 2, 3 }; + final byte[] key1 = new byte[] { 1, 2, 3, 4 }; + final byte[] val0 = new byte[] { 1, 2, 3 }; + final byte[] val1 = new byte[] { 1, 2, 3, 4 }; + + Checkpoint lastCheckpoint = null; + + final RawStoreDeleteListener store = new RawStoreDeleteListener(); + + try { + + final BTree btree; + { + + final IndexMetadata md = new IndexMetadata(getName(), + UUID.randomUUID()); + + md.setBranchingFactor(3); + + // enable bloom filter for this version of the test. + md.setBloomFilterFactory(BloomFilterFactory.DEFAULT); + + btree = BTree.create(store, md); + + } + + // Get the current checkpoint record. + lastCheckpoint = btree.getCheckpoint(); + + /* + * Initial checkpoint required because the btree root did not exist + * when we loaded the B+Tree from the disk. Therefore it is dirty + * and will be written out now. + */ + { + + // BTree is dirty. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. 
+ assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Attempting to write a checkpoint on a clean BTree should return + * the old checkpoint reference. + */ + { + + final Checkpoint checkpoint3 = btree.writeCheckpoint2(); + + assertTrue(checkpoint3 == lastCheckpoint); + + } + + /* + * Make the counter dirty. Verify that the BTree needs a checkpoint + * and verify that the checkpoint recycles only the correct records. + */ + { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + // Make the counter dirty. + btree.getCounter().incrementAndGet(); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be recycled. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Test where just the root has changed (insert or delete tuple). + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + btree.insert(key0, val0); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // The bloom filter is NULL until written on. + assertEquals(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + // Bloom filter is non-null + assertNotSame(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + } + + /* + * Test where just the root has changed again (insert or delete tuple). + * + * Note: This time the bloom filter address in the checkpoint is non-null. + */ + { + + // Checkpoint is not required. + assertFalse(btree.needsCheckpoint()); + + // Bloom filter is non-null + assertNotSame(IRawStore.NULL, + lastCheckpoint.getBloomFilterAddr()); + + btree.insert(key1, val1); + + // Checkpoint is required. + assertTrue(btree.needsCheckpoint()); + + // Should recycle the old checkpoint record. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // Should recycle the old root node/leaf record. + store.expectDelete(lastCheckpoint.getRootAddr()); + + // Should recycle the old bloom filter. + store.expectDelete(lastCheckpoint.getBloomFilterAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. 
+ assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // Bloom filter has new address. + assertNotSame(newCheckpoint.getBloomFilterAddr(), + lastCheckpoint.getBloomFilterAddr()); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + /* + * Unit test where the {@link IndexMetadata} has changed. + * + * Note: There are not a lot of ways in which you are allowed to + * change the IndexMetadata once the index has been created. This + * picks one of them. + */ + if(true) { + + // BTree is clean. + assertFalse(btree.needsCheckpoint()); + + final IndexMetadata md = btree.getIndexMetadata().clone(); + + md.setIndexSegmentBranchingFactor(40); + + btree.setIndexMetadata(md); + + // BTree needs checkpoint. + assertTrue(btree.needsCheckpoint()); + + // Checkpoint record should be deleted. + store.expectDelete(lastCheckpoint.getCheckpointAddr()); + + // IndexMetadata record should be deleted. + store.expectDelete(lastCheckpoint.getMetadataAddr()); + + // Checkpoint the index. + final Checkpoint newCheckpoint = btree.writeCheckpoint2(); + + // Everything which should have been deleted was deleted. + store.assertDeleteSetEmpty(); + + // Root has new address. + assertNotSame(newCheckpoint.getRootAddr(), + lastCheckpoint.getRootAddr()); + + // IndexMetadata has new address. + assertNotSame(newCheckpoint.getMetadataAddr(), + lastCheckpoint.getMetadataAddr()); + + // Verify that a new checkpoint was written. + assertTrue(lastCheckpoint != newCheckpoint); + + lastCheckpoint = newCheckpoint; + + // No longer reports that the B+Tree is dirty. + assertFalse(btree.needsCheckpoint()); + + } + + } finally { + + store.destroy(); + + } + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/btree/TestBTreeRecycle.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
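
The test double at the heart of TestBTreeRecycle is a store that fails on any delete it was not told to expect. The same expect/verify pattern can be factored out on its own; a small sketch using a plain Set-based tracker (illustrative names only, independent of the bigdata classes):

    import java.util.HashSet;
    import java.util.Set;

    class ExpectedDeleteSet {

        private final Set<Long> addrs = new HashSet<Long>();

        /** Register an address that the operation under test must delete. */
        void expectDelete(final long addr) {
            if (!addrs.add(addr))
                throw new AssertionError("Address already expected: " + addr);
        }

        /** Invoke from the store's delete(addr) override. */
        void onDelete(final long addr) {
            if (!addrs.remove(addr))
                throw new AssertionError("Unexpected delete: addr=" + addr);
        }

        /** Invoke after the operation under test has run. */
        void assertEmpty() {
            if (!addrs.isEmpty())
                throw new AssertionError("Expected deletes did not occur: " + addrs);
        }
    }

In the test itself this corresponds to calling expectDelete() before writeCheckpoint2() and assertDeleteSetEmpty() afterwards, so that exactly the expected records are recycled and nothing else.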
From: <tho...@us...> - 2012-02-08 00:20:49
Revision: 5983 http://bigdata.svn.sourceforge.net/bigdata/?rev=5983&view=rev Author: thompsonbry Date: 2012-02-08 00:20:41 +0000 (Wed, 08 Feb 2012) Log Message: ----------- Added release notes for 1.0.6. Bumped version for release. @see https://sourceforge.net/apps/trac/bigdata/ticket/477 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/build.properties Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_6.txt branches/BIGDATA_RELEASE_1_1_0/bigdata/src/releases/RELEASE_1_0_6.txt Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_6.txt =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_6.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_6.txt 2012-02-08 00:20:41 UTC (rev 5983) @@ -0,0 +1,125 @@ +This is a 1.0.x maintenance release of bigdata(R). New users are encouraged to go directly to the 1.1.0 release. Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can checkout this release from: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_6 + +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- 100% native SPARQL 1.0 evaluation with lots of query optimizations; +- Fast RDFS+ inference and truth maintenance; +- Fast statement level provenance mode (SIDs). + +Road map [3]: + +- High-volume analytic query and SPARQL 1.1 query, including aggregations; +- SPARQL 1.1 Update, Property Paths, and Federation support; +- Simplified deployment, configuration, and administration for clusters; and +- High availability for the journal and the cluster. 
+ +Change log: + + Note: Versions with (*) require data migration. For details, see [9]. + +1.0.6 + +- http://sourceforge.net/apps/trac/bigdata/ticket/473 (PhysicalAddressResolutionException after reopen using RWStore and recycler) + +1.0.5 + +- http://sourceforge.net/apps/trac/bigdata/ticket/362 (Fix incompatible with log4j - slf4j bridge.) +- http://sourceforge.net/apps/trac/bigdata/ticket/440 (BTree can not be cast to Name2Addr) +- http://sourceforge.net/apps/trac/bigdata/ticket/453 (Releasing blob DeferredFree record) +- http://sourceforge.net/apps/trac/bigdata/ticket/467 (IllegalStateException trying to access lexicon index using RWStore with recycling) + +1.0.4 + +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) + - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). + - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata, please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata\xAE is a horizontally-scaled, general purpose storage and computing fabric +for ordered data (B+Trees), designed to operate on either a single server or a +cluster of commodity hardware. Bigdata\xAE uses dynamically partitioned key-range +shards in order to remove any realistic scaling limits - in principle, bigdata\xAE +may be deployed on 10s, 100s, or even thousands of machines and new capacity may +be added incrementally without requiring the full reload of all data. The bigdata\xAE +RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), +and datum level provenance. 
Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/releases/RELEASE_1_0_6.txt ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/BIGDATA_RELEASE_1_0_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/build.properties 2012-02-07 22:10:35 UTC (rev 5982) +++ branches/BIGDATA_RELEASE_1_0_0/build.properties 2012-02-08 00:20:41 UTC (rev 5983) @@ -50,7 +50,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.0.5 +build.ver=1.0.6 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to Added: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/releases/RELEASE_1_0_6.txt =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/releases/RELEASE_1_0_6.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/releases/RELEASE_1_0_6.txt 2012-02-08 00:20:41 UTC (rev 5983) @@ -0,0 +1,125 @@ +This is a 1.0.x maintenance release of bigdata(R). New users are encouraged to go directly to the 1.1.0 release. Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can checkout this release from: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_0_6 + +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- 100% native SPARQL 1.0 evaluation with lots of query optimizations; +- Fast RDFS+ inference and truth maintenance; +- Fast statement level provenance mode (SIDs). 
+ +Road map [3]: + +- High-volume analytic query and SPARQL 1.1 query, including aggregations; +- SPARQL 1.1 Update, Property Paths, and Federation support; +- Simplified deployment, configuration, and administration for clusters; and +- High availability for the journal and the cluster. + +Change log: + + Note: Versions with (*) require data migration. For details, see [9]. + +1.0.6 + +- http://sourceforge.net/apps/trac/bigdata/ticket/473 (PhysicalAddressResolutionException after reopen using RWStore and recycler) + +1.0.5 + +- http://sourceforge.net/apps/trac/bigdata/ticket/362 (Fix incompatible with log4j - slf4j bridge.) +- http://sourceforge.net/apps/trac/bigdata/ticket/440 (BTree can not be cast to Name2Addr) +- http://sourceforge.net/apps/trac/bigdata/ticket/453 (Releasing blob DeferredFree record) +- http://sourceforge.net/apps/trac/bigdata/ticket/467 (IllegalStateException trying to access lexicon index using RWStore with recycling) + +1.0.4 + +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) + - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). + - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata, please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata\xAE is a horizontally-scaled, general purpose storage and computing fabric +for ordered data (B+Trees), designed to operate on either a single server or a +cluster of commodity hardware. 
Bigdata\xAE uses dynamically partitioned key-range +shards in order to remove any realistic scaling limits - in principle, bigdata\xAE +may be deployed on 10s, 100s, or even thousands of machines and new capacity may +be added incrementally without requiring the full reload of all data. The bigdata\xAE +RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), +and datum level provenance. Property changes on: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/releases/RELEASE_1_0_6.txt ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2012-02-10 16:02:09
|
Revision: 6008 http://bigdata.svn.sourceforge.net/bigdata/?rev=6008&view=rev Author: thompsonbry Date: 2012-02-10 16:01:58 +0000 (Fri, 10 Feb 2012) Log Message: ----------- Commit of journal migration utility by Martyn. The utility prunes entries in the commit record index whose deferred deletes have already been released. This utility is necessary when upgrading from bigdata 1.0.4 to 1.0.6 if deferred deletes were enabled for the journal (minReleaseAge > 0). When minReleaseAge is ZERO (0), the database uses session protection mode and the older commit record entries will not cause a problem. This utility might also be necessary when moving from 1.1.0 to 1.1.1. @see https://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) Added Paths: ----------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java Added: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java (rev 0) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java 2012-02-10 16:01:58 UTC (rev 6008) @@ -0,0 +1,107 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Feb 10, 2012 + */ + +package com.bigdata.journal; + +import java.io.File; +import java.util.Properties; + +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.Journal; +import com.bigdata.journal.Options; +import com.bigdata.journal.RWStrategy; +import com.bigdata.rwstore.RWStore; + +/** + * Given an existing journal, ensure that any commitRecords that reference a + * time prior to the last deferred release time are removed. This provides a + * "fix" for opening a bigdata 1.0.4 journal in bigdata 1.0.6. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/480"> Error + * releasing deferred frees using 1.0.6 against a 1.0.4 journal</a> + * + * @author <a href="mailto:mar...@us...">Martyn Cutcher</a> + * @version $Id$ + */ +public class VerifyCommitRecordIndex { + + public static void main(final String[] args) { + if (args.length != 1) { + System.err.println("Usage: <journal filename>"); + + return; + } + + final String fname = args[0]; + final File f = new File(fname); + if (!f.exists()) { + System.err.println("File: " + fname + " not found"); + + return; + } + + final Properties props = new Properties(); + props.setProperty(Options.FILE, fname); + + final Journal jrnl = new Journal(props); + + // Check if journal is DISKRW + if (jrnl.getBufferStrategy().getBufferMode() != BufferMode.DiskRW) { + System.err.println("Buffer mode should be DiskRW not " + jrnl.getBufferStrategy().getBufferMode()); + + return; + } + + final RWStrategy rwstrategy = (RWStrategy) jrnl.getBufferStrategy(); + final RWStore rwstore = rwstrategy.getRWStore(); + + final IIndex commitRecordIndex = jrnl.getReadOnlyCommitRecordIndex(); + if (commitRecordIndex == null) { + System.err.println("Unexpected null commit record index"); + return; + } + + final IndexMetadata metadata = commitRecordIndex + .getIndexMetadata(); + + final byte[] zeroKey = metadata.getTupleSerializer() + .serializeKey(0L); + + final byte[] releaseKey = metadata.getTupleSerializer() + .serializeKey( rwstore.getLastDeferredReleaseTime()); + + final int removed = jrnl.removeCommitRecordEntries(zeroKey, releaseKey); + + System.out.println("Commit Record Index verified with " + removed + " records removed"); + + jrnl.commit(); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java (rev 0) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java 2012-02-10 16:01:58 UTC (rev 6008) @@ -0,0 +1,107 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Feb 10, 2012 + */ + +package com.bigdata.journal; + +import java.io.File; +import java.util.Properties; + +import com.bigdata.btree.IIndex; +import com.bigdata.btree.IndexMetadata; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.Journal; +import com.bigdata.journal.Options; +import com.bigdata.journal.RWStrategy; +import com.bigdata.rwstore.RWStore; + +/** + * Given an existing journal, ensure that any commitRecords that reference a + * time prior to the last deferred release time are removed. This provides a + * "fix" for opening a bigdata 1.0.4 journal in bigdata 1.0.6. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/480"> Error + * releasing deferred frees using 1.0.6 against a 1.0.4 journal</a> + * + * @author <a href="mailto:mar...@us...">Martyn Cutcher</a> + * @version $Id$ + */ +public class VerifyCommitRecordIndex { + + public static void main(final String[] args) { + if (args.length != 1) { + System.err.println("Usage: <journal filename>"); + + return; + } + + final String fname = args[0]; + final File f = new File(fname); + if (!f.exists()) { + System.err.println("File: " + fname + " not found"); + + return; + } + + final Properties props = new Properties(); + props.setProperty(Options.FILE, fname); + + final Journal jrnl = new Journal(props); + + // Check if journal is DISKRW + if (jrnl.getBufferStrategy().getBufferMode() != BufferMode.DiskRW) { + System.err.println("Buffer mode should be DiskRW not " + jrnl.getBufferStrategy().getBufferMode()); + + return; + } + + final RWStrategy rwstrategy = (RWStrategy) jrnl.getBufferStrategy(); + final RWStore rwstore = rwstrategy.getRWStore(); + + final IIndex commitRecordIndex = jrnl.getReadOnlyCommitRecordIndex(); + if (commitRecordIndex == null) { + System.err.println("Unexpected null commit record index"); + return; + } + + final IndexMetadata metadata = commitRecordIndex + .getIndexMetadata(); + + final byte[] zeroKey = metadata.getTupleSerializer() + .serializeKey(0L); + + final byte[] releaseKey = metadata.getTupleSerializer() + .serializeKey( rwstore.getLastDeferredReleaseTime()); + + final int removed = jrnl.removeCommitRecordEntries(zeroKey, releaseKey); + + System.out.println("Commit Record Index verified with " + removed + " records removed"); + + jrnl.commit(); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/VerifyCommitRecordIndex.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
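For anyone upgrading a 1.0.4 RWStore journal, a minimal sketch of how the utility above might be invoked follows. The driver class and the journal path are illustrative assumptions, not part of the commit; the only details taken from the committed code are that main() expects exactly one argument (the journal filename) and that the store must be using the DiskRW buffer mode.

    import com.bigdata.journal.VerifyCommitRecordIndex;

    /**
     * Hypothetical driver for the utility added in r6008. Assumes the bigdata
     * classes are on the classpath; the journal path below is an example value
     * for an RWStore journal written with deferred deletes enabled
     * (minReleaseAge > 0).
     */
    public class PruneCommitRecordsExample {

        public static void main(final String[] args) {

            // Equivalent to running the class directly, e.g.
            //   java -cp <bigdata classpath> \
            //       com.bigdata.journal.VerifyCommitRecordIndex /data/store.jnl
            VerifyCommitRecordIndex.main(new String[] { "/data/store.jnl" });

            // The utility reports how many stale commit record entries were
            // removed and then commits; on an already pruned journal it should
            // report zero removals.
        }
    }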
From: <tho...@us...> - 2012-02-10 16:21:55
|
Revision: 6009 http://bigdata.svn.sourceforge.net/bigdata/?rev=6009&view=rev Author: thompsonbry Date: 2012-02-10 16:21:44 +0000 (Fri, 10 Feb 2012) Log Message: ----------- Modified RWStore to NOT recycle commit records starting at commitTime := 0L. Instead, it starts recycling from lastDeferredReleaseTime+1. This is correct and also handles the case where the commit record index was not pruned when deferred deletes were recycled (bigdata 1.0.4). Commented AbstractJournal#getCommitRecord(long) to indicate that we must continue to check against the lastReleaseTime and refuse to return a commit record from the commit record index which is older than the lastReleaseTime. Both changes were applied to the 1.0.x and 1.1.x maintenance branches. @see https://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-02-10 16:01:58 UTC (rev 6008) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-02-10 16:21:44 UTC (rev 6009) @@ -3332,6 +3332,16 @@ public ICommitRecord getCommitRecord(final long commitTime) { if (this._bufferStrategy instanceof RWStrategy) { + /* + * There are some bigdata releases (such as 1.0.4) where the commit + * record index was not pruned when deferred deletes were recycled. + * By maintaining this test, we will correctly refuse to return a + * commit record for a commit point whose deferred deletes have been + * recycled, even when the commit record is still present in the + * commit record index. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/480 + */ if (commitTime <= ((RWStrategy) _bufferStrategy).getLastReleaseTime()) { return null; // no index available } Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-10 16:01:58 UTC (rev 6008) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-10 16:21:44 UTC (rev 6009) @@ -2509,12 +2509,17 @@ /* * Free deferrals. * - * Note: This adds one to the lastDeferredReleaseTime to give - * exclusive lower bound semantics. + * Note: Per ticket#480, we can not begin recycling from the first + * commit point in the commit record index as there are some bigdata + * versions (1.0.4) where we did not prune the commit record index. + * Therefore, this relies on the (lastDeferredReleaseTime+1) for the + * exclusive lower bound. This is avoids triggering an exception + * from an attempt to process deferred free blocks which have + * already been released. * - * FIXME Discuss lower bound again with Martyn. 0L should be Ok. 
+ * @see https://sourceforge.net/apps/trac/bigdata/ticket/480 */ - return freeDeferrals(journal, 0L /*m_lastDeferredReleaseTime + 1*/, + return freeDeferrals(journal, m_lastDeferredReleaseTime + 1, latestReleasableTime); } finally { Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-02-10 16:01:58 UTC (rev 6008) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2012-02-10 16:21:44 UTC (rev 6009) @@ -3332,6 +3332,16 @@ public ICommitRecord getCommitRecord(final long commitTime) { if (this._bufferStrategy instanceof RWStrategy) { + /* + * There are some bigdata releases (such as 1.0.4) where the commit + * record index was not pruned when deferred deletes were recycled. + * By maintaining this test, we will correctly refuse to return a + * commit record for a commit point whose deferred deletes have been + * recycled, even when the commit record is still present in the + * commit record index. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/480 + */ if (commitTime <= ((RWStrategy) _bufferStrategy).getLastReleaseTime()) { return null; // no index available } Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-10 16:01:58 UTC (rev 6008) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-10 16:21:44 UTC (rev 6009) @@ -2509,12 +2509,17 @@ /* * Free deferrals. * - * Note: This adds one to the lastDeferredReleaseTime to give - * exclusive lower bound semantics. + * Note: Per ticket#480, we can not begin recycling from the first + * commit point in the commit record index as there are some bigdata + * versions (1.0.4) where we did not prune the commit record index. + * Therefore, this relies on the (lastDeferredReleaseTime+1) for the + * exclusive lower bound. This is avoids triggering an exception + * from an attempt to process deferred free blocks which have + * already been released. * - * FIXME Discuss lower bound again with Martyn. 0L should be Ok. + * @see https://sourceforge.net/apps/trac/bigdata/ticket/480 */ - return freeDeferrals(journal, 0L /*m_lastDeferredReleaseTime + 1*/, + return freeDeferrals(journal, m_lastDeferredReleaseTime + 1, latestReleasableTime); } finally { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
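A compact numeric illustration of the new exclusive lower bound may help here. The sketch below is standalone and not RWStore code; the names mirror those in the diff above, but the commit time values are invented for the example.

    /**
     * Worked example of the recycling window after r6009. Standalone sketch;
     * the commit times are examples only.
     */
    public class DeferredFreeWindowExample {

        public static void main(final String[] args) {

            // Newest commit point whose deferred frees were already recycled.
            final long lastDeferredReleaseTime = 1000L;
            // Newest commit point now eligible for release.
            final long latestReleasableTime = 1500L;

            // Before r6009 the scan began at 0L, which could revisit commit
            // points whose deferred frees were released long ago (a 1.0.4
            // journal keeps those records in its commit record index), and the
            // attempt to free them again triggers the error in ticket #480.
            //
            // After r6009 the lower bound is exclusive: start one past the last
            // deferred release time, so only commit points 1001..1500 are
            // processed.
            final long fromTime = lastDeferredReleaseTime + 1;
            final long toTime = latestReleasableTime;

            System.out.println("process deferred frees for commit times "
                    + fromTime + " .. " + toTime);
        }
    }

The companion change to AbstractJournal.getCommitRecord(long) keeps the guard against the same stale entries: a commit record whose commitTime is at or below the last release time is treated as unavailable even when it is still present in the commit record index.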
From: <tho...@us...> - 2012-02-11 01:05:17
|
Revision: 6011 http://bigdata.svn.sourceforge.net/bigdata/?rev=6011&view=rev Author: thompsonbry Date: 2012-02-11 01:05:10 +0000 (Sat, 11 Feb 2012) Log Message: ----------- Updated the copyright in the banner. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/Banner.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2012-02-10 20:11:20 UTC (rev 6010) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/Banner.java 2012-02-11 01:05:10 UTC (rev 6011) @@ -345,7 +345,7 @@ "\n Affordable"+// "\n Web-Scale Computing for the Enterprise"+// "\n"+// - "\nCopyright SYSTAP, LLC 2006-2010. All rights reserved."+// + "\nCopyright SYSTAP, LLC 2006-2012. All rights reserved."+// "\n"+// "\n"+AbstractStatisticsCollector.fullyQualifiedHostName+// "\n"+new Date()+// Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/Banner.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/Banner.java 2012-02-10 20:11:20 UTC (rev 6010) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/Banner.java 2012-02-11 01:05:10 UTC (rev 6011) @@ -345,7 +345,7 @@ "\n Affordable"+// "\n Web-Scale Computing for the Enterprise"+// "\n"+// - "\nCopyright SYSTAP, LLC 2006-2010. All rights reserved."+// + "\nCopyright SYSTAP, LLC 2006-2012. All rights reserved."+// "\n"+// "\n"+AbstractStatisticsCollector.fullyQualifiedHostName+// "\n"+new Date()+// This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2012-02-16 17:44:20
|
Revision: 6034 http://bigdata.svn.sourceforge.net/bigdata/?rev=6034&view=rev Author: martyncutcher Date: 2012-02-16 17:44:12 +0000 (Thu, 16 Feb 2012) Log Message: ----------- Remove DirectFixedAllocator facility and provide correct reset semantics for RWStore first identified in ticket #482. The revised reset also now correctly handles unisolated resets with active isolated transactions. New unit tests were added to confirm this behaviour. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java Property Changed: ---------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/ Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -1636,7 +1636,8 @@ if (m_written) { log.warn("DUPLICATE writeOnChannel for : " + this.hashCode()); } else { - assert !this.isEmpty(); + // Can be empty if reset! + // assert !this.isEmpty(); m_written = true; } Property changes on: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore ___________________________________________________________________ Added: svn:ignore + design.txt Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -349,6 +349,63 @@ } /** + * When resetting an alloc block to committed unisolated state, care must be + * taken to protect any isolated writes. This is indicated by a non-null + * m_saveCommit array which is set when a ContextAlocation takes ownership + * of the parent FixedAllocator. + * + * With no Isolated writes the state simply reverts to the committed state as + * retained in the m_commit array and any buffered allocations are cleared from + * the cache. 
+ * + * @param cache containing buffered writes to be cleared + */ + void reset(final RWWriteCacheService cache) { + for (int i = 0; i < m_live.length; i++) { + final int startBit = i * 32; + if (m_saveCommit == null) { + /* + * Simply set live and transients to the commit bits + * + * But remember to clear out any buffered writes in the cache + * first! New allocations determined by comparing + * m_commit with m_transients. + */ + final int chkbits = m_transients[i] & ~m_commit[i]; + clearCacheBits(cache, startBit, chkbits); + + m_live[i] = m_commit[i]; + m_transients[i] = m_commit[i]; + } else { + /* + * Example + * + * C1: 1100 + * T1: 1110 (single unisolated allocation) + * + * ContextAllocation takes over FixedAllocator + * + * S2: 1100 (saved commit) + * C2: 1110 (copy of transient T1) + * T2: 1111 (new allocation) + * + * RESET called: must clear isolated allocations + * - difference of S2 and C2 + * = C2 & ~S2 = 1110 & 0011 = 0010 + * + * Must then clear any buffered writes from the cache + * ...and clear unisolated allocations from m_live and m_transients + */ + final int chkbits = m_commit[i] & ~m_saveCommit[i]; + clearCacheBits(cache, startBit, chkbits); + + m_live[i] &= ~chkbits; + m_transients[i] &= ~chkbits; + } + } + } + + /** * When a session is active, the transient bits do not equate to an ORing * of the committed bits and the live bits, but rather an ORing of the live * with all the committed bits since the start of the session. Deleted: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -1,18 +0,0 @@ -package com.bigdata.rwstore; - -/** - * The DirectFixedAllocator is used to manage in-memory Direct ByteBuffer - * allocated memory. 
- * - */ -public class DirectFixedAllocator extends FixedAllocator { - - DirectFixedAllocator(RWStore store, int size) { - super(store, size); - } - - protected int grabAllocation(RWStore store, int blockSize) { - return store.allocateDirect(blockSize); - } - -} Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -660,20 +660,9 @@ checkFreeList(); } else { + m_freeTransients++; if (m_sessionActive) { - m_freeTransients++; - boolean assertsEnabled = false; - assert assertsEnabled = true; - if (assertsEnabled){ - final int sessionFrees = m_sessionFrees.incrementAndGet(); - int sessionBits = 0; - for (AllocBlock ab : m_allocBlocks) { - sessionBits += ab.sessionBits(); - } - assert sessionFrees <= sessionBits : "sessionFrees: " + sessionFrees + " > sessionBits: " + sessionBits; - } - } else { - m_freeTransients++; + assert checkSessionFrees(); } } @@ -711,13 +700,19 @@ return false; } + private boolean checkSessionFrees() { + final int sessionFrees = m_sessionFrees.incrementAndGet(); + int sessionBits = 0; + for (AllocBlock ab : m_allocBlocks) { + sessionBits += ab.sessionBits(); + } + return sessionFrees <= sessionBits; + } + private void checkFreeList() { if (m_freeWaiting && !m_pendingContextCommit) { - if (m_freeBits > 0 && this instanceof DirectFixedAllocator) { + if (m_freeBits >= m_store.cDefaultFreeBitsThreshold) { m_freeWaiting = false; - m_freeList.add(0, this); - } else if (m_freeBits >= m_store.cDefaultFreeBitsThreshold) { - m_freeWaiting = false; if (log.isDebugEnabled()) log.debug("Returning Allocator to FreeList - " + m_size); @@ -972,6 +967,32 @@ public void setBucketStats(Bucket b) { m_statsBucket = b; } + + /** + * The semantics of reset are to ditch all unisolated modifications + * since the last commit point. + * + * @param cache + */ + void reset(RWWriteCacheService cache) { + for (AllocBlock ab : m_allocBlocks) { + ab.reset(cache); + } + + m_freeTransients = transientbits(); + + assert calcSessionFrees(); + } + + private boolean calcSessionFrees() { + int sessionBits = 0; + for (AllocBlock ab : m_allocBlocks) { + sessionBits += ab.sessionBits(); + } + m_sessionFrees.set(sessionBits); + + return true; + } void releaseSession(RWWriteCacheService cache) { if (m_context != null) { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -523,19 +523,6 @@ private StorageStats m_storageStats; private long m_storageStatsAddr = 0; - /** - * Direct ByteBuffer allocations. - * - * TODO: Support different scaleups for disk and direct allocation to - * allow for finer granularity of allocation. For example, a 1K - * scaleup would allow 32bit slot allocations for all slot sizes. 
- */ - private int m_directSpaceAvailable = 0; - private int m_nextDirectAllocation = cDirectAllocationOffset; - private ArrayList<ByteBuffer> m_directBuffers = null; - - private final boolean m_enableDirectBuffer; - /** * <code>true</code> iff the backing store is open. */ @@ -623,16 +610,6 @@ if (log.isInfoEnabled()) log.info(AbstractTransactionService.Options.MIN_RELEASE_AGE + "=" + m_minReleaseAge); - /* - * Disable TemporaryRW option for now - */ - // m_enableDirectBuffer = fileMetadata.getBufferMode() == BufferMode.TemporaryRW; - m_enableDirectBuffer = false; - - if (m_enableDirectBuffer) { - m_directBuffers = new ArrayList<ByteBuffer>(); - addDirectBuffer(); - } cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( Options.META_BITS_SIZE, @@ -787,14 +764,6 @@ } } - private void addDirectBuffer() { - if (cMaxDirectBuffers > m_directBuffers.size()) { - ByteBuffer bbuf = ByteBuffer.allocateDirect(cDirectBufferCapacity); - m_directBuffers.add(bbuf); - m_directSpaceAvailable += cDirectBufferCapacity; - } - } - private void setAllocations(final FileMetadata fileMetadata) throws IOException { @@ -1393,11 +1362,7 @@ } - if (paddr < 0) { // read from Direct ByteBuffer - directRead(paddr, buf, offset, length); - - return; - } + assert paddr > 0; /** * Check WriteCache first @@ -1501,69 +1466,6 @@ } } - /** - * Retrieves data from the direct byte buffers, must handle transfers across - * multiple buffers - */ - private void directRead(final long paddr, final byte[] buf, final int offset, final int length) { - assert paddr < 0; - assert m_directBuffers != null; - - final int baddr = (int) (-paddr) - cDirectAllocationOffset; // buffer address - int bufIndex = baddr / cDirectBufferCapacity; - int bufOffset = baddr % cDirectBufferCapacity; - - int transfer = 0; - int curOut = offset; - - while (transfer < length) { - ByteBuffer direct = m_directBuffers.get(bufIndex); - direct.position(bufOffset); - int avail = cDirectBufferCapacity - bufOffset; - int req = length - transfer; - int tlen = avail < req ? avail : req; - - direct.get(buf, curOut, tlen); - - transfer += tlen; - curOut += tlen; - - bufIndex++; - bufOffset = 0; - } - } - - /** - * Writes to direct buffers, transferring across boundaries as required - */ - private void directWrite(final long pa, final byte[] buf, final int offset, final int length, final int chk) { - assert pa < 0; - assert m_directBuffers != null; - - final int baddr = (int) (-pa) - cDirectAllocationOffset; // buffer address - int bufIndex = baddr / cDirectBufferCapacity; - int bufOffset = baddr % cDirectBufferCapacity; - - int transfer = 0; - int curIn = offset; - - while (transfer < length) { - ByteBuffer direct = m_directBuffers.get(bufIndex); - direct.position(bufOffset); - int avail = cDirectBufferCapacity - bufOffset; - int req = length - transfer; - int tlen = avail < req ? avail : req; - - direct.put(buf, curIn, tlen); - - transfer += tlen; - curIn += tlen; - - bufIndex++; - bufOffset = 0; - } - } - private void assertAllocators() { for (int i = 0; i < m_allocs.size(); i++) { if (m_allocs.get(i).getIndex() != i) { @@ -1654,17 +1556,19 @@ /* * The session protection is complicated by the mix of * transaction protection and isolated AllocationContexts. + * + * If this is the first use of an IAllocationContext then + * then isSessionProtected may return false, so check the + * context first. 
*/ - if (this.isSessionProtected()) { - if (context != null) { - if (alloc.canImmediatelyFree(addr, sze, context)) { - immediateFree(addr, sze, true); - } else { - establishContextAllocation(context).deferFree(encodeAddr(addr, sze)); - } + if (context != null) { + if (alloc.canImmediatelyFree(addr, sze, context)) { + immediateFree(addr, sze, true); } else { - immediateFree(addr, sze, false); + establishContextAllocation(context).deferFree(encodeAddr(addr, sze)); } + } else if (this.isSessionProtected()) { + immediateFree(addr, sze, false); } else { immediateFree(addr, sze); } @@ -1945,11 +1849,7 @@ final ArrayList<FixedAllocator> list = m_freeFixed[i]; if (list.size() == 0) { - if (canAllocateDirect()) { - allocator = new DirectFixedAllocator(this, block); - } else { - allocator = new FixedAllocator(this, block); - } + allocator = new FixedAllocator(this, block); allocator.setFreeList(list); allocator.setIndex(m_allocs.size()); @@ -2008,13 +1908,6 @@ } } - /** - * @return true if we have spare directBuffers. - */ - private boolean canAllocateDirect() { - return m_directBuffers != null && m_directBuffers.size() < cMaxDirectBuffers; - } - private int fixedAllocatorIndex(final int size) { int i = 0; @@ -2104,15 +1997,10 @@ final long pa = physicalAddress(newAddr); - // if from DirectFixedAllocator then physical address will be negative - if (pa < 0) { - directWrite(pa, buf, 0, size, chk); - } else { - try { - m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); - } catch (InterruptedException e) { - throw new RuntimeException("Closed Store?", e); - } + try { + m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); + } catch (InterruptedException e) { + throw new RuntimeException("Closed Store?", e); } // Update counters. @@ -2194,52 +2082,27 @@ // } /** - * Toss away all buffered writes and then reload from the current root - * block. + * The semantics of reset are to revert unisolated writes to committed state. * + * Unisolated writes must also be removed from the write cache. + * + * The AllocBlocks of the FixedAllocators maintain the state to determine + * the correct reset behaviour. + * * If the store is using DirectFixedAllocators then an IllegalStateException * is thrown */ public void reset() { assertOpen(); - if (m_directBuffers != null) - throw new IllegalStateException("Reset is not supported with direct buffers"); - if (log.isInfoEnabled()) { log.info("RWStore Reset"); } m_allocationLock.lock(); try { - - final RootBlockUtility tmp = new RootBlockUtility(m_reopener, m_fd, - true/* validateChecksum */, false/* alternateRootBlock */, - false/* ignoreBadRootBlock */); - - final IRootBlockView rootBlock = tmp.rootBlock; - - checkRootBlock(rootBlock); - - m_commitList.clear(); - m_allocs.clear(); - // m_freeBlobs.clear(); - - final int numFixed = m_allocSizes.length; - for (int i = 0; i < numFixed; i++) { - m_freeFixed[i].clear(); + for (FixedAllocator fa : m_allocs) { + fa.reset(m_writeCache); } - - - try { - m_writeCache.reset(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - - initfromRootBlock(rootBlock); - - // notify of current file length. 
- m_writeCache.setExtent(convertAddr(m_fileSize)); } catch (Exception e) { throw new IllegalStateException("Unable to reset the store", e); } finally { @@ -2297,14 +2160,13 @@ if (addr == 0) { throw new IllegalStateException("Invalid metabits address: " + m_metaBitsAddr); } - if (addr < 0) { - directWrite(addr, buf, 0, buf.length, 0); - } else { - try { - m_writeCache.write(addr, ByteBuffer.wrap(buf), 0, false); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + + assert addr > 0; + + try { + m_writeCache.write(addr, ByteBuffer.wrap(buf), 0, false); + } catch (InterruptedException e) { + throw new RuntimeException(e); } } @@ -3106,7 +2968,7 @@ final int offset = getOffset(addr); final long laddr = allocator.getPhysicalAddress(offset, nocheck); - return allocator instanceof DirectFixedAllocator ? -laddr : laddr; + return laddr; } } @@ -3707,9 +3569,9 @@ * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ -// // Now remove the commit record entries from the commit record index. + // Now remove the commit record entries from the commit record index. final int commitPointsRemoved = journal.removeCommitRecordEntries( - fromKey, toKey); + fromKey, toKey); if (txLog.isInfoEnabled()) txLog.info("fromTime=" + fromTime + ", toTime=" + toTime @@ -4708,30 +4570,6 @@ } } - /** - * A request for a direct allocation from a Direct ByteBuffer - * - * @param blockSize the size requested - * @return the address of the direct allocation - */ - public int allocateDirect(final int blockSize) { - final int allocBytes = blockSize << this.ALLOCATION_SCALEUP; - if (m_directSpaceAvailable < allocBytes) { - // try and allocate a further buffer - addDirectBuffer(); - } - - if (m_directSpaceAvailable < allocBytes) { - return -1; - } else { - final int ret = m_nextDirectAllocation; - m_nextDirectAllocation += allocBytes; - m_directSpaceAvailable -= allocBytes; - - return ret; - } - } - /** * Returns the slot size associated with this address */ Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -480,6 +480,9 @@ hdrbuf.flip(); final int retaddr = getAllocationAddress(allocate(hdrbuf,blocks)); + if (log.isTraceEnabled()) + log.trace("Allocation BLOB at: " + retaddr); + return makeAddr(retaddr, nbytes); } catch (MemoryManagerOutOfMemory oom) { // We could have failed to allocate any of the blob parts or the header @@ -692,6 +695,9 @@ if (size == 0) throw new IllegalArgumentException(); + if (log.isTraceEnabled()) + log.trace("Releasing allocation at: " + rwaddr + "[" + size + "]"); + m_allocationLock.lock(); try { Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -34,6 +34,8 @@ import java.util.Properties; import java.util.TreeMap; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import 
junit.extensions.proxy.ProxyTestSuite; import junit.framework.Test; @@ -2089,6 +2091,272 @@ } } + /** + * Tests semantics of a simple reset + * + * Commit some data + * Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void + */ + public void test_simpleReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + final long addr = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + bs.delete(addr); + + final long addr2 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr2)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); // not removed in reset + assertFalse(bs.inWriteCache(addr2)); + try { + bs.read(addr2); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + store.commit(); + + assertTrue(bs.isCommitted(addr)); + } finally { + store.destroy(); + } + + } + + /** + * Tests semantics of a simple isolated reset + * + * Commit some data + * UnIsolated: Delete committed and allocate new data + * Isolated: Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void for + * unisolated actions but not isolated + */ + public void test_isolatedReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final IAllocationContext isolated = new IAllocationContext() {}; + + final long addr = bs.write(randomData(78)); + final long addr2 = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + bs.delete(addr); + + final long addr3 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr3)); + + bs.delete(addr2, isolated); + final long addr4 = bs.write(randomData(78), isolated); + assertTrue(bs.inWriteCache(addr4)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); // not removed in reset + assertFalse(bs.inWriteCache(addr3)); // unisolated removed + assertTrue(bs.inWriteCache(addr4)); // isolated remains + try { + bs.read(addr3); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + + bs.detachContext(isolated); + + store.commit(); + + assertTrue(bs.isCommitted(addr)); + assertTrue(bs.isCommitted(addr4)); + } finally { + store.destroy(); + } + + } + + /** + * Tests semantics of a more complex isolated reset + * + * Primarily the same as the simple isolated but ensuring + * more unisolated interactions after isolation is + * established. 
+ * + * Commit some data + * UnIsolated: Delete committed and allocate new data + * Isolated: Delete committed and allocate new data + * UnIsolated: Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void for + * unisolated actions but not isolated + */ + public void test_notSoSimpleIsolatedReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final IAllocationContext isolated = new IAllocationContext() {}; + + final long addr = bs.write(randomData(78)); + final long addr2 = bs.write(randomData(78)); + final long addr5 = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + // Unisolated actions + bs.delete(addr); + + final long addr3 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr3)); + + // Isolated actions + bs.delete(addr2, isolated); + final long addr4 = bs.write(randomData(78), isolated); + assertTrue(bs.inWriteCache(addr4)); + + // Further Unisolated actions + bs.delete(addr5); + final long addr6 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr6)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); // not removed in reset + assertFalse(bs.inWriteCache(addr3)); // unisolated removed + assertFalse(bs.inWriteCache(addr6)); // unisolated removed + assertTrue(bs.inWriteCache(addr4)); // isolated remains + try { + bs.read(addr3); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + try { + bs.read(addr6); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + + // Detach isolated context prior to commit + bs.detachContext(isolated); + + store.commit(); + + assertTrue(bs.isCommitted(addr)); + assertTrue(bs.isCommitted(addr4)); + assertTrue(bs.isCommitted(addr5)); + assertFalse(bs.isCommitted(addr2)); + } finally { + store.destroy(); + } + + } + + /** + * Concurrent readers should no longer be an issue now that + * reset() is not re-initializing from the root block. + * + * This test should confirm that. 
+ * + * Establishes some committed data and runs two readers + * concurrent with a single writer intermittently aborting + * + * @throws InterruptedException + */ + public void test_simpleConcurrentReadersWithResets() throws InterruptedException { + final Journal store = (Journal) getStore(); + // use executor service to enable exception trapping + final ExecutorService es = store.getExecutorService(); + try { + final RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + final long[] addrs = new long[50]; + for (int w = 0; w < addrs.length; w++) { + addrs[w] = bs.write(randomData(r.nextInt(150)+10)); + } + + store.commit(); + + assertTrue(bs.isCommitted(addrs[0])); + + Runnable writer = new Runnable() { + public void run() { + for (int i = 0; i < 2000; i++) { + bs.delete(addrs[r.nextInt(addrs.length)]); + for (int w = 0; w < 1000; w++) + bs.write(randomData(r.nextInt(500)+1)); + bs.abort(); + } + } + }; + final Future wfuture = es.submit(writer); + Runnable reader1 = new Runnable() { + public void run() { + for (int i = 0; i < 5000; i++) { + for (int rdr = 0; rdr < addrs.length; rdr++) { + bs.read(addrs[r.nextInt(addrs.length)]); + } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + }; + final Future r1future = es.submit(reader1); + Runnable reader2 = new Runnable() { + public void run() { + for (int i = 0; i < 5000; i++) { + for (int rdr = 0; rdr < addrs.length; rdr++) { + bs.read(addrs[r.nextInt(addrs.length)]); + } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + }; + final Future r2future = es.submit(reader2); + + try { + wfuture.get(); + r1future.get(); + r2future.get(); + } catch (Exception e) { + fail(e.getMessage(), e); + } + for (int i = 0; i < addrs.length; i++) { + assertTrue(bs.isCommitted(addrs[i])); + } + } finally { + es.shutdownNow(); + store.destroy(); + } + } + ByteBuffer randomData(final int sze) { byte[] buf = new byte[sze + 4]; // extra for checksum r.nextBytes(buf); @@ -2103,7 +2371,7 @@ return buf; } } - + /** * Test suite integration for {@link AbstractMROWTestCase}. * Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -1636,7 +1636,8 @@ if (m_written) { log.warn("DUPLICATE writeOnChannel for : " + this.hashCode()); } else { - assert !this.isEmpty(); + // Can be empty if reset! + // assert !this.isEmpty(); m_written = true; } Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -349,6 +349,63 @@ } /** + * When resetting an alloc block to committed unisolated state, care must be + * taken to protect any isolated writes. This is indicated by a non-null + * m_saveCommit array which is set when a ContextAlocation takes ownership + * of the parent FixedAllocator. 
+ * + * With no Isolated writes the state simply reverts to the committed state as + * retained in the m_commit array and any buffered allocations are cleared from + * the cache. + * + * @param cache containing buffered writes to be cleared + */ + void reset(final RWWriteCacheService cache) { + for (int i = 0; i < m_live.length; i++) { + final int startBit = i * 32; + if (m_saveCommit == null) { + /* + * Simply set live and transients to the commit bits + * + * But remember to clear out any buffered writes in the cache + * first! New allocations determined by comparing + * m_commit with m_transients. + */ + final int chkbits = m_transients[i] & ~m_commit[i]; + clearCacheBits(cache, startBit, chkbits); + + m_live[i] = m_commit[i]; + m_transients[i] = m_commit[i]; + } else { + /* + * Example + * + * C1: 1100 + * T1: 1110 (single unisolated allocation) + * + * ContextAllocation takes over FixedAllocator + * + * S2: 1100 (saved commit) + * C2: 1110 (copy of transient T1) + * T2: 1111 (new allocation) + * + * RESET called: must clear isolated allocations + * - difference of S2 and C2 + * = C2 & ~S2 = 1110 & 0011 = 0010 + * + * Must then clear any buffered writes from the cache + * ...and clear unisolated allocations from m_live and m_transients + */ + final int chkbits = m_commit[i] & ~m_saveCommit[i]; + clearCacheBits(cache, startBit, chkbits); + + m_live[i] &= ~chkbits; + m_transients[i] &= ~chkbits; + } + } + } + + /** * When a session is active, the transient bits do not equate to an ORing * of the committed bits and the live bits, but rather an ORing of the live * with all the committed bits since the start of the session. Deleted: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/DirectFixedAllocator.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -1,18 +0,0 @@ -package com.bigdata.rwstore; - -/** - * The DirectFixedAllocator is used to manage in-memory Direct ByteBuffer - * allocated memory. 
- * - */ -public class DirectFixedAllocator extends FixedAllocator { - - DirectFixedAllocator(RWStore store, int size) { - super(store, size); - } - - protected int grabAllocation(RWStore store, int blockSize) { - return store.allocateDirect(blockSize); - } - -} Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -660,20 +660,9 @@ checkFreeList(); } else { + m_freeTransients++; if (m_sessionActive) { - m_freeTransients++; - boolean assertsEnabled = false; - assert assertsEnabled = true; - if (assertsEnabled){ - final int sessionFrees = m_sessionFrees.incrementAndGet(); - int sessionBits = 0; - for (AllocBlock ab : m_allocBlocks) { - sessionBits += ab.sessionBits(); - } - assert sessionFrees <= sessionBits : "sessionFrees: " + sessionFrees + " > sessionBits: " + sessionBits; - } - } else { - m_freeTransients++; + assert checkSessionFrees(); } } @@ -711,13 +700,19 @@ return false; } + private boolean checkSessionFrees() { + final int sessionFrees = m_sessionFrees.incrementAndGet(); + int sessionBits = 0; + for (AllocBlock ab : m_allocBlocks) { + sessionBits += ab.sessionBits(); + } + return sessionFrees <= sessionBits; + } + private void checkFreeList() { if (m_freeWaiting && !m_pendingContextCommit) { - if (m_freeBits > 0 && this instanceof DirectFixedAllocator) { + if (m_freeBits >= m_store.cDefaultFreeBitsThreshold) { m_freeWaiting = false; - m_freeList.add(0, this); - } else if (m_freeBits >= m_store.cDefaultFreeBitsThreshold) { - m_freeWaiting = false; if (log.isDebugEnabled()) log.debug("Returning Allocator to FreeList - " + m_size); @@ -972,6 +967,32 @@ public void setBucketStats(Bucket b) { m_statsBucket = b; } + + /** + * The semantics of reset are to ditch all unisolated modifications + * since the last commit point. + * + * @param cache + */ + void reset(RWWriteCacheService cache) { + for (AllocBlock ab : m_allocBlocks) { + ab.reset(cache); + } + + m_freeTransients = transientbits(); + + assert calcSessionFrees(); + } + + private boolean calcSessionFrees() { + int sessionBits = 0; + for (AllocBlock ab : m_allocBlocks) { + sessionBits += ab.sessionBits(); + } + m_sessionFrees.set(sessionBits); + + return true; + } void releaseSession(RWWriteCacheService cache) { if (m_context != null) { Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -523,19 +523,6 @@ private StorageStats m_storageStats; private long m_storageStatsAddr = 0; - /** - * Direct ByteBuffer allocations. - * - * TODO: Support different scaleups for disk and direct allocation to - * allow for finer granularity of allocation. For example, a 1K - * scaleup would allow 32bit slot allocations for all slot sizes. 
- */ - private int m_directSpaceAvailable = 0; - private int m_nextDirectAllocation = cDirectAllocationOffset; - private ArrayList<ByteBuffer> m_directBuffers = null; - - private final boolean m_enableDirectBuffer; - /** * <code>true</code> iff the backing store is open. */ @@ -623,16 +610,6 @@ if (log.isInfoEnabled()) log.info(AbstractTransactionService.Options.MIN_RELEASE_AGE + "=" + m_minReleaseAge); - /* - * Disable TemporaryRW option for now - */ - // m_enableDirectBuffer = fileMetadata.getBufferMode() == BufferMode.TemporaryRW; - m_enableDirectBuffer = false; - - if (m_enableDirectBuffer) { - m_directBuffers = new ArrayList<ByteBuffer>(); - addDirectBuffer(); - } cDefaultMetaBitsSize = Integer.valueOf(fileMetadata.getProperty( Options.META_BITS_SIZE, @@ -787,14 +764,6 @@ } } - private void addDirectBuffer() { - if (cMaxDirectBuffers > m_directBuffers.size()) { - ByteBuffer bbuf = ByteBuffer.allocateDirect(cDirectBufferCapacity); - m_directBuffers.add(bbuf); - m_directSpaceAvailable += cDirectBufferCapacity; - } - } - private void setAllocations(final FileMetadata fileMetadata) throws IOException { @@ -1393,11 +1362,7 @@ } - if (paddr < 0) { // read from Direct ByteBuffer - directRead(paddr, buf, offset, length); - - return; - } + assert paddr > 0; /** * Check WriteCache first @@ -1501,69 +1466,6 @@ } } - /** - * Retrieves data from the direct byte buffers, must handle transfers across - * multiple buffers - */ - private void directRead(final long paddr, final byte[] buf, final int offset, final int length) { - assert paddr < 0; - assert m_directBuffers != null; - - final int baddr = (int) (-paddr) - cDirectAllocationOffset; // buffer address - int bufIndex = baddr / cDirectBufferCapacity; - int bufOffset = baddr % cDirectBufferCapacity; - - int transfer = 0; - int curOut = offset; - - while (transfer < length) { - ByteBuffer direct = m_directBuffers.get(bufIndex); - direct.position(bufOffset); - int avail = cDirectBufferCapacity - bufOffset; - int req = length - transfer; - int tlen = avail < req ? avail : req; - - direct.get(buf, curOut, tlen); - - transfer += tlen; - curOut += tlen; - - bufIndex++; - bufOffset = 0; - } - } - - /** - * Writes to direct buffers, transferring across boundaries as required - */ - private void directWrite(final long pa, final byte[] buf, final int offset, final int length, final int chk) { - assert pa < 0; - assert m_directBuffers != null; - - final int baddr = (int) (-pa) - cDirectAllocationOffset; // buffer address - int bufIndex = baddr / cDirectBufferCapacity; - int bufOffset = baddr % cDirectBufferCapacity; - - int transfer = 0; - int curIn = offset; - - while (transfer < length) { - ByteBuffer direct = m_directBuffers.get(bufIndex); - direct.position(bufOffset); - int avail = cDirectBufferCapacity - bufOffset; - int req = length - transfer; - int tlen = avail < req ? avail : req; - - direct.put(buf, curIn, tlen); - - transfer += tlen; - curIn += tlen; - - bufIndex++; - bufOffset = 0; - } - } - private void assertAllocators() { for (int i = 0; i < m_allocs.size(); i++) { if (m_allocs.get(i).getIndex() != i) { @@ -1654,17 +1556,19 @@ /* * The session protection is complicated by the mix of * transaction protection and isolated AllocationContexts. + * + * If this is the first use of an IAllocationContext then + * then isSessionProtected may return false, so check the + * context first. 
*/ - if (this.isSessionProtected()) { - if (context != null) { - if (alloc.canImmediatelyFree(addr, sze, context)) { - immediateFree(addr, sze, true); - } else { - establishContextAllocation(context).deferFree(encodeAddr(addr, sze)); - } + if (context != null) { + if (alloc.canImmediatelyFree(addr, sze, context)) { + immediateFree(addr, sze, true); } else { - immediateFree(addr, sze, false); + establishContextAllocation(context).deferFree(encodeAddr(addr, sze)); } + } else if (this.isSessionProtected()) { + immediateFree(addr, sze, false); } else { immediateFree(addr, sze); } @@ -1945,11 +1849,7 @@ final ArrayList<FixedAllocator> list = m_freeFixed[i]; if (list.size() == 0) { - if (canAllocateDirect()) { - allocator = new DirectFixedAllocator(this, block); - } else { - allocator = new FixedAllocator(this, block); - } + allocator = new FixedAllocator(this, block); allocator.setFreeList(list); allocator.setIndex(m_allocs.size()); @@ -2008,13 +1908,6 @@ } } - /** - * @return true if we have spare directBuffers. - */ - private boolean canAllocateDirect() { - return m_directBuffers != null && m_directBuffers.size() < cMaxDirectBuffers; - } - private int fixedAllocatorIndex(final int size) { int i = 0; @@ -2104,15 +1997,10 @@ final long pa = physicalAddress(newAddr); - // if from DirectFixedAllocator then physical address will be negative - if (pa < 0) { - directWrite(pa, buf, 0, size, chk); - } else { - try { - m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); - } catch (InterruptedException e) { - throw new RuntimeException("Closed Store?", e); - } + try { + m_writeCache.write(pa, ByteBuffer.wrap(buf, 0, size), chk); + } catch (InterruptedException e) { + throw new RuntimeException("Closed Store?", e); } // Update counters. @@ -2194,52 +2082,27 @@ // } /** - * Toss away all buffered writes and then reload from the current root - * block. + * The semantics of reset are to revert unisolated writes to committed state. * + * Unisolated writes must also be removed from the write cache. + * + * The AllocBlocks of the FixedAllocators maintain the state to determine + * the correct reset behaviour. + * * If the store is using DirectFixedAllocators then an IllegalStateException * is thrown */ public void reset() { assertOpen(); - if (m_directBuffers != null) - throw new IllegalStateException("Reset is not supported with direct buffers"); - if (log.isInfoEnabled()) { log.info("RWStore Reset"); } m_allocationLock.lock(); try { - - final RootBlockUtility tmp = new RootBlockUtility(m_reopener, m_fd, - true/* validateChecksum */, false/* alternateRootBlock */, - false/* ignoreBadRootBlock */); - - final IRootBlockView rootBlock = tmp.rootBlock; - - checkRootBlock(rootBlock); - - m_commitList.clear(); - m_allocs.clear(); - // m_freeBlobs.clear(); - - final int numFixed = m_allocSizes.length; - for (int i = 0; i < numFixed; i++) { - m_freeFixed[i].clear(); + for (FixedAllocator fa : m_allocs) { + fa.reset(m_writeCache); } - - - try { - m_writeCache.reset(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - - initfromRootBlock(rootBlock); - - // notify of current file length. 
- m_writeCache.setExtent(convertAddr(m_fileSize)); } catch (Exception e) { throw new IllegalStateException("Unable to reset the store", e); } finally { @@ -2297,14 +2160,13 @@ if (addr == 0) { throw new IllegalStateException("Invalid metabits address: " + m_metaBitsAddr); } - if (addr < 0) { - directWrite(addr, buf, 0, buf.length, 0); - } else { - try { - m_writeCache.write(addr, ByteBuffer.wrap(buf), 0, false); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } + + assert addr > 0; + + try { + m_writeCache.write(addr, ByteBuffer.wrap(buf), 0, false); + } catch (InterruptedException e) { + throw new RuntimeException(e); } } @@ -3106,7 +2968,7 @@ final int offset = getOffset(addr); final long laddr = allocator.getPhysicalAddress(offset, nocheck); - return allocator instanceof DirectFixedAllocator ? -laddr : laddr; + return laddr; } } @@ -3707,9 +3569,9 @@ * @see https://sourceforge.net/apps/trac/bigdata/ticket/440 */ -// // Now remove the commit record entries from the commit record index. + // Now remove the commit record entries from the commit record index. final int commitPointsRemoved = journal.removeCommitRecordEntries( - fromKey, toKey); + fromKey, toKey); if (txLog.isInfoEnabled()) txLog.info("fromTime=" + fromTime + ", toTime=" + toTime @@ -4708,30 +4570,6 @@ } } - /** - * A request for a direct allocation from a Direct ByteBuffer - * - * @param blockSize the size requested - * @return the address of the direct allocation - */ - public int allocateDirect(final int blockSize) { - final int allocBytes = blockSize << this.ALLOCATION_SCALEUP; - if (m_directSpaceAvailable < allocBytes) { - // try and allocate a further buffer - addDirectBuffer(); - } - - if (m_directSpaceAvailable < allocBytes) { - return -1; - } else { - final int ret = m_nextDirectAllocation; - m_nextDirectAllocation += allocBytes; - m_directSpaceAvailable -= allocBytes; - - return ret; - } - } - /** * Returns the slot size associated with this address */ Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -526,6 +526,9 @@ hdrbuf.flip(); final int retaddr = getAllocationAddress(allocate(hdrbuf,blocks)); + if (log.isTraceEnabled()) + log.trace("Allocation BLOB at: " + retaddr); + return makeAddr(retaddr, nbytes); } catch (MemoryManagerOutOfMemory oom) { // We could have failed to allocate any of the blob parts or the header @@ -742,6 +745,9 @@ if (size == 0) throw new IllegalArgumentException(); + if (log.isTraceEnabled()) + log.trace("Releasing allocation at: " + rwaddr + "[" + size + "]"); + m_allocationLock.lock(); try { Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2012-02-16 16:12:27 UTC (rev 6033) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java 2012-02-16 17:44:12 UTC (rev 6034) @@ -34,6 +34,8 @@ import java.util.Properties; import java.util.TreeMap; import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import 
junit.extensions.proxy.ProxyTestSuite; import junit.framework.Test; @@ -66,6 +68,7 @@ import com.bigdata.rwstore.RWStore.RawTx; import com.bigdata.service.AbstractTransactionService; import com.bigdata.util.InnerCause; +import com.bigdata.journal.VerifyCommitRecordIndex; /** * Test suite for {@link BufferMode#DiskRW} journals. @@ -1757,6 +1760,57 @@ } } + /** + * Can be tested by removing RWStore call to journal.removeCommitRecordEntries + * in freeDeferrals. + * + * final int commitPointsRemoved = journal.removeCommitRecordEntries(fromKey, toKey); + * + * replaced with + * + * final int commitPointsRemoved = commitPointsRecycled; + * + */ + public void testVerifyCommitRecordIndex() { + final Properties properties = new Properties(getProperties()); + + properties.setProperty( + AbstractTransactionService.Options.MIN_RELEASE_AGE, "100"); + + final Journal store = (Journal) getStore(properties); + try { + + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + for (int r = 0; r < 10; r++) { + ArrayList<Long> addrs = new ArrayList<Long>(); + for (int i = 0; i < 100; i++) { + addrs.add(bs.write(randomData(45))); + } + store.commit(); + + for (long addr : addrs) { + bs.delete(addr); + } + + store.commit(); + + // Age the history (of the deletes!) + Thread.currentThread().sleep(200); + } + + final String fname = bs.getRWStore().getStoreFile().getAbsolutePath(); + + store.close(); + + VerifyCommitRecordIndex.main(new String[]{fname}); + + } catch (InterruptedException e) { + } finally { + store.destroy(); + } + } + private Journal getStore(Properties props) { return new Journal(props); } @@ -2089,6 +2143,272 @@ } } + /** + * Tests semantics of a simple reset + * + * Commit some data + * Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void + */ + public void test_simpleReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + final long addr = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + bs.delete(addr); + + final long addr2 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr2)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); // not removed in reset + assertFalse(bs.inWriteCache(addr2)); + try { + bs.read(addr2); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + store.commit(); + + assertTrue(bs.isCommitted(addr)); + } finally { + store.destroy(); + } + + } + + /** + * Tests semantics of a simple isolated reset + * + * Commit some data + * UnIsolated: Delete committed and allocate new data + * Isolated: Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void for + * unisolated actions but not isolated + */ + public void test_isolatedReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final IAllocationContext isolated = new IAllocationContext() {}; + + final long addr = bs.write(randomData(78)); + final long addr2 = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + bs.delete(addr); + + final long addr3 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr3)); + + bs.delete(addr2, isolated); + final long addr4 = bs.write(randomData(78), isolated); + assertTrue(bs.inWriteCache(addr4)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); 
// not removed in reset + assertFalse(bs.inWriteCache(addr3)); // unisolated removed + assertTrue(bs.inWriteCache(addr4)); // isolated remains + try { + bs.read(addr3); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + + bs.detachContext(isolated); + + store.commit(); + + assertTrue(bs.isCommitted(addr)); + assertTrue(bs.isCommitted(addr4)); + } finally { + store.destroy(); + } + + } + + /** + * Tests semantics of a more complex isolated reset + * + * Primarily the same as the simple isolated but ensuring + * more unisolated interactions after isolation is + * established. + * + * Commit some data + * UnIsolated: Delete committed and allocate new data + * Isolated: Delete committed and allocate new data + * UnIsolated: Delete committed and allocate new data + * Reset + * Test that deletion and new allocation are void for + * unisolated actions but not isolated + */ + public void test_notSoSimpleIsolatedReset() { + Journal store = (Journal) getStore(); + try { + RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + final IAllocationContext isolated = new IAllocationContext() {}; + + final long addr = bs.write(randomData(78)); + final long addr2 = bs.write(randomData(78)); + final long addr5 = bs.write(randomData(78)); + + // Has just been written so must be in cache + assertTrue(bs.inWriteCache(addr)); + + store.commit(); + + // Unisolated actions + bs.delete(addr); + + final long addr3 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr3)); + + // Isolated actions + bs.delete(addr2, isolated); + final long addr4 = bs.write(randomData(78), isolated); + assertTrue(bs.inWriteCache(addr4)); + + // Further Unisolated actions + bs.delete(addr5); + final long addr6 = bs.write(randomData(78)); + assertTrue(bs.inWriteCache(addr6)); + + bs.abort(); + + assertTrue(bs.inWriteCache(addr)); // not removed in reset + assertFalse(bs.inWriteCache(addr3)); // unisolated removed + assertFalse(bs.inWriteCache(addr6)); // unisolated removed + assertTrue(bs.inWriteCache(addr4)); // isolated remains + try { + bs.read(addr3); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + try { + bs.read(addr6); + fail("Exception expected"); + } catch (IllegalArgumentException e) { + // expected + } + + // Detach isolated context prior to commit + bs.detachContext(isolated); + + store.commit(); + + assertTrue(bs.isCommitted(addr)); + assertTrue(bs.isCommitted(addr4)); + assertTrue(bs.isCommitted(addr5)); + assertFalse(bs.isCommitted(addr2)); + } finally { + store.destroy(); + } + + } + + /** + * Concurrent readers should no longer be an issue now that + * reset() is not re-initializing from the root block. + * + * This test should confirm that. 
+ * + * Establishes some committed data and runs two readers + * concurrent with a single writer intermittently aborting + * + * @throws InterruptedException + */ + public void test_simpleConcurrentReadersWithResets() throws InterruptedException { + final Journal store = (Journal) getStore(); + // use executor service to enable exception trapping + final ExecutorService es = store.getExecutorService(); + try { + final RWStrategy bs = (RWStrategy) store.getBufferStrategy(); + + final long[] addrs = new long[50]; + for (int w = 0; w < addrs.length; w++) { + addrs[w] = bs.write(randomData(r.nextInt(150)+10)); + } + + store.commit(); + + assertTrue(bs.isCommitted(addrs[0])); + + Runnable writer = new Runnable() { + public void run() { + for (int i = 0; i < 2000; i++) { + bs.delete(addrs[r.nextInt(addrs.length)]); + for (int w = 0; w < 1000; w++) + bs.write(randomData(r.nextInt(500)+1)); + bs.abort(); + } + } + }; + final Future wfuture = es.submit(writer); + Runnable reader1 = new Runnable() { + public void run() { + for (int i = 0; i < 5000; i++) { + for (int rdr = 0; rdr < addrs.length; rdr++) { + bs.read(addrs[r.nextInt(addrs.length)]); + } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + }; + final Future r1future = es.submit(reader1); + Runnable reader2 = new Runnable() { + public void run() { + for (int i = 0; i < 5000; i++) { + for (int rdr = 0; rdr < addrs.length; rdr++) { + bs.read(addrs[r.nextInt(addrs.length)]); + } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + }; + final Future r2future = es.submit(reader2); + + try { + wfuture.get(); + r1future.get(); + r2future.get(); + } catch (Exception e) { + fail(e.getMessage(), e); + } + for (int i = 0; i < addrs.length; i++) { + assertTrue(bs.isCommitted(addrs[i])); + } + } finally { + es.shutdownNow(); + store.destroy(); + } + } + ByteBuffer randomData(final int sze) { byte[] buf = new byte[sze + 4]; // extra for checksum r.nextBytes(buf); @@ -2103,7 +2423,7 @@ return buf; } } - + /** * Test suite integration for {@link AbstractMROWTestCase}. * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
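The worked example in the AllocBlock.reset(cache) comment above is easy to verify with a few lines of bit arithmetic. Below is a minimal standalone sketch, not part of the commit: the class name is hypothetical and the 4-bit values are copied from that comment (C1/T1 for the plain case, S2/C2/T2 for the saved-commit case), with the live bits for case 2 assumed equal to T2.

/**
 * Sketch of the AllocBlock.reset(cache) bit arithmetic. Hypothetical class;
 * the values mirror the worked example in the commit's comment.
 */
public class AllocBlockResetSketch {

    public static void main(String[] args) {

        // Case 1: no saved commit (m_saveCommit == null).
        int commit = 0b1100;     // m_commit: bits at the last commit point
        int transients = 0b1110; // m_transients: commit OR'd with the live bits

        // Buffered (unisolated) writes since the commit: clear these from the write cache.
        int chkbits = transients & ~commit;              // 0010
        System.out.println("case 1: clear " + Integer.toBinaryString(chkbits));

        // Live and transient bits simply revert to the committed state.
        int live = commit;
        transients = commit;

        // Case 2: a ContextAllocation has taken over the FixedAllocator.
        final int saveCommit = 0b1100; // S2: commit bits saved at takeover
        commit = 0b1110;               // C2: copy of the transients T1
        transients = 0b1111;           // T2: one further isolated allocation
        live = 0b1111;                 // assumed equal to T2 for this example

        // Unisolated allocations to void = difference of C2 and S2.
        chkbits = commit & ~saveCommit;                  // 1110 & 0011 = 0010
        System.out.println("case 2: clear " + Integer.toBinaryString(chkbits));

        // Remove them from the live and transient bits; the isolated
        // allocation added in T2 (the low-order bit) survives the reset.
        live &= ~chkbits;        // 1101
        transients &= ~chkbits;  // 1101
        System.out.println("live=" + Integer.toBinaryString(live)
                + ", transients=" + Integer.toBinaryString(transients));
    }
}

This matches the behaviour exercised by test_simpleReset and test_isolatedReset above: unisolated allocations made since the last commit point are voided by the abort, while allocations made under an isolated IAllocationContext survive it.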
From: <tho...@us...> - 2012-03-12 13:03:59
|
Revision: 6107 http://bigdata.svn.sourceforge.net/bigdata/?rev=6107&view=rev Author: thompsonbry Date: 2012-03-12 13:03:53 +0000 (Mon, 12 Mar 2012) Log Message: ----------- The UnisolatedReadWriteIndex was using an hard coded effective value of 1000 rather than the configured value of 100 for the DEFAULT_CAPACITY. I modified the constructor to save the specified value on a defaultCapacity field, rangeIterator(...) to use that configured value, and the DEFAULT_CAPACITY to be 1000, which was the effective value for both 1.0.0x and 1.1.x. @see https://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java Modified: branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2012-03-11 00:32:39 UTC (rev 6106) +++ branches/BIGDATA_RELEASE_1_0_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2012-03-12 13:03:53 UTC (rev 6107) @@ -280,9 +280,16 @@ * main purpose of the capacity is to reduce the contention for the * {@link ReadWriteLock}. */ - final static protected int DEFAULT_CAPACITY = 100;//10000; + final static private int DEFAULT_CAPACITY = 1000;//10000; /** + * The default capacity for iterator reads against the underlying index. The + * main purpose of the capacity is to reduce the contention for the + * {@link ReadWriteLock}. + */ + final private int defaultCapacity; + + /** * Creates a view of an unisolated index that will enforce the concurrency * constraints of the {@link BTree} class, but only among other instances of * this class for the same underlying index. @@ -306,7 +313,7 @@ * * @param ndx * The underlying unisolated index. - * @param capacity + * @param defaultCapacity * The capacity for iterator reads against the underlying index. * The main purpose of the capacity is to reduce the contention * for the {@link ReadWriteLock}. Relatively small values should @@ -329,15 +336,17 @@ * the computed solutions onto the relations. It is likely that a * read-write lock will do well for this situation. */ - public UnisolatedReadWriteIndex(final BTree ndx, final int capacity) { + public UnisolatedReadWriteIndex(final BTree ndx, final int defaultCapacity) { if (ndx == null) throw new IllegalArgumentException(); - if (capacity <= 0) + if (defaultCapacity <= 0) throw new IllegalArgumentException(); this.ndx = ndx; + + this.defaultCapacity = defaultCapacity; this.readWriteLock = getReadWriteLock(ndx); @@ -631,11 +640,11 @@ if (capacity == 0) { /* - * When the buffer capacity is not specified a relatively small - * capacity is choosen. + * When the buffer capacity is not specified, use the default from + * the constructor. 
*/ - capacity = 1000; + capacity = defaultCapacity; } Modified: branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java =================================================================== --- branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2012-03-11 00:32:39 UTC (rev 6106) +++ branches/BIGDATA_RELEASE_1_1_0/bigdata/src/java/com/bigdata/btree/UnisolatedReadWriteIndex.java 2012-03-12 13:03:53 UTC (rev 6107) @@ -281,7 +281,14 @@ * main purpose of the capacity is to reduce the contention for the * {@link ReadWriteLock}. */ - final static protected int DEFAULT_CAPACITY = 100;//10000; + final static private int DEFAULT_CAPACITY = 1000;// 10000; + + /** + * The default capacity for iterator reads against the underlying index. The + * main purpose of the capacity is to reduce the contention for the + * {@link ReadWriteLock}. + */ + final private int defaultCapacity; /** * Creates a view of an unisolated index that will enforce the concurrency @@ -307,7 +314,7 @@ * * @param ndx * The underlying unisolated index. - * @param capacity + * @param defaultCapacity * The capacity for iterator reads against the underlying index. * The main purpose of the capacity is to reduce the contention * for the {@link ReadWriteLock}. Relatively small values should @@ -330,18 +337,20 @@ * the computed solutions onto the relations. It is likely that a * read-write lock will do well for this situation. */ - public UnisolatedReadWriteIndex(final BTree ndx, final int capacity) { + public UnisolatedReadWriteIndex(final BTree ndx, final int defaultCapacity) { if (ndx == null) throw new IllegalArgumentException(); - if (capacity <= 0) + if (defaultCapacity <= 0) throw new IllegalArgumentException(); this.ndx = ndx; + this.defaultCapacity = defaultCapacity; + this.readWriteLock = getReadWriteLock(ndx); - + } /** @@ -633,11 +642,11 @@ if (capacity == 0) { /* - * When the buffer capacity is not specified a relatively small - * capacity is choosen. + * When the buffer capacity is not specified, use the default from + * the constructor. */ - capacity = 1000; + capacity = defaultCapacity; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
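The fix above boils down to a small capacity-defaulting pattern: keep the constructor's value in a field and fall back to it when a caller passes 0, rather than falling back to a literal buried inside the iterator method. A self-contained sketch of that pattern (hypothetical class, not the bigdata API):

/**
 * Illustrates the capacity-defaulting pattern adopted by
 * UnisolatedReadWriteIndex: the value given to the constructor is retained
 * and used whenever a caller does not specify a capacity.
 */
class CapacityDefaultingView {

    /** Fallback when no capacity is configured (now 1000, the previously effective value). */
    private static final int DEFAULT_CAPACITY = 1000;

    private final int defaultCapacity;

    CapacityDefaultingView() {
        this(DEFAULT_CAPACITY);
    }

    CapacityDefaultingView(final int defaultCapacity) {
        if (defaultCapacity <= 0)
            throw new IllegalArgumentException();
        this.defaultCapacity = defaultCapacity;
    }

    /** A capacity of 0 means "use the configured default", mirroring rangeIterator(...). */
    int effectiveCapacity(final int capacity) {
        return capacity == 0 ? defaultCapacity : capacity;
    }
}

With this in place a caller-supplied capacity is actually honored by iterator reads, while the default remains 1000, the value that was effectively in force in both 1.0.0x and 1.1.x.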
From: <tho...@us...> - 2012-09-16 14:41:52
|
Revision: 6584 http://bigdata.svn.sourceforge.net/bigdata/?rev=6584&view=rev Author: thompsonbry Date: 2012-09-16 14:41:45 +0000 (Sun, 16 Sep 2012) Log Message: ----------- Bumping the version number for the 1.2.2 critical maintenance release. Development previously targetted at the 1.2.2 release is continuing and will now target a 1.2.3 version number. Note: maven artifacts WILL NOT be published for 1.2.2 since they were introduced after the point from which this maintenance release was branched. @see https://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/build.properties branches/BIGDATA_RELEASE_1_2_1/build.properties Modified: branches/BIGDATA_RELEASE_1_2_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/build.properties 2012-09-15 13:15:44 UTC (rev 6583) +++ branches/BIGDATA_RELEASE_1_2_0/build.properties 2012-09-16 14:41:45 UTC (rev 6584) @@ -82,7 +82,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.2.1 +build.ver=1.2.2 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to Modified: branches/BIGDATA_RELEASE_1_2_1/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_2_1/build.properties 2012-09-15 13:15:44 UTC (rev 6583) +++ branches/BIGDATA_RELEASE_1_2_1/build.properties 2012-09-16 14:41:45 UTC (rev 6584) @@ -62,7 +62,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.2.1 +build.ver=1.2.2 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-02-11 12:37:14
|
Revision: 6897 http://bigdata.svn.sourceforge.net/bigdata/?rev=6897&view=rev Author: thompsonbry Date: 2013-02-11 12:37:06 +0000 (Mon, 11 Feb 2013) Log Message: ----------- Commenting out test for the restore of a scattered write cache that has been broken for a very long time now. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java branches/READ_CACHE/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2013-02-08 16:17:49 UTC (rev 6896) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2013-02-11 12:37:06 UTC (rev 6897) @@ -920,123 +920,123 @@ } - /** - * To test the buffer restore, we will share a buffer between two WriteCache - * instances then write data to the first cache and update its recordMap - * from the buffer. This short circuits the HA pipeline that streams the - * ByteBuffer from one cache to the other. - * - * FIXME This is only testing a single addr. We need to test a bunch of - * writes, not just one. - */ - public void test_writeCacheScatteredBufferRestore() throws InterruptedException, IOException { - final File file = File.createTempFile(getName(), ".tmp"); - final ReopenFileChannel opener = new ReopenFileChannel(file, mode); - try { - - final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); - final IBufferAccess buf2 = DirectBufferPool.INSTANCE.acquire(); - try { - - final long addr1 = 12800; - - final ByteBuffer data1 = getRandomData(20 * 1024); - - final int chk1 = ChecksumUtility.threadChk.get().checksum( - data1, 0/* offset */, data1.limit()); - - final ByteBuffer data2 = getRandomData(20 * 1024); - - final int chk2 = ChecksumUtility.threadChk.get().checksum( - data2, 0/* offset */, data2.limit()); - - final WriteCache cache1 = new WriteCache.FileChannelScatteredWriteCache( - buf, true/* useChecksums */, - true/* isHighlyAvailable */, false/* bufferHasData */, - opener, 0L/* fileExtent */, null/* BufferedWrite */); - - final WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache( - buf, true/* useChecksums */, - true/* isHighlyAvailable */, false/* bufferHasData */, - opener, 0L/* fileExtent */, null/* BufferedWrite */); - - // write first data buffer - cache1.write(addr1, data1, chk1); - data1.flip(); - /* - * FIXME This is only testing a single address copy. We need to - * test much more than that. 
- */ - syncBuffers(buf, buf2); - assertEquals(buf.buffer(), buf2.buffer()); - cache2.resetRecordMapFromBuffer(); - assertEquals(cache1.read(addr1), data1); - if (cache2.read(addr1) == null) - fail("Nothing in replicated cache?"); - assertEquals(cache2.read(addr1), data1); - - // now simulate removal/delete - cache1.clearAddrMap(addr1, 0/*latchedAddr*/); - - syncBuffers(buf, buf2); - - cache2.resetRecordMapFromBuffer(); - - assertTrue(cache1.read(addr1) == null); - assertTrue(cache2.read(addr1) == null); - - // now write second data buffer - cache1.write(addr1, data2, chk2); - data2.flip(); - // buf2.buffer().limit(buf.buffer().position()); - syncBuffers(buf, buf2); - cache2.resetRecordMapFromBuffer(); - assertEquals(cache2.read(addr1), data2); - assertEquals(cache1.read(addr1), data2); - - } finally { - - buf.release(); - buf2.release(); - - } - - } finally { - - opener.destroy(); - - } - - } - - // ensure dst buffer is copy of src - private void syncBuffers(final IBufferAccess src, final IBufferAccess dst) { - - final ByteBuffer sb = src.buffer(); //.duplicate(); - final ByteBuffer db = dst.buffer(); //.duplicate(); - -// db.position(0); -// sb.position(0); +// /** +// * To test the buffer restore, we will share a buffer between two WriteCache +// * instances then write data to the first cache and update its recordMap +// * from the buffer. This short circuits the HA pipeline that streams the +// * ByteBuffer from one cache to the other. +// * +// * FIXME This is only testing a single addr. We need to test a bunch of +// * writes, not just one. (Also, this test is broken. MC needs to fix.) +// */ +// public void test_writeCacheScatteredBufferRestore() throws InterruptedException, IOException { +// final File file = File.createTempFile(getName(), ".tmp"); +// final ReopenFileChannel opener = new ReopenFileChannel(file, mode); +// try { +// +// final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); +// final IBufferAccess buf2 = DirectBufferPool.INSTANCE.acquire(); +// try { +// +// final long addr1 = 12800; +// +// final ByteBuffer data1 = getRandomData(20 * 1024); +// +// final int chk1 = ChecksumUtility.threadChk.get().checksum( +// data1, 0/* offset */, data1.limit()); +// +// final ByteBuffer data2 = getRandomData(20 * 1024); +// +// final int chk2 = ChecksumUtility.threadChk.get().checksum( +// data2, 0/* offset */, data2.limit()); +// +// final WriteCache cache1 = new WriteCache.FileChannelScatteredWriteCache( +// buf, true/* useChecksums */, +// true/* isHighlyAvailable */, false/* bufferHasData */, +// opener, 0L/* fileExtent */, null/* BufferedWrite */); +// +// final WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache( +// buf, true/* useChecksums */, +// true/* isHighlyAvailable */, false/* bufferHasData */, +// opener, 0L/* fileExtent */, null/* BufferedWrite */); +// +// // write first data buffer +// cache1.write(addr1, data1, chk1); +// data1.flip(); +// /* +// * FIXME This is only testing a single address copy. We need to +// * test much more than that. 
+// */ +// syncBuffers(buf, buf2); +// assertEquals(buf.buffer(), buf2.buffer()); +// cache2.resetRecordMapFromBuffer(); +// assertEquals(cache1.read(addr1), data1); +// if (cache2.read(addr1) == null) +// fail("Nothing in replicated cache?"); +// assertEquals(cache2.read(addr1), data1); +// +// // now simulate removal/delete +// cache1.clearAddrMap(addr1, 0/*latchedAddr*/); +// +// syncBuffers(buf, buf2); +// +// cache2.resetRecordMapFromBuffer(); +// +// assertTrue(cache1.read(addr1) == null); +// assertTrue(cache2.read(addr1) == null); +// +// // now write second data buffer +// cache1.write(addr1, data2, chk2); +// data2.flip(); +// // buf2.buffer().limit(buf.buffer().position()); +// syncBuffers(buf, buf2); +// cache2.resetRecordMapFromBuffer(); +// assertEquals(cache2.read(addr1), data2); +// assertEquals(cache1.read(addr1), data2); // -// db.put(sb/* src */); +// } finally { +// +// buf.release(); +// buf2.release(); +// +// } +// +// } finally { // -// db.position(0); -// sb.position(0); +// opener.destroy(); +// +// } +// +// } +// +// // ensure dst buffer is copy of src +// private void syncBuffers(final IBufferAccess src, final IBufferAccess dst) { +// +// final ByteBuffer sb = src.buffer(); //.duplicate(); +// final ByteBuffer db = dst.buffer(); //.duplicate(); +// +//// db.position(0); +//// sb.position(0); +//// +//// db.put(sb/* src */); +//// +//// db.position(0); +//// sb.position(0); +// +// int sp = sb.position(); +// int sl = sb.limit(); +// sb.position(0); +// db.position(0); +// sb.limit(sp); +//// db.limit(sp); +// db.put(sb); +// sb.position(sp); +// db.position(sp); +// sb.limit(sl); +// db.limit(sl); +// +// } - int sp = sb.position(); - int sl = sb.limit(); - sb.position(0); - db.position(0); - sb.limit(sp); -// db.limit(sp); - db.put(sb); - sb.position(sp); - db.position(sp); - sb.limit(sl); - db.limit(sl); - - } - /* * Now generate randomviews, first an ordered view of 10000 random lengths */ Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2013-02-08 16:17:49 UTC (rev 6896) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/io/writecache/TestWriteCache.java 2013-02-11 12:37:06 UTC (rev 6897) @@ -923,124 +923,124 @@ } - /** - * To test the buffer restore, we will share a buffer between two WriteCache - * instances then write data to the first cache and update its recordMap - * from the buffer. This short circuits the HA pipeline that streams the - * ByteBuffer from one cache to the other. - * - * FIXME This is only testing a single addr. We need to test a bunch of - * writes, not just one. 
- */ - public void test_writeCacheScatteredBufferRestore() throws InterruptedException, IOException { - final File file = File.createTempFile(getName(), ".tmp"); - final ReopenFileChannel opener = new ReopenFileChannel(file, mode); - try { - - final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); - final IBufferAccess buf2 = DirectBufferPool.INSTANCE.acquire(); - try { - - final long addr1 = 12800; - - final ByteBuffer data1 = getRandomData(20 * 1024); - - final int chk1 = ChecksumUtility.threadChk.get().checksum( - data1, 0/* offset */, data1.limit()); - - final ByteBuffer data2 = getRandomData(20 * 1024); - - final int chk2 = ChecksumUtility.threadChk.get().checksum( - data2, 0/* offset */, data2.limit()); - - final WriteCache cache1 = new WriteCache.FileChannelScatteredWriteCache( - buf, true/* useChecksums */, - true/* isHighlyAvailable */, false/* bufferHasData */, - opener, 0L/* fileExtent */, null/* BufferedWrite */); - - final WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache( - buf2, true/* useChecksums */, - true/* isHighlyAvailable */, false/* bufferHasData */, - opener, 0L/* fileExtent */, null/* BufferedWrite */); - - // write first data buffer - cache1.write(addr1, data1, chk1); - data1.flip(); - /* - * FIXME This is only testing a single address copy. We need to - * test much more than that. - */ - syncBuffers(buf, buf2); - assertEquals(buf.buffer(), buf2.buffer()); - buf2.buffer().flip(); - cache2.resetRecordMapFromBuffer(); - assertEquals(cache1.read(addr1, data1.capacity()), data1); - if (cache2.read(addr1, data1.capacity()) == null) - fail("Nothing in replicated cache?"); - assertEquals(cache2.read(addr1, data1.capacity()), data1); - - // now simulate removal/delete - cache1.clearAddrMap(addr1, 0/*latchedAddr*/); - - syncBuffers(buf, buf2); - - cache2.resetRecordMapFromBuffer(); - - assertTrue(cache1.read(addr1, data1.capacity()) == null); - assertTrue(cache2.read(addr1, data1.capacity()) == null); - - // now write second data buffer - cache1.write(addr1, data2, chk2); - // data2.flip(); - // buf2.buffer().limit(buf.buffer().position()); - syncBuffers(buf, buf2); - cache2.resetRecordMapFromBuffer(); - assertEquals(cache2.read(addr1, data1.capacity()), data2); - assertEquals(cache1.read(addr1, data1.capacity()), data2); - - } finally { - - buf.release(); - buf2.release(); - - } - - } finally { - - opener.destroy(); - - } - - } - - // ensure dst buffer is copy of src - private void syncBuffers(final IBufferAccess src, final IBufferAccess dst) { - - final ByteBuffer sb = src.buffer(); //.duplicate(); - final ByteBuffer db = dst.buffer(); //.duplicate(); - -// db.position(0); -// sb.position(0); +// /** +// * To test the buffer restore, we will share a buffer between two WriteCache +// * instances then write data to the first cache and update its recordMap +// * from the buffer. This short circuits the HA pipeline that streams the +// * ByteBuffer from one cache to the other. +// * +// * FIXME This is only testing a single addr. We need to test a bunch of +// * writes, not just one. (Also, this test is broken. MC needs to fix.) 
+// */ +// public void test_writeCacheScatteredBufferRestore() throws InterruptedException, IOException { +// final File file = File.createTempFile(getName(), ".tmp"); +// final ReopenFileChannel opener = new ReopenFileChannel(file, mode); +// try { +// +// final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); +// final IBufferAccess buf2 = DirectBufferPool.INSTANCE.acquire(); +// try { +// +// final long addr1 = 12800; +// +// final ByteBuffer data1 = getRandomData(20 * 1024); +// +// final int chk1 = ChecksumUtility.threadChk.get().checksum( +// data1, 0/* offset */, data1.limit()); +// +// final ByteBuffer data2 = getRandomData(20 * 1024); +// +// final int chk2 = ChecksumUtility.threadChk.get().checksum( +// data2, 0/* offset */, data2.limit()); +// +// final WriteCache cache1 = new WriteCache.FileChannelScatteredWriteCache( +// buf, true/* useChecksums */, +// true/* isHighlyAvailable */, false/* bufferHasData */, +// opener, 0L/* fileExtent */, null/* BufferedWrite */); +// +// final WriteCache cache2 = new WriteCache.FileChannelScatteredWriteCache( +// buf2, true/* useChecksums */, +// true/* isHighlyAvailable */, false/* bufferHasData */, +// opener, 0L/* fileExtent */, null/* BufferedWrite */); +// +// // write first data buffer +// cache1.write(addr1, data1, chk1); +// data1.flip(); +// /* +// * FIXME This is only testing a single address copy. We need to +// * test much more than that. +// */ +// syncBuffers(buf, buf2); +// assertEquals(buf.buffer(), buf2.buffer()); +// buf2.buffer().flip(); +// cache2.resetRecordMapFromBuffer(); +// assertEquals(cache1.read(addr1, data1.capacity()), data1); +// if (cache2.read(addr1, data1.capacity()) == null) +// fail("Nothing in replicated cache?"); +// assertEquals(cache2.read(addr1, data1.capacity()), data1); +// +// // now simulate removal/delete +// cache1.clearAddrMap(addr1, 0/*latchedAddr*/); +// +// syncBuffers(buf, buf2); +// +// cache2.resetRecordMapFromBuffer(); +// +// assertTrue(cache1.read(addr1, data1.capacity()) == null); +// assertTrue(cache2.read(addr1, data1.capacity()) == null); +// +// // now write second data buffer +// cache1.write(addr1, data2, chk2); +// // data2.flip(); +// // buf2.buffer().limit(buf.buffer().position()); +// syncBuffers(buf, buf2); +// cache2.resetRecordMapFromBuffer(); +// assertEquals(cache2.read(addr1, data1.capacity()), data2); +// assertEquals(cache1.read(addr1, data1.capacity()), data2); // -// db.put(sb/* src */); +// } finally { +// +// buf.release(); +// buf2.release(); +// +// } +// +// } finally { // -// db.position(0); -// sb.position(0); +// opener.destroy(); +// +// } +// +// } +// +// // ensure dst buffer is copy of src +// private void syncBuffers(final IBufferAccess src, final IBufferAccess dst) { +// +// final ByteBuffer sb = src.buffer(); //.duplicate(); +// final ByteBuffer db = dst.buffer(); //.duplicate(); +// +//// db.position(0); +//// sb.position(0); +//// +//// db.put(sb/* src */); +//// +//// db.position(0); +//// sb.position(0); +// +// int sp = sb.position(); +// int sl = sb.limit(); +// sb.position(0); +// db.position(0); +// sb.limit(sp); +//// db.limit(sp); +// db.put(sb); +// sb.position(sp); +// db.position(sp); +// sb.limit(sl); +// db.limit(sl); +// +// } - int sp = sb.position(); - int sl = sb.limit(); - sb.position(0); - db.position(0); - sb.limit(sp); -// db.limit(sp); - db.put(sb); - sb.position(sp); - db.position(sp); - sb.limit(sl); - db.limit(sl); - - } - /* * Now generate randomviews, first an ordered view of 10000 random lengths */ This was sent 
by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
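The syncBuffers helper that was commented out along with the test encodes a ByteBuffer idiom worth spelling out on its own: copy the bytes written so far, i.e. the range [0, position), from one buffer into another and leave both buffers with the same position and limit afterwards. The following is an illustrative standalone version, not the disabled test code:

import java.nio.ByteBuffer;

/**
 * Copies src[0, position) into dst and aligns dst's position and limit
 * with src's, the same effect the syncBuffers helper aimed for.
 */
public class SyncBuffersSketch {

    static void syncBuffers(final ByteBuffer src, final ByteBuffer dst) {
        final int pos = src.position();
        final int lim = src.limit();
        // Restrict the copy to the bytes written so far.
        src.position(0).limit(pos);
        dst.position(0);
        dst.put(src);
        // Restore src and align dst to the same position/limit.
        src.position(pos).limit(lim);
        dst.position(pos).limit(lim);
    }

    public static void main(String[] args) {
        final ByteBuffer a = ByteBuffer.allocate(64);
        final ByteBuffer b = ByteBuffer.allocate(64);
        a.put(new byte[] { 1, 2, 3 });
        syncBuffers(a, b);
        System.out.println(a.position() == b.position());               // true
        System.out.println(b.get(0) + "," + b.get(1) + "," + b.get(2)); // 1,2,3
    }
}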
From: <tho...@us...> - 2013-03-15 22:31:25
|
Revision: 7002 http://bigdata.svn.sourceforge.net/bigdata/?rev=7002&view=rev Author: thompsonbry Date: 2013-03-15 22:31:14 +0000 (Fri, 15 Mar 2013) Log Message: ----------- Applied the same change to the READ_CACHE branch. Also, restored WriteCache.acquire() and WriteCache.release() as private methods. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-03-15 22:27:45 UTC (rev 7001) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-03-15 22:31:14 UTC (rev 7002) @@ -165,7 +165,7 @@ * @throws IllegalStateException * if the {@link WriteCache} is closed. */ - ByteBuffer acquire() throws InterruptedException, IllegalStateException { + private ByteBuffer acquire() throws InterruptedException, IllegalStateException { final Lock readLock = lock.readLock(); @@ -208,7 +208,7 @@ /** * Release the read lock on an acquired {@link ByteBuffer}. */ - void release() { + private void release() { lock.readLock().unlock(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-03-15 22:27:45 UTC (rev 7001) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-03-15 22:31:14 UTC (rev 7002) @@ -166,7 +166,7 @@ * @throws IllegalStateException * if the {@link WriteCache} is closed. */ - ByteBuffer acquire() throws InterruptedException, IllegalStateException { + private ByteBuffer acquire() throws InterruptedException, IllegalStateException { final Lock readLock = lock.readLock(); @@ -209,7 +209,7 @@ /** * Release the read lock on an acquired {@link ByteBuffer}. */ - void release() { + private void release() { lock.readLock().unlock(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-03-15 22:27:45 UTC (rev 7001) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-03-15 22:31:14 UTC (rev 7002) @@ -3757,19 +3757,19 @@ } cache.transferLock.lock(); try { - /** - * Note: The tests below require us to take the read lock on - * the WriteCache before we test the serviceMap again in - * order to guard against a concurrent reset() of the - * WriteCache. - * - * @see <a href= - * "https://sourceforge.net/apps/trac/bigdata/ticket/654" - * Rare AssertionError in WriteCache.clearAddrMap() - * </a> - */ - cache.acquire(); - try { +// /** +// * Note: The tests below require us to take the read lock on +// * the WriteCache before we test the serviceMap again in +// * order to guard against a concurrent reset() of the +// * WriteCache. 
+// * +// * @see <a href= +// * "https://sourceforge.net/apps/trac/bigdata/ticket/654" +// * Rare AssertionError in WriteCache.clearAddrMap() +// * </a> +// */ +// cache.acquire(); +// try { final WriteCache cache2 = serviceMap.get(offset); if (cache2 != cache) { /* @@ -3785,17 +3785,26 @@ // Remove entry from the recordMap. final WriteCache oldValue = serviceMap.remove(offset); + if (oldValue == null) { + /** + * Note: The [WriteCache.transferLock] protects the + * WriteCache against a concurrent transfer of a record + * in WriteCache.transferTo(). However, + * WriteCache.resetWith() does NOT take the + * transferLock. Therefore, it is possible (and valid) + * for the [recordMap] entry to be cleared to [null] for + * this record by a concurrent resetWith() call. + * + * @see <a href= + * "https://sourceforge.net/apps/trac/bigdata/ticket/654" + * Rare AssertionError in WriteCache.clearAddrMap() + * </a> + */ + continue; + } if (oldValue != cache) { /* * Concurrent modification! - * - * Note: The [WriteCache.transferLock] protects the - * WriteCache against a concurrent transfer of a record - * in WriteCache.transferTo(). However, - * WriteCache.resetWith() does NOT take the - * transferLock. Therefore, it is possible (and valid) - * for the [recordMap] entry to be cleared to [null] for - * this record by a concurrent resetWith() call. */ throw new AssertionError("oldValue=" + oldValue + ", cache=" + cache + ", offset=" + offset @@ -3812,9 +3821,9 @@ debugAddrs(offset, 0, 'F'); return true; } - } finally { - cache.release(); - } +// } finally { +// cache.release(); +// } } finally { cache.transferLock.unlock(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
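The substantive part of the change above is the removal race it now tolerates: the clearing code runs while holding cache.transferLock, but WriteCache.resetWith() does not take that lock and may concurrently clear the same entry, so serviceMap.remove(offset) returning null is treated as a legitimate "already cleared" outcome rather than an AssertionError. A generic sketch of that shape of check, using plain java.util.concurrent types rather than the bigdata classes:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Tolerating a concurrent removal: one code path clears an entry under its
 * own lock while another path may clear the same entry without that lock,
 * so a null from remove() means "already cleared" and is not an error.
 */
class ClearEntrySketch {

    private final ConcurrentMap<Long, Object> recordMap =
            new ConcurrentHashMap<Long, Object>();

    boolean clear(final long offset, final Object expectedOwner) {
        final Object oldValue = recordMap.remove(offset);
        if (oldValue == null) {
            // Concurrently cleared (e.g. by a reset); valid, nothing left to do.
            return false;
        }
        if (oldValue != expectedOwner) {
            // A different owner was registered for this offset: a real inconsistency.
            throw new AssertionError("oldValue=" + oldValue + ", expected=" + expectedOwner);
        }
        // Cleared by this caller.
        return true;
    }
}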
From: <tho...@us...> - 2013-06-29 17:22:13
|
Revision: 7204 http://bigdata.svn.sourceforge.net/bigdata/?rev=7204&view=rev Author: thompsonbry Date: 2013-06-29 17:22:01 +0000 (Sat, 29 Jun 2013) Log Message: ----------- There are a few locations in RemoteRepositoryManager which have this issue. I have written a test in which the namespace contains characters that are not allowed in a URL without encoding and can demonstrate failures against that test. There are also places on the server where the namespace is not being encoded when generating a URL, e.g., for the VOID description of the available KBs and also it was failing to decode the namespace in BigdataRDFServlet#getNamespace(). Changes are to: - BigdataRDFServlet - VoID - MultiTenancyServlet - ConnectOptions - RemoteRepositoryManager - TestMultiTenancyServlet Commit is against both the 1.2.x and the READ_CACHE branches. @see https://sourceforge.net/apps/trac/bigdata/ticket/689 (Missing URL encoding in RemoteRepositoryManager) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -33,6 +33,8 @@ import java.io.PipedOutputStream; import java.io.PrintWriter; import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; import java.util.Iterator; import java.util.Properties; @@ -285,6 +287,9 @@ * The URI path string. * * @return The namespace. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ protected String getNamespace(final HttpServletRequest req) { @@ -326,7 +331,14 @@ } // return the namespace. 
- return uri.substring(beginIndex + 1, endIndex); + final String t = uri.substring(beginIndex + 1, endIndex); + String namespace; + try { + namespace = URLDecoder.decode(t, UTF8); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + return namespace; } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -46,6 +46,7 @@ import com.bigdata.rdf.properties.PropertiesParserRegistry; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.ScaleOutTripleStore; import com.bigdata.service.IBigdataFederation; @@ -125,6 +126,9 @@ /** * Delete the KB associated with the effective namespace. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ @Override protected void doDelete(final HttpServletRequest req, @@ -137,7 +141,8 @@ final String namespace = getNamespace(req); - if (req.getRequestURI().endsWith("/namespace/" + namespace)) { + if (req.getRequestURI().endsWith( + "/namespace/" + ConnectOptions.urlEncode(namespace))) { // Delete that namespace. doDeleteNamespace(req, resp); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -46,6 +46,7 @@ import com.bigdata.rdf.model.BigdataResource; import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.spo.SPOKeyOrder; import com.bigdata.rdf.spo.SPORelation; import com.bigdata.rdf.store.AbstractTripleStore; @@ -173,11 +174,17 @@ // Also present the namespace in an unambiguous manner. g.add(aDataset, SD.KB_NAMESPACE, f.createLiteral(namespace)); - /* + /** * Service end point for this namespace. + * + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ - g.add(aDataset, VoidVocabularyDecl.sparqlEndpoint, - f.createURI(serviceURI + "/" + namespace + "/sparql")); + g.add(aDataset, + VoidVocabularyDecl.sparqlEndpoint, + f.createURI(serviceURI + "/" + + ConnectOptions.urlEncode(namespace) + "/sparql")); // any URI is considered to be an entity. 
g.add(aDataset, VoidVocabularyDecl.uriRegexPattern, Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -241,6 +241,34 @@ } + /** + * Apply a UTF8 encoding to a component of a URL. + * + * @param in + * The text to be encoded. + * + * @return The UTF8 encoding of that text. + * + * @throws RuntimeException + * if the {@link RemoteRepository#UTF8} encoding is not + * available. + * @throws NullPointerException + * if the argument is <code>null</code>. + */ + public static String urlEncode(final String in) { + try { + + final String out = URLEncoder.encode(in, RemoteRepository.UTF8); + + return out; + + } catch (UnsupportedEncodingException e) { + + throw new RuntimeException(e); + + } + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -102,6 +102,24 @@ } /** + * Return the base URL for a remote repository (less the /sparql path + * component). + * + * @param namespace + * The namespace. + * + * @return The base URL. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> + */ + protected String getRepositoryBaseURLForNamespace(final String namespace) { + + return baseServiceURL + "/namespace/" + + ConnectOptions.urlEncode(namespace); + } + + /** * Obtain a {@link RemoteRepository} for a data set managed by the remote * service. 
* @@ -112,9 +130,9 @@ */ public RemoteRepository getRepositoryForNamespace(final String namespace) { - return new RemoteRepository(baseServiceURL + "/namespace/" + namespace + return new RemoteRepository(getRepositoryBaseURLForNamespace(namespace) + "/sparql", httpClient, executor); - + } /** @@ -220,8 +238,7 @@ */ public void deleteRepository(final String namespace) throws Exception { - final ConnectOptions opts = newConnectOptions(baseServiceURL - + "/namespace/" + namespace); + final ConnectOptions opts = newConnectOptions(getRepositoryBaseURLForNamespace(namespace)); opts.method = "DELETE"; @@ -251,8 +268,8 @@ public Properties getRepositoryProperties(final String namespace) throws Exception { - final ConnectOptions opts = newConnectOptions(baseServiceURL - + "/namespace/" + namespace + "/properties"); + final ConnectOptions opts = newConnectOptions(getRepositoryBaseURLForNamespace(namespace) + + "/properties"); opts.method = "GET"; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -287,12 +287,53 @@ */ final String namespace2 = "kb2-" + UUID.randomUUID(); + doTestCreate(namespace2); + + } + + /** + * Test for correct URL encoding of the namespace in the URL requests. + * + * @throws Exception + */ + public void test_create02() throws Exception { + + /* + * Create a new data set. The namespace incorporates a UUID in case we + * are running against a server rather than an embedded per-test target. + * The properties are mostly inherited from the default configuration, + * but the namespace of the new data set is explicitly set for the + * CREATE operation. + */ + final String namespace2 = "kb2-" + UUID.randomUUID() + "-&/<>-foo"; + + doTestCreate(namespace2); + + } + + private void doTestCreate(final String namespace2) throws Exception { + final Properties properties = new Properties(); properties.setProperty(BigdataSail.Options.NAMESPACE, namespace2); + { // verify does not exist. + try { + m_repo.getRepositoryProperties(namespace2); + fail("Should not exist: " + namespace2); + } catch (HttpException ex) { + // Expected status code. + assertEquals(404,ex.getStatusCode()); + } + } + m_repo.createRepository(namespace2, properties); + { // verify exists. + final Properties p = m_repo.getRepositoryProperties(namespace2); + assertNotNull(p); + } + /* * Verify error if attempting to create a KB for a namespace which * already exists. Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServlet.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -33,6 +33,8 @@ import java.io.PipedOutputStream; import java.io.PrintWriter; import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; import java.util.Iterator; import java.util.Properties; @@ -285,6 +287,9 @@ * The URI path string. * * @return The namespace. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ protected String getNamespace(final HttpServletRequest req) { @@ -326,8 +331,15 @@ } // return the namespace. - return uri.substring(beginIndex + 1, endIndex); - + final String t = uri.substring(beginIndex + 1, endIndex); + String namespace; + try { + namespace = URLDecoder.decode(t, UTF8); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + return namespace; + } /** Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -46,6 +46,7 @@ import com.bigdata.rdf.properties.PropertiesParserRegistry; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.ScaleOutTripleStore; import com.bigdata.service.IBigdataFederation; @@ -125,6 +126,9 @@ /** * Delete the KB associated with the effective namespace. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ @Override protected void doDelete(final HttpServletRequest req, @@ -137,7 +141,8 @@ final String namespace = getNamespace(req); - if (req.getRequestURI().endsWith("/namespace/" + namespace)) { + if (req.getRequestURI().endsWith( + "/namespace/" + ConnectOptions.urlEncode(namespace))) { // Delete that namespace. doDeleteNamespace(req, resp); Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/VoID.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -46,6 +46,7 @@ import com.bigdata.rdf.model.BigdataResource; import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.model.BigdataValue; +import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.spo.SPOKeyOrder; import com.bigdata.rdf.spo.SPORelation; import com.bigdata.rdf.store.AbstractTripleStore; @@ -173,11 +174,17 @@ // Also present the namespace in an unambiguous manner. g.add(aDataset, SD.KB_NAMESPACE, f.createLiteral(namespace)); - /* + /** * Service end point for this namespace. + * + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> */ - g.add(aDataset, VoidVocabularyDecl.sparqlEndpoint, - f.createURI(serviceURI + "/" + namespace + "/sparql")); + g.add(aDataset, + VoidVocabularyDecl.sparqlEndpoint, + f.createURI(serviceURI + "/" + + ConnectOptions.urlEncode(namespace) + "/sparql")); // any URI is considered to be an entity. 
g.add(aDataset, VoidVocabularyDecl.uriRegexPattern, Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -241,6 +241,34 @@ } + /** + * Apply a UTF8 encoding to a component of a URL. + * + * @param in + * The text to be encoded. + * + * @return The UTF8 encoding of that text. + * + * @throws RuntimeException + * if the {@link RemoteRepository#UTF8} encoding is not + * available. + * @throws NullPointerException + * if the argument is <code>null</code>. + */ + public static String urlEncode(final String in) { + try { + + final String out = URLEncoder.encode(in, RemoteRepository.UTF8); + + return out; + + } catch (UnsupportedEncodingException e) { + + throw new RuntimeException(e); + + } + } + } Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -102,17 +102,35 @@ } /** + * Return the base URL for a remote repository (less the /sparql path + * component). + * + * @param namespace + * The namespace. + * + * @return The base URL. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/689" > + * Missing URL encoding in RemoteRepositoryManager </a> + */ + protected String getRepositoryBaseURLForNamespace(final String namespace) { + + return baseServiceURL + "/namespace/" + + ConnectOptions.urlEncode(namespace); + } + + /** * Obtain a {@link RemoteRepository} for a data set managed by the remote * service. * * @param namespace * The name of the data set (its bigdata namespace). - * + * * @return An interface which may be used to talk to that data set. 
*/ public RemoteRepository getRepositoryForNamespace(final String namespace) { - return new RemoteRepository(baseServiceURL + "/namespace/" + namespace + return new RemoteRepository(getRepositoryBaseURLForNamespace(namespace) + "/sparql", httpClient, executor); } @@ -220,8 +238,7 @@ */ public void deleteRepository(final String namespace) throws Exception { - final ConnectOptions opts = newConnectOptions(baseServiceURL - + "/namespace/" + namespace); + final ConnectOptions opts = newConnectOptions(getRepositoryBaseURLForNamespace(namespace)); opts.method = "DELETE"; @@ -251,8 +268,8 @@ public Properties getRepositoryProperties(final String namespace) throws Exception { - final ConnectOptions opts = newConnectOptions(baseServiceURL - + "/namespace/" + namespace + "/properties"); + final ConnectOptions opts = newConnectOptions(getRepositoryBaseURLForNamespace(namespace) + + "/properties"); opts.method = "GET"; Modified: branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2013-06-29 13:52:49 UTC (rev 7203) +++ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestMultiTenancyAPI.java 2013-06-29 17:22:01 UTC (rev 7204) @@ -266,12 +266,53 @@ */ final String namespace2 = "kb2-" + UUID.randomUUID(); + doTestCreate(namespace2); + + } + + /** + * Test for correct URL encoding of the namespace in the URL requests. + * + * @throws Exception + */ + public void test_create02() throws Exception { + + /* + * Create a new data set. The namespace incorporates a UUID in case we + * are running against a server rather than an embedded per-test target. + * The properties are mostly inherited from the default configuration, + * but the namespace of the new data set is explicitly set for the + * CREATE operation. + */ + final String namespace2 = "kb2-" + UUID.randomUUID() + "-&/<>-foo"; + + doTestCreate(namespace2); + + } + + private void doTestCreate(final String namespace2) throws Exception { + final Properties properties = new Properties(); properties.setProperty(BigdataSail.Options.NAMESPACE, namespace2); + { // verify does not exist. + try { + m_repo.getRepositoryProperties(namespace2); + fail("Should not exist: " + namespace2); + } catch (HttpException ex) { + // Expected status code. + assertEquals(404,ex.getStatusCode()); + } + } + m_repo.createRepository(namespace2, properties); + { // verify exists. + final Properties p = m_repo.getRepositoryProperties(namespace2); + assertNotNull(p); + } + /* * Verify error if attempting to create a KB for a namespace which * already exists. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
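A minimal, self-contained sketch of the encode/decode round trip that r7204 routes through ConnectOptions.urlEncode() on the client and getNamespace() in BigdataRDFServlet on the server. The namespace value and the localhost endpoint below are made up for illustration; only the use of URLEncoder/URLDecoder with UTF-8 is taken from the patch.

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;
    import java.net.URLEncoder;

    public class NamespaceEncodingSketch {

        private static final String UTF8 = "UTF-8";

        public static void main(final String[] args) throws UnsupportedEncodingException {

            // A namespace containing characters that are unsafe in a URL path.
            final String namespace = "kb2-&/<>-foo";

            // Client side: encode the namespace before building the request URL.
            final String encoded = URLEncoder.encode(namespace, UTF8);

            // Hypothetical endpoint; the real code prepends baseServiceURL.
            final String sparqlEndpoint = "http://localhost:8080/bigdata/namespace/"
                    + encoded + "/sparql";

            // Server side: decode the path component to recover the namespace.
            final String decoded = URLDecoder.decode(encoded, UTF8);

            System.out.println(sparqlEndpoint);
            System.out.println("round trip ok: " + namespace.equals(decoded));
        }
    }

One design note: URLEncoder targets the application/x-www-form-urlencoded format, so a space in a namespace becomes "+" in the path, which is the behavior the patch inherits.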
From: <tho...@us...> - 2013-06-29 20:21:47
Revision: 7205 http://bigdata.svn.sourceforge.net/bigdata/?rev=7205&view=rev Author: thompsonbry Date: 2013-06-29 20:21:35 +0000 (Sat, 29 Jun 2013) Log Message: ----------- A CI deadlock has been observed again for [1]. I am modifying the test to use a timeout (3 minutes). This way the test should fail if a transaction deadlock arises rather than causing CI to deadlock. [1] https://sourceforge.net/apps/trac/bigdata/ticket/237 (CI deadlock in com.bigdata.concurrent.TestLockManager.test_multipleResourceLocking_resources10_locktries10) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java branches/READ_CACHE/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java 2013-06-29 17:22:01 UTC (rev 7204) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java 2013-06-29 20:21:35 UTC (rev 7205) @@ -160,7 +160,7 @@ * behavior of tasks that lock only a single resource, eg., unisolated * operations on the {@link DataService}. */ - public Result doComparisonTest(Properties properties) throws Exception { + public Result doComparisonTest(final Properties properties) throws Exception { final long testTimeout = Integer.parseInt(properties.getProperty( TestOptions.TIMEOUT, TestOptions.DEFAULT_TIMEOUT)); @@ -210,10 +210,10 @@ assert maxLockTries >= 1; - ExecutorService execService = Executors.newFixedThreadPool(nthreads, + final ExecutorService execService = Executors.newFixedThreadPool(nthreads, DaemonThreadFactory.defaultThreadFactory()); - Collection<Callable<Object>> tasks = new ArrayList<Callable<Object>>( + final Collection<Callable<Object>> tasks = new ArrayList<Callable<Object>>( ntasks); LockManager<String> db = new LockManager<String>( @@ -231,7 +231,7 @@ } - Random r = new Random(); + final Random r = new Random(); // create tasks; each will use between minLocks and maxLocks distinct // resources. @@ -765,13 +765,19 @@ * <p> * Note: This condition provides the basis for deadlocks. In fact, since we * have 10 resource locks for each operation and only 100 operations the - * chances of a deadlock on any given operation are extremely high. + * chances of a deadlock on any given operation are extremely high. However, + * since we are predeclaring our locks and the lock requests are being + * sorted NO deadlocks should result. * * @throws Exception + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/237" > CI + * deadlock in + * com.bigdata.concurrent.TestLockManager.test_multipleResourceLocking_resources10_locktries10</a> */ public void test_multipleResourceLocking_resources10_locktries10() throws Exception { - Properties properties = new Properties(); + final Properties properties = new Properties(); properties.setProperty(TestOptions.NTHREADS,"20"); properties.setProperty(TestOptions.NTASKS,"1000"); @@ -781,6 +787,11 @@ properties.setProperty(TestOptions.MAX_LOCK_TRIES,"10"); properties.setProperty(TestOptions.PREDECLARE_LOCKS,"false"); properties.setProperty(TestOptions.SORT_LOCK_REQUESTS,"false"); + /* + * Note: A timeout was introduced in order to work cause this test to + * fail rather than deadlock. It very occasionally will deadlock in CI. 
+ */ + properties.setProperty(TestOptions.TIMEOUT, Long.toString(3 * 60/* seconds */)); doComparisonTest(properties); Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java 2013-06-29 17:22:01 UTC (rev 7204) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/concurrent/TestLockManager.java 2013-06-29 20:21:35 UTC (rev 7205) @@ -160,7 +160,7 @@ * behavior of tasks that lock only a single resource, eg., unisolated * operations on the {@link DataService}. */ - public Result doComparisonTest(Properties properties) throws Exception { + public Result doComparisonTest(final Properties properties) throws Exception { final long testTimeout = Integer.parseInt(properties.getProperty( TestOptions.TIMEOUT, TestOptions.DEFAULT_TIMEOUT)); @@ -210,10 +210,10 @@ assert maxLockTries >= 1; - ExecutorService execService = Executors.newFixedThreadPool(nthreads, + final ExecutorService execService = Executors.newFixedThreadPool(nthreads, DaemonThreadFactory.defaultThreadFactory()); - Collection<Callable<Object>> tasks = new ArrayList<Callable<Object>>( + final Collection<Callable<Object>> tasks = new ArrayList<Callable<Object>>( ntasks); LockManager<String> db = new LockManager<String>( @@ -231,7 +231,7 @@ } - Random r = new Random(); + final Random r = new Random(); // create tasks; each will use between minLocks and maxLocks distinct // resources. @@ -737,6 +737,10 @@ * sorted NO deadlocks should result. * * @throws Exception + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/237" > CI + * deadlock in + * com.bigdata.concurrent.TestLockManager.test_multipleResourceLocking_resources10_locktries10</a> */ public void test_multipleResourceLocking_resources10_locktries10_predeclareLocks() throws Exception { @@ -771,7 +775,7 @@ */ public void test_multipleResourceLocking_resources10_locktries10() throws Exception { - Properties properties = new Properties(); + final Properties properties = new Properties(); properties.setProperty(TestOptions.NTHREADS,"20"); properties.setProperty(TestOptions.NTASKS,"1000"); @@ -781,6 +785,11 @@ properties.setProperty(TestOptions.MAX_LOCK_TRIES,"10"); properties.setProperty(TestOptions.PREDECLARE_LOCKS,"false"); properties.setProperty(TestOptions.SORT_LOCK_REQUESTS,"false"); + /* + * Note: A timeout was introduced in order to work cause this test to + * fail rather than deadlock. It very occasionally will deadlock in CI. + */ + properties.setProperty(TestOptions.TIMEOUT, Long.toString(3 * 60/* seconds */)); doComparisonTest(properties); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
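The TIMEOUT option above is consumed inside doComparisonTest(); the sketch below only illustrates the general pattern r7205 relies on, bounding a workload that may deadlock with Future.get(timeout) so the test fails instead of hanging CI. The task body and the failure message are placeholders, not code from TestLockManager.

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class BoundedTestSketch {

        public static void main(final String[] args) throws Exception {

            final ExecutorService exec = Executors.newSingleThreadExecutor();

            final Future<Void> f = exec.submit(new Callable<Void>() {
                public Void call() throws Exception {
                    // Stand-in for the lock-contention workload; in the worst
                    // case this could block indefinitely.
                    Thread.sleep(100);
                    return null;
                }
            });

            try {
                // Bound the wait (3 minutes in r7205) so a deadlock surfaces
                // as a test failure rather than a hung CI run.
                f.get(3 * 60, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                f.cancel(true/* mayInterruptIfRunning */);
                throw new AssertionError("Timeout - possible deadlock");
            } finally {
                exec.shutdownNow();
            }
        }
    }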
From: <tho...@us...> - 2013-07-05 19:44:50
Revision: 7217 http://bigdata.svn.sourceforge.net/bigdata/?rev=7217&view=rev Author: thompsonbry Date: 2013-07-05 19:44:40 +0000 (Fri, 05 Jul 2013) Log Message: ----------- Branch to work through error state transitions. Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java Added Paths: ----------- branches/READ_CACHE2/ Property changes on: branches/READ_CACHE2 ___________________________________________________________________ Added: svn:ignore + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI Added: svn:mergeinfo + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7213 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/QuorumService.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -141,4 +141,16 @@ void didMeet(final long token, final long commitCounter, final boolean isLeader); + /** + * Enter an error state. 
The error state should take whatever corrective + * actions are necessary in order to prepare the service for continued + * operations. + */ + void enterErrorState(); + + /** + * Discard all state associated with the current write set. + */ + void discardWriteSet(); + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -620,8 +620,13 @@ * Conditional remove iff file is open. Will not remove * something that has been closed. */ + if (haLog.isInfoEnabled()) + haLog.info("Will close: " + m_state.m_haLogFile); + m_state.forceCloseAll(); - + if(false||m_state.isCommitted()) return; // Do not remove a sealed HALog file! + if (haLog.isInfoEnabled()) + haLog.info("Will remove: " + m_state.m_haLogFile); if (m_state.m_haLogFile.exists() && !m_state.m_haLogFile.delete()) { /* Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HA2PhasePrepareMessage.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -130,7 +130,19 @@ return unit; } + /** + * {@inheritDoc} + * <p> + * Returns <code>false</code> by default + */ @Override + public boolean voteNo() { + + return false; + + } + + @Override public String toString() { return super.toString()+"{"// +"consensusReleaseTime="+getConsensusReleaseTime()// Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHA2PhasePrepareMessage.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -88,4 +88,10 @@ * The unit for the timeout. */ TimeUnit getUnit(); + + /** + * When <code>true</code>, always vote note. + */ + boolean voteNo(); + } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -2699,7 +2699,7 @@ * {@link ICommitRecord} from the root blocks of the store. */// TODO Could merge with doLocalAbort(). private void _abort() { - +log.warn("ABORT",new RuntimeException("ABORT")); final WriteLock lock = _fieldReadWriteLock.writeLock(); lock.lock(); @@ -2812,10 +2812,28 @@ // discard any hard references that might be cached. discardCommitters(); - // setup new committers, e.g., by reloading from their last root - // addr. - setupCommitters(); + /* + * Setup new committers, e.g., by reloading from their last root + * addr. 
+ */ + setupCommitters(); + + if (quorum != null) { + + /* + * In HA, we need to tell the QuorumService that the database + * has done an abort() so it can discard any local state + * associated with the current write set (the HALog file and the + * last live HA message). + */ + + final QuorumService<HAGlue> localService = quorum.getClient(); + + localService.discardWriteSet(); + + } + if (log.isInfoEnabled()) log.info("done"); @@ -2825,7 +2843,7 @@ } - } + } /** * Rollback a journal to its previous commit point. @@ -3245,241 +3263,248 @@ commitLock.lock(); } try { - /* - * Call commit on buffer strategy prior to retrieving root block, - * required for RWStore since the metaBits allocations are not made - * until commit, leading to invalid addresses for recent store - * allocations. - * - * Note: This will flush the write cache. For HA, that ensures that - * the write set has been replicated to the followers. - * - * Note: After this, we do not write anything on the backing store - * other than the root block. The rest of this code is dedicated to - * creating a properly formed root block. For a non-HA deployment, - * we just lay down the root block. For an HA deployment, we do a - * 2-phase commit. - * - * Note: In HA, the followers lay down the replicated writes - * synchronously. Thus, they are guaranteed to be on local storage - * by the time the leader finishes WriteCacheService.flush(). This - * does not create much latency because the WriteCacheService drains - * the dirtyList in a seperate thread. - */ - _bufferStrategy.commit(); - - /* - * The next offset at which user data would be written. - * Calculated, after commit! - */ - nextOffset = _bufferStrategy.getNextOffset(); - - final long blockSequence; - - if (_bufferStrategy instanceof IHABufferStrategy) { - - // always available for HA. - blockSequence = ((IHABufferStrategy) _bufferStrategy) - .getBlockSequence(); - - } else { - - blockSequence = old.getBlockSequence(); - - } - - /* - * Prepare the new root block. - */ - final IRootBlockView newRootBlock; - { - - /* - * Update the firstCommitTime the first time a transaction - * commits and the lastCommitTime each time a transaction - * commits (these are commit timestamps of isolated or - * unisolated transactions). - */ - - final long firstCommitTime = (old.getFirstCommitTime() == 0L ? commitTime - : old.getFirstCommitTime()); - - final long priorCommitTime = old.getLastCommitTime(); - - if (priorCommitTime != 0L) { - - /* - * This is a local sanity check to make sure that the commit - * timestamps are strictly increasing. An error will be - * reported if the commit time for the current (un)isolated - * transaction is not strictly greater than the last commit - * time on the store as read back from the current root - * block. - */ - - assertPriorCommitTimeAdvances(commitTime, priorCommitTime); - - } - - final long lastCommitTime = commitTime; - final long metaStartAddr = _bufferStrategy.getMetaStartAddr(); - final long metaBitsAddr = _bufferStrategy.getMetaBitsAddr(); - - // Create the new root block. - newRootBlock = new RootBlockView(!old.isRootBlock0(), old - .getOffsetBits(), nextOffset, firstCommitTime, - lastCommitTime, newCommitCounter, commitRecordAddr, - commitRecordIndexAddr, old.getUUID(), // - blockSequence, commitToken,// - metaStartAddr, metaBitsAddr, old.getStoreType(), - old.getCreateTime(), old.getCloseTime(), - old.getVersion(), checker); - - } - - if (quorum == null) { - /* - * Non-HA mode. 
+ * Call commit on buffer strategy prior to retrieving root block, + * required for RWStore since the metaBits allocations are not made + * until commit, leading to invalid addresses for recent store + * allocations. + * + * Note: This will flush the write cache. For HA, that ensures that + * the write set has been replicated to the followers. + * + * Note: After this, we do not write anything on the backing store + * other than the root block. The rest of this code is dedicated to + * creating a properly formed root block. For a non-HA deployment, + * we just lay down the root block. For an HA deployment, we do a + * 2-phase commit. + * + * Note: In HA, the followers lay down the replicated writes + * synchronously. Thus, they are guaranteed to be on local storage + * by the time the leader finishes WriteCacheService.flush(). This + * does not create much latency because the WriteCacheService drains + * the dirtyList in a seperate thread. */ - + _bufferStrategy.commit(); + /* - * Force application data to stable storage _before_ - * we update the root blocks. This option guarantees - * that the application data is stable on the disk - * before the atomic commit. Some operating systems - * and/or file systems may otherwise choose an - * ordered write with the consequence that the root - * blocks are laid down on the disk before the - * application data and a hard failure could result - * in the loss of application data addressed by the - * new root blocks (data loss on restart). - * - * Note: We do not force the file metadata to disk. - * If that is done, it will be done by a force() - * after we write the root block on the disk. + * The next offset at which user data would be written. + * Calculated, after commit! */ - if (doubleSync) { - - _bufferStrategy.force(false/* metadata */); - + nextOffset = _bufferStrategy.getNextOffset(); + + final long blockSequence; + + if (_bufferStrategy instanceof IHABufferStrategy) { + + // always available for HA. + blockSequence = ((IHABufferStrategy) _bufferStrategy) + .getBlockSequence(); + + } else { + + blockSequence = old.getBlockSequence(); + } - - // write the root block on to the backing store. - _bufferStrategy.writeRootBlock(newRootBlock, forceOnCommit); - - if (_bufferStrategy instanceof IRWStrategy) { - + + /* + * Prepare the new root block. + */ + final IRootBlockView newRootBlock; + { + + /* + * Update the firstCommitTime the first time a transaction + * commits and the lastCommitTime each time a transaction + * commits (these are commit timestamps of isolated or + * unisolated transactions). + */ + + final long firstCommitTime = (old.getFirstCommitTime() == 0L ? commitTime + : old.getFirstCommitTime()); + + final long priorCommitTime = old.getLastCommitTime(); + + if (priorCommitTime != 0L) { + + /* + * This is a local sanity check to make sure that the commit + * timestamps are strictly increasing. An error will be + * reported if the commit time for the current (un)isolated + * transaction is not strictly greater than the last commit + * time on the store as read back from the current root + * block. + */ + + assertPriorCommitTimeAdvances(commitTime, priorCommitTime); + + } + + final long lastCommitTime = commitTime; + final long metaStartAddr = _bufferStrategy.getMetaStartAddr(); + final long metaBitsAddr = _bufferStrategy.getMetaBitsAddr(); + + // Create the new root block. 
+ newRootBlock = new RootBlockView(!old.isRootBlock0(), old + .getOffsetBits(), nextOffset, firstCommitTime, + lastCommitTime, newCommitCounter, commitRecordAddr, + commitRecordIndexAddr, old.getUUID(), // + blockSequence, commitToken,// + metaStartAddr, metaBitsAddr, old.getStoreType(), + old.getCreateTime(), old.getCloseTime(), + old.getVersion(), checker); + + } + + if (quorum == null) { + /* - * Now the root blocks are down we can commit any transient - * state. + * Non-HA mode. */ - - ((IRWStrategy) _bufferStrategy).postCommit(); - - } - - // set the new root block. - _rootBlock = newRootBlock; - - // reload the commit record from the new root block. - _commitRecord = _getCommitRecord(); - - if (txLog.isInfoEnabled()) - txLog.info("COMMIT: commitTime=" + commitTime); - - } else { - - /* - * HA mode. - * - * Note: We need to make an atomic decision here regarding - * whether a service is joined with the met quorum or not. This - * information will be propagated through the HA 2-phase prepare - * message so services will know how they must intepret the - * 2-phase prepare(), commit(), and abort() requests. The atomic - * decision is necessary in order to enforce a consistent role - * on a services that is resynchronizing and which might vote to - * join the quorum and enter the quorum asynchronously with - * respect to this decision point. - * - * TODO If necessary, we could also explicitly provide the zk - * version metadata for the znode that is the parent of the - * joined services. However, we would need an expanded interface - * to get that metadata from zookeeper out of the Quorum.. - */ - - boolean didVoteYes = false; - try { - - // Atomic decision point for joined vs non-joined services. - final IJoinedAndNonJoinedServices prepareJoinedAndNonJoinedServices = new JoinedAndNonJoinedServices( - quorum); - - final PrepareRequest req = new PrepareRequest(// - consensusReleaseTime,// - gatherJoinedAndNonJoinedServices,// - prepareJoinedAndNonJoinedServices,// - newRootBlock,// - quorumService.getPrepareTimeout(), // timeout - TimeUnit.MILLISECONDS// - ); - - // issue prepare request. - final PrepareResponse resp = quorumService - .prepare2Phase(req); - - if (haLog.isInfoEnabled()) - haLog.info(resp.toString()); - - if (resp.willCommit()) { - - didVoteYes = true; - - quorumService - .commit2Phase(new CommitRequest(req, resp)); - - } else { - - quorumService.abort2Phase(commitToken); - + + /* + * Force application data to stable storage _before_ + * we update the root blocks. This option guarantees + * that the application data is stable on the disk + * before the atomic commit. Some operating systems + * and/or file systems may otherwise choose an + * ordered write with the consequence that the root + * blocks are laid down on the disk before the + * application data and a hard failure could result + * in the loss of application data addressed by the + * new root blocks (data loss on restart). + * + * Note: We do not force the file metadata to disk. + * If that is done, it will be done by a force() + * after we write the root block on the disk. + */ + if (doubleSync) { + + _bufferStrategy.force(false/* metadata */); + } - - } catch (Throwable e) { - if (didVoteYes) { + + // write the root block on to the backing store. + _bufferStrategy.writeRootBlock(newRootBlock, forceOnCommit); + + if (_bufferStrategy instanceof IRWStrategy) { + /* - * The quorum voted to commit, but something went wrong. 
- * - * FIXME RESYNC : At this point the quorum is probably - * inconsistent in terms of their root blocks. Rather - * than attempting to send an abort() message to the - * quorum, we probably should force the leader to yield - * its role at which point the quorum will attempt to - * elect a new master and resynchronize. + * Now the root blocks are down we can commit any transient + * state. */ - if (quorumService != null) { - try { - quorumService.abort2Phase(commitToken); - } catch (Throwable t) { - log.warn(t, t); + + ((IRWStrategy) _bufferStrategy).postCommit(); + + } + + // set the new root block. + _rootBlock = newRootBlock; + + // reload the commit record from the new root block. + _commitRecord = _getCommitRecord(); + + if (txLog.isInfoEnabled()) + txLog.info("COMMIT: commitTime=" + commitTime); + + } else { + + /* + * HA mode. + * + * Note: We need to make an atomic decision here regarding + * whether a service is joined with the met quorum or not. This + * information will be propagated through the HA 2-phase prepare + * message so services will know how they must intepret the + * 2-phase prepare(), commit(), and abort() requests. The atomic + * decision is necessary in order to enforce a consistent role + * on a services that is resynchronizing and which might vote to + * join the quorum and enter the quorum asynchronously with + * respect to this decision point. + * + * TODO If necessary, we could also explicitly provide the zk + * version metadata for the znode that is the parent of the + * joined services. However, we would need an expanded interface + * to get that metadata from zookeeper out of the Quorum.. + */ + + boolean didVoteYes = false; + try { + + // Atomic decision point for joined vs non-joined services. + final IJoinedAndNonJoinedServices prepareJoinedAndNonJoinedServices = new JoinedAndNonJoinedServices( + quorum); + + final PrepareRequest req = new PrepareRequest(// + consensusReleaseTime,// + gatherJoinedAndNonJoinedServices,// + prepareJoinedAndNonJoinedServices,// + newRootBlock,// + quorumService.getPrepareTimeout(), // timeout + TimeUnit.MILLISECONDS// + ); + + // issue prepare request. + final PrepareResponse resp = quorumService + .prepare2Phase(req); + + if (haLog.isInfoEnabled()) + haLog.info(resp.toString()); + + if (resp.willCommit()) { + + didVoteYes = true; + + quorumService + .commit2Phase(new CommitRequest(req, resp)); + + } else { + + /* + * TODO We only need to issue the 2-phase abort + * against those services that (a) were joined with + * the met quorum; and (b) voted YES in response to + * the PREPARE message. + */ + + quorumService.abort2Phase(commitToken); + + } + + } catch (Throwable e) { + if (didVoteYes) { + /* + * The quorum voted to commit, but something went wrong. + * + * FIXME RESYNC : At this point the quorum is probably + * inconsistent in terms of their root blocks. Rather + * than attempting to send an abort() message to the + * quorum, we probably should force the leader to yield + * its role at which point the quorum will attempt to + * elect a new master and resynchronize. + */ + if (quorumService != null) { + try { + quorumService.abort2Phase(commitToken); + } catch (Throwable t) { + log.warn(t, t); + } } + } else { + /* + * This exception was thrown during the abort handling + * logic. Note that we already attempting an 2-phase + * abort since the quorum did not vote "yes". + * + * TODO We should probably force a quorum break since + * there is clearly something wrong with the lines of + * communication among the nodes. 
+ */ } - } else { - /* - * This exception was thrown during the abort handling - * logic. Note that we already attempting an 2-phase - * abort since the quorum did not vote "yes". - * - * TODO We should probably force a quorum break since - * there is clearly something wrong with the lines of - * communication among the nodes. - */ + throw new RuntimeException(e); } - throw new RuntimeException(e); - } + + } // else HA mode - } // else HA mode - } finally { if(commitLock != null) { /* @@ -5402,10 +5427,21 @@ } else { +// /* +// * No change in state. +// */ +// +// log.warn("No change"// +// + ": qorumToken(" + oldValue + " => " + newValue + ")"// +// + ", haReadyToken(" + haReadyToken + ")"// +// ); + didBreak = false; didMeet = false; didJoinMetQuorum = false; didLeaveMetQuorum = false; + + return; } @@ -5868,9 +5904,10 @@ } /** - * Local commit protocol (HA). + * Local commit protocol (HA). This exists to do a non-2-phase abort + * in HA. */ - protected void doLocalAbort() { + final public void doLocalAbort() { _abort(); @@ -6492,24 +6529,38 @@ // Vote NO. vote.set(false); - - doRejectedCommit(); - + + final IHA2PhasePrepareMessage req = prepareRequest.get(); + + doLocalAbort(); + + if (req.isJoinedService()) { + + /* + * Force a service that was joined at the atomic decision + * point of the 2-phase commit protocol to do a service + * leave. + */ + + quorum.getClient().enterErrorState(); + + } + return vote.get(); } } // class VoteNoTask - /** - * Method must be extended by subclass to coordinate the rejected - * commit. - */ - protected void doRejectedCommit() { - - doLocalAbort(); - - } +// /** +// * Method must be extended by subclass to coordinate the rejected +// * commit. +// */ +// protected void doRejectedCommit() { +// +// doLocalAbort(); +// +// } /** * Task prepares for a 2-phase commit (syncs to the disk) and votes YES @@ -6717,9 +6768,19 @@ } + if (prepareMessage.voteNo()) { + + /* + * Hook allows the test suite to force a NO vote. + */ + + throw new RuntimeException("Force NO vote"); + + } + // Vote YES. vote.set(true); - + return vote.get(); } finally { @@ -6728,7 +6789,13 @@ /* * Throw away our local write set. */ - doRejectedCommit(); + doLocalAbort(); + /* + * Since the service refuses the commit, we want it to + * enter an error state and then figure out whether it + * needs to resynchronize with the quorum. 
+ */ + quorum.getClient().enterErrorState(); } } Modified: branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -487,6 +487,16 @@ throw new UnsupportedOperationException(); } + @Override + public void enterErrorState() { + // TODO Auto-generated method stub + } + + @Override + public void discardWriteSet() { + // TODO Auto-generated method stub + } + }; } Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -474,10 +474,11 @@ * observes a quorum break or similar event. This is just being * proactive. * - * FIXME This will not be called if the quorum remains met but the + * done. This will not be called if the quorum remains met but the * local service leaves the quorum. However, we should still cancel * a running snapshot if that occurs (if we add a serviceLeave() - * handle then this will fix that). + * handler then this will fix that). [there is no a serviceLeave() + * handler in HAJournalServer.] */ final Future<IHASnapshotResponse> ft = getSnapshotManager() @@ -654,20 +655,20 @@ } - /** - * {@inheritDoc} - * <p> - * Extended to expose this method to the {@link HAQuorumService}. - */ - @Override - protected void doLocalAbort() { - - // Clear the last live message out. - haLogNexus.lastLiveHAWriteMessage = null; - - super.doLocalAbort(); - - } +// /** +// * {@inheritDoc} +// * <p> +// * Extended to expose this method to the {@link HAQuorumService}. +// */ +// @Override +// protected void doLocalAbort() { +// +// // Clear the last live message out. +// haLogNexus.lastLiveHAWriteMessage = null; +// +// super.doLocalAbort(); +// +// } /** * Extended implementation supports RMI. @@ -1867,21 +1868,21 @@ } - /** - * {@inheritDoc} - * <p> - * Extended to kick the {@link HAJournalServer} into an error state. It - * will recover from that error state by re-entering seek consensus. - */ - @Override - protected void doRejectedCommit() { +// /** +// * {@inheritDoc} +// * <p> +// * Extended to kick the {@link HAJournalServer} into an error state. It +// * will recover from that error state by re-entering seek consensus. +// */ +// @Override +// protected void doRejectedCommit() { +// +// super.doRejectedCommit(); +// +// getQuorumService().enterErrorState(); +// +// } - super.doRejectedCommit(); - - getQuorumService().enterErrorState(); - - } - /** * Return this quorum member, appropriately cast. 
* Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -1002,9 +1002,23 @@ */ private final AtomicReference<FutureTask<Void>> runStateFutureRef = new AtomicReference<FutureTask<Void>>(/*null*/); + /** + * The {@link RunStateEnum} for the current executing task. This is set + * when the task actually begins to execute in its + * {@link RunStateCallable#doRun()} method. + */ private final AtomicReference<RunStateEnum> runStateRef = new AtomicReference<RunStateEnum>( null/* none */); + /** + * The {@link RunStateEnum} for the last task submitted. This is used by + * {@link #enterRunState(RunStateCallable)} to close a concurrency gap + * where the last submitted task has not yet begun to execute and + * {@link #runStateRef} has therefore not yet been updated. + */ + private final AtomicReference<RunStateEnum> lastSubmittedRunStateRef = new AtomicReference<RunStateEnum>( + null/* none */); + /* * Exposed to HAJournal.HAGlueService. */ @@ -1104,7 +1118,7 @@ * the error task to interrupt itself). */ - enterRunState(new ErrorTask()); + enterErrorState();// enterRunState(new ErrorTask()); } @@ -1180,35 +1194,21 @@ } // RunStateCallable /** - * Transition to {@link RunStateEnum#Error}. + * {@inheritDoc} * <p> - * Note: if the current {@link Thread} is a {@link Thread} executing one - * of the {@link RunStateCallable#doRun()} methods, then it will be - * <strong>interrupted</strong> when entering the new run state. Thus, - * the caller MAY observe an {@link InterruptedException} in their - * thread, but only if they are being run out of - * {@link RunStateCallable}. + * Note: Invoked from {@link AbstractJournal#doLocalAbort()}. */ - void enterErrorState() { - - /* - * Do synchronous service leave. - */ - - log.warn("Will do SERVICE LEAVE"); + @Override + public void discardWriteSet() { - serviceLeave(); - - /* - * Update the haReadyTokena and haStatus regardless of whether the - * quorum token has changed since this service is no longer joined - * with a met quorum. - */ - journal.setQuorumToken(getQuorum().token()); - logLock.lock(); try { - if (journal.getHALogNexus().isHALogOpen()) { + log.warn(""); + + // Clear the last live message out. + journal.getHALogNexus().lastLiveHAWriteMessage = null; + + if (false&&journal.getHALogNexus().isHALogOpen()) { /* * Note: Closing the HALog is necessary for us to be able to * re-enter SeekConsensus without violating a pre-condition @@ -1224,12 +1224,22 @@ logLock.unlock(); } - /* - * Transition into the error state. - * - * Note: This can cause the current Thread to be interrupted if it - * is the Thread executing one of the RunStateCallable classes. - */ + } + + /** + * {@inheritDoc} + * <p> + * Transition to {@link RunStateEnum#Error}. + * <p> + * Note: if the current {@link Thread} is a {@link Thread} executing one + * of the {@link RunStateCallable#doRun()} methods, then it will be + * <strong>interrupted</strong> when entering the new run state (but we + * will not re-enter the current active state). Thus, the caller MAY + * observe an {@link InterruptedException} in their thread, but only if + * they are being run out of {@link RunStateCallable}. 
+ */ + @Override + public void enterErrorState() { enterRunState(new ErrorTask()); @@ -1292,18 +1302,34 @@ } /** - * Change the run state. + * Change the run state (but it will not re-enter the currently active + * state). * * @param runStateTask * The task for the new run state. + * + * @return The {@link Future} of the newly submitted run state -or- + * <code>null</code> if the service is already in that run + * state. */ - private Future<Void> enterRunState(final RunStateCallable<Void> runStateTask) { + private Future<Void> enterRunState( + final RunStateCallable<Void> runStateTask) { if (runStateTask == null) throw new IllegalArgumentException(); synchronized (runStateRef) { + if (runStateTask.runState + .equals(lastSubmittedRunStateRef.get())) { + + haLog.warn("Will not reenter active run state: " + + runStateTask.runState); + + return null; + + } + final FutureTask<Void> ft = new FutureTaskMon<Void>( runStateTask); @@ -1314,6 +1340,9 @@ try { runStateFutureRef.set(ft); + + // set before we submit the task. + lastSubmittedRunStateRef.set(runStateTask.runState); // submit future task. journal.getExecutorService().submit(ft); @@ -1336,10 +1365,14 @@ if (!success) { + log.error("Unable to submit task: " + runStateTask); + ft.cancel(true/* interruptIfRunning */); runStateFutureRef.set(null); + lastSubmittedRunStateRef.set(null); + } } @@ -1455,6 +1488,16 @@ } + /* + * QUORUM EVENT HANDLERS + * + * Note: DO NOT write event handlers that submit event transitions to + * any state other than the ERROR state. The ERROR state will eventually + * transition to SeekConsensus. Once we are no longer in the ERROR + * state, the states will naturally transition among themselves (until + * the next serviceLeave(), quorumBreak(), etc.) + */ + @Override public void quorumMeet(final long token, final UUID leaderId) { @@ -1506,31 +1549,10 @@ // Submit task to handle this event. server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( - new QuorumBreakTask())); + new EnterErrorStateTask())); } - private class QuorumBreakTask implements Callable<Void> { - public Void call() throws Exception { - /* - * Note: I have removed this line. It arrived without - * documentation and I can not find any reason why we should - * have to do a service leave here. The quorum will - * automatically issue service leaves. - */ -// getQuorum().getActor().serviceLeave(); - - journal.setQuorumToken(Quorum.NO_QUORUM); - try { - journal.getHALogNexus().disableHALog(); - } catch (IOException e) { - haLog.error(e, e); - } - enterRunState(new SeekConsensusTask()); - return null; - } - } - /** * {@inheritDoc} * <p> @@ -1542,31 +1564,17 @@ super.serviceLeave(); - // FIXME serviceLeave() needs event handler. -// // Submit task to handle this event. -// server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( -// new ServiceLeaveTask())); + // Submit task to handle this event. + server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( + new EnterErrorStateTask())); } - private class ServiceLeaveTask implements Callable<Void> { + /** + * Transition to {@link RunStateEnum#Error}. + */ + private class EnterErrorStateTask implements Callable<Void> { public Void call() throws Exception { - /* - * Set token. Journal will notice that it is no longer - * "HA Ready" - * - * Note: AbstractJournal.setQuorumToken() will detect - * case where it transitions from a met quorum through - * a service leave and will clear its haReady token and - * update its haStatus field appropriately. 
(This is why - * we pass in quorum.token() rather than NO_QUORUM.) - */ - journal.setQuorumToken(getQuorum().token()); - try { - journal.getHALogNexus().disableHALog(); - } catch (IOException e) { - haLog.error(e, e); - } - enterRunState(new SeekConsensusTask()); // TODO Versus ERROR state? + enterRunState(new ErrorTask()); return null; } } @@ -1620,24 +1628,15 @@ super.memberRemove(); - // FIXME memberRemove() - restore event handler. Do NOT transition to seek consensus directly from error state. Instead, cause a memberRemove() that will trigger this event handler. -// // Submit task to handle this event. -// server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( -// new MemberRemoveTask())); + // Submit task to handle this event. + server.singleThreadExecutor.execute(new MonitoredFutureTask<Void>( + new EnterErrorStateTask())); } /** - * If this service is no longer a member, and the service is still - * running, then enter the SeekConsensus run state. + * Handle an error condition on the service. */ - private class MemberRemoveTask implements Callable<Void> { - public Void call() throws Exception { - enterRunState(new SeekConsensusTask()); - return null; - } - } - private class ErrorTask extends RunStateCallable<Void> { protected ErrorTask() { @@ -1648,55 +1647,72 @@ @Override public Void doRun() throws Exception { + +// /* +// * Discard the current write set. +// * +// * Note: This is going to call through to discardWriteSet(). +// * That method will close out the current HALog and discard the +// * last live write message. +// * +// * FIXME the setQuorumToken() after the serviceLeave() will also +// * cause doLocalAbort() to be called, so we probably do NOT want +// * to call it here. +// */ + journal.doLocalAbort(); + /* * Note: Bouncing the ZK connection here appears to cause * problems within the test suite. We have not tracked down why * yet. */ // server.haGlueService.bounceZookeeperConnection(); -// /* -// * Note: Try moving to doRejectedCommit() so this will be -// * synchronous. -// */ -// logLock.lock(); -// try { -// if (journal.getHALogNexus().isHALogOpen()) { -// /* -// * Note: Closing the HALog is necessary for us to be -// * able to re-enter SeekConsensus without violating a -// * pre-condition for that run state. -// */ -// journal.getHALogNexus().disableHALog(); -// } -// } finally { -// logLock.unlock(); -// } + + /* + * Do synchronous service leave. + */ -// // Force a service leave. -// getQuorum().getActor().serviceLeave(); + log.warn("Will do SERVICE LEAVE"); + + getActor().serviceLeave(); + + /* + * Set token. Journal will notice that it is no longer + * "HA Ready" + * + * Note: We update the haReadyToken and haStatus regardless of + * whether the quorum token has changed in case this service is + * no longer joined with a met quorum. + * + * Note: AbstractJournal.setQuorumToken() will detect case where + * it transitions from a met quorum through a service leave and + * will clear its haReady token and update its haStatus field + * appropriately. (This is why we pass in quorum.token() rather + * than NO_QUORUM.) + * + * TODO There are cases where nothing changes that may hit an + * AssertionError in setQuorumToken(). + * + * TODO This will (conditionally) trigger doLocalAbort(). Since we did this + * explicitly above, that can be do invocations each time we pass through here! + */ + journal.setQuorumToken(getQuorum().token()); + +// assert journal.getHAReady() == Quorum.NO_QUORUM; -// /* -// * Set token. 
Journal will notice that it is no longer -// * "HA Ready" -// * -// * Note: AbstractJournal.setQuorumToken() will detect case where -// * it transitions from a met quorum through a service leave and -// * will clear its haReady token and update its haStatus field -// * appropriately. -// * -// * FIXME There may be a data race here. The quorum.token() might -// * be be cleared by the time we call -// * setQuorumToken(quorum.token()) so we may have to explicitly -// * "clear" the journal token by passing in NO_QUORUM. -// */ -// journal.setQuorumToken(Quorum.NO_QUORUM); -// -// try { -// journal.getHALogNexus().disableHALog(); -// } catch (IOException e) { -// haLog.error(e, e); -// } + /* + * Note: We can spin here to give the service an opportunity to + * handle any backlog of events that trigger a transition into + * the ERROR state. This might not be strictly necessary, and we + * do not want to spin too long. + */ + + final long sleepMillis = 1000; // TODO CONFIG? + log.warn("Sleeping " + sleepMillis + "ms to let events quisce."); + + Thread.sleep(sleepMillis); + // Seek consensus. enterRunState(new SeekConsensusTask()); @@ -2364,6 +2380,9 @@ journal.doLocalAbort(); + // Sets up expectations (maybe just for the test suite?) + conditionalCreateHALog(); + /* * We will do a local commit with each HALog (aka write set) * that is replicated. This let's us catch up incrementally with @@ -2885,12 +2904,31 @@ // Verify that we have valid root blocks awaitJournalToken(token); + // Note: used to do conditionalCreateHALog() here. + + } + + /** + * Conditionally create the HALog. + * <p> + * Refactored out of {@link #pipelineSetup()} since + * {@link #discardWriteSet()} now removes the current HALog. Therefore, + * the {@link ResyncTask} needs to call + * {@link #conditionalCreateHALog()} <em>after</em> it calls + * {@link AbstractJournal#doLocalAbort()}. + * + * @throws FileNotFoundException + * @throws IOException + */ + private void conditionalCreateHALog() throws FileNotFoundException, + IOException { + logLock.lock(); - + try { if (!journal.getHALogNexus().isHALogOpen()) { - + /* * Open the HALogWriter for our current root blocks. * @@ -2899,14 +2937,14 @@ * because the historical log writes occur when we ask the * leader to send us a prior commit point in RESYNC. */ - + journal.getHALogNexus().createHALog( journal.getRootBlockView()); - + } } finally { - + logLock.unlock(); } @@ -2949,6 +2987,8 @@ logLock.lock(); try { + conditionalCreateHALog(); + if (haLog.isDebugEnabled()) haLog.debug("msg=" + msg + ", buf=" + data); @@ -3552,6 +3592,8 @@ try { + conditionalCreateHALog(); + /* * Throws IllegalStateException if the message is not * appropriate for the state of the log. 
Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-A.config 2013-07-05 19:44:40 UTC (rev 7217) @@ -241,7 +241,8 @@ "-Djava.util.logging.config.file=logging-A.properties", "-server", "-Xmx1G", - "-ea" + "-ea", + "-Xdebug","-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1050" }; serviceDir = bigdata.serviceDir; Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-B.config 2013-07-05 19:44:40 UTC (rev 7217) @@ -240,7 +240,8 @@ "-Djava.util.logging.config.file=logging-B.properties", "-server", "-Xmx1G", - "-ea" + "-ea", + "-Xdebug","-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1051" }; serviceDir = bigdata.serviceDir; Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-C.config 2013-07-05 19:44:40 UTC (rev 7217) @@ -240,7 +240,8 @@ "-Djava.util.logging.config.file=logging-C.properties", "-server", "-Xmx1G", - "-ea" + "-ea", + "-Xdebug","-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1052" }; serviceDir = bigdata.serviceDir; Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-07-05 12:20:30 UTC (rev 7214) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java 2013-07-05 19:44:40 UTC (rev 7217) @@ -83,6 +83,7 @@ import com.bigdata.ha.msg.IHAWriteSetStateRequest; import com.bigdata.ha.msg.IHAWriteSetStateResponse; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService; import com.bigdata.quorum.AsynchronousQuorumCloseException; import com.bigdata.quorum.Quorum; @@ -126,6 +127,14 @@ public void helloWorld() throws IOException; /** + * Logs a message @ WARN on the HAGlue service. + * + * @param msg + * The message. + */ + public void log(String msg) throws IOException; + + /** * Force the end point to enter into an error state from which it will * naturally move back into a consistent state. 
* <p> @@ -462,6 +471,13 @@ } @Override + public void log(final String msg) throws IOException { + + log.warn(msg); + + } + + @Override public Future<Void> enterErrorState() { final FutureTask<Void> ft = new FutureTaskMon<Void>( @@ -775,24 +791,21 @@ @Override public Future<Boolean> prepare2Phase( - IHA2PhasePrepareMessage prepareMessage) { + final IHA2PhasePrepareMessage prepareMessage) { checkMethod("prepare2Phase", new Class[] { IHA2PhasePrepareMessage.class }); if (voteNo.compareAndSet(true/* expect */, false/* update */)) { - final FutureTask<Boolean> ft = new FutureTask<Boolean>( - new VoteNoTask()); + return super.prepare2Phase(new MyPrepareMessage(prepareMessage)); - super.getIndexManager().getExecutorService().submit(ft); - - return super.getProxy(ft); - + } else { + + return super.prepare2Phase(prepareMessage); + } - return super.prepare2Phase(prepareMessage); - } @Override @@ -929,4 +942,57 @@ } // class HAGlueTestImpl + private static class MyPrepareMessage implements IHA2PhasePrepareMessage { + + private final IHA2PhasePrepareMessage delegate; + + MyPrepareMessage(final IHA2PhasePrepareMessage msg) { + this.delegate = msg; + } + + @Override + public IHANotifyReleaseTimeResponse getConsensusReleaseTime() { + return delegate.getConsensusReleaseTime(); + } + + @Override + public boolean isGatherService() { + return delegate.isGatherService(); + } + + @Override + public boolean isJoinedService() { + return delegate.isJoinedService(); + } + + @Override + public boolean isRootBlock0() { + return delegate.isRootBlock0(); + } + + @Overr... [truncated message content] |
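The conditionalCreateHALog() refactor above reduces to a lock-then-test-then-create idiom: take the log lock, create the HALog only if one is not already open, and release the lock in a finally block so the several call sites added in this change can invoke it safely. A minimal sketch of that idiom, using a hypothetical ResourceNexus interface in place of the HALogNexus type that this diff does not show:

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class ConditionalCreateSketch {

        /** Hypothetical stand-in for the HALogNexus used above. */
        interface ResourceNexus {
            boolean isOpen();
            void create();
        }

        private final Lock logLock = new ReentrantLock();

        void conditionalCreate(final ResourceNexus nexus) {
            logLock.lock();
            try {
                // Idempotent: only create a log if none is currently open, so
                // the method may be called from more than one code path.
                if (!nexus.isOpen()) {
                    nexus.create();
                }
            } finally {
                logLock.unlock();
            }
        }
    }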
From: <tho...@us...> - 2013-08-09 12:03:04
|
Revision: 7269 http://bigdata.svn.sourceforge.net/bigdata/?rev=7269&view=rev Author: thompsonbry Date: 2013-08-09 12:02:55 +0000 (Fri, 09 Aug 2013) Log Message: ----------- Reconciling edit to test case showing how to use ORDER BY with INSERT INTO made to both the 1.2.x and READ_CACHE branches prior to merging changes from the 1.2.x branch into the READ_CACHE branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-08 19:38:24 UTC (rev 7268) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-09 12:02:55 UTC (rev 7269) @@ -467,16 +467,28 @@ final StringBuilder sb = new StringBuilder(); + /* + * FIXME test variants w/ and w/o embedded sub-select and verify the + * *order* is preserved when using the embedded subselect w/ its + * order by. Also, verify that we translate this by lifting out the + * sub-select since the top-level query is empty at thast point. + * + * Also, document this on the wiki. The sub-select is necessary because + * SPARQL does not allow solution modifiers on the top-level WHERE clause + * for INSERT/DELETE+WHERE. + */ sb.append("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"); sb.append("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"); sb.append("PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"); sb.append("INSERT INTO %namedSet1\n"); sb.append("SELECT ?x ?name\n"); + sb.append("WHERE { SELECT ?x ?name\n"); sb.append("WHERE {\n"); sb.append(" ?x rdf:type foaf:Person .\n"); sb.append(" ?x rdfs:label ?name .\n"); sb.append("}\n"); -// sb.append("ORDER BY ?name"); + sb.append("ORDER BY ?name\n"); + sb.append("}"); con.prepareUpdate(QueryLanguage.SPARQL, sb.toString()).execute(); Modified: branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-08 19:38:24 UTC (rev 7268) +++ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-09 12:02:55 UTC (rev 7269) @@ -475,7 +475,7 @@ * * Also, document this on the wiki. The sub-select is necessary because * SPARQL does not allow solution modifiers on the top-level WHERE clause - * for INSERT/DELETE+WHERE. + * for INSERT/DELETE+WHERE. */ sb.append("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"); sb.append("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
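The comment added above explains why the ORDER BY moves into an embedded sub-select: SPARQL does not allow solution modifiers on the top-level WHERE clause of INSERT/DELETE + WHERE. Assembled, the update built by those append() calls looks roughly like the sketch below; the named-set target follows the test's own %namedSet1 placeholder (substituted by the harness), and the openrdf RepositoryConnection is assumed to come from the surrounding test fixture:

    import org.openrdf.query.QueryLanguage;
    import org.openrdf.repository.RepositoryConnection;

    public class InsertWithOrderBySketch {

        // The ORDER BY lives on the inner SELECT; the outer WHERE merely wraps it.
        static void run(final RepositoryConnection con, final String namedSet)
                throws Exception {

            final String update =
                  "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"
                + "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"
                + "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"
                + "INSERT INTO " + namedSet + "\n"
                + "SELECT ?x ?name\n"
                + "WHERE { SELECT ?x ?name\n"
                + "  WHERE {\n"
                + "    ?x rdf:type foaf:Person .\n"
                + "    ?x rdfs:label ?name .\n"
                + "  }\n"
                + "  ORDER BY ?name\n"
                + "}";

            con.prepareUpdate(QueryLanguage.SPARQL, update).execute();
        }
    }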
From: <tho...@us...> - 2013-10-21 12:48:44
|
Revision: 7464 http://bigdata.svn.sourceforge.net/bigdata/?rev=7464&view=rev Author: thompsonbry Date: 2013-10-21 12:48:38 +0000 (Mon, 21 Oct 2013) Log Message: ----------- Creating a branch to address #718 (handling zk connection loss). Modified Paths: -------------- branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java Added Paths: ----------- branches/ZK_DISCONNECT_HANDLING/ Property changes on: branches/ZK_DISCONNECT_HANDLING ___________________________________________________________________ Added: svn:ignore + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI Added: svn:mergeinfo + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Modified: branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -443,7 +443,7 @@ pipeline = new LinkedHashSet<UUID>(k * 2); } - + @Override protected void finalize() throws Throwable { terminate(); Modified: branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -742,7 +742,7 @@ // Save UUID -> QuorumMember mapping on the fixture. 
fixture.known.put(client.getServiceId(), client); } - + @Override public void terminate() { final MockQuorumWatcher watcher = (MockQuorumWatcher) getWatcher(); super.terminate(); Modified: branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -973,7 +973,7 @@ log.info("Creating service impl..."); // init. - impl = newService(config); + impl = newService(config); // FIXME Pass in the HAClient.Connection. if (log.isInfoEnabled()) log.info("Service impl is " + impl); @@ -1075,7 +1075,7 @@ * Simple representation of state (non-blocking, safe). Some fields reported * in the representation may be <code>null</code> depending on the server * state. - */ + */@Override public String toString() { // note: MAY be null. Modified: branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAClient.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -1128,7 +1128,7 @@ InterruptedException { /* - * Fast path. Check for an existing instance. + * Fast path. Check for an existing instance. FIXME MUst also verify that quorum is running. If terminated, then start(). But must also pass in the AbstractQuorumClient to be run if we are the HAJournalServer. Or let the caller start the quorum for their client rather than embedding that logic into this method. */ Quorum<HAGlue, QuorumClient<HAGlue>> quorum; synchronized (quorums) { Modified: branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/java/com/bigdata/zookeeper/ZooKeeperAccessor.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -263,7 +263,7 @@ try { log.warn("Creating new client"); - + // FIXME must not create new zk while session not expired. zookeeper = new ZooKeeper(hosts, sessionTimeout, new ZooAliveWatcher()); @@ -331,7 +331,7 @@ if (zookeeper != null) { zookeeper.close(); - + // FIXME close must not clear the zk reference. only do this for session expired. zookeeper = null; } @@ -373,11 +373,12 @@ private class ZooAliveWatcher implements Watcher { private boolean connected = false; - + @Override public void process(final WatchedEvent e) { + System.err.println("event: "+e); + // FIXME Does not verify that event is for the current ZK client. + if(!open) return; // FIXME blocks view of events after a close(). - if(!open) return; - if (log.isInfoEnabled()) log.info(e.toString()); @@ -387,7 +388,7 @@ if(!open) return; - switch (e.getState()) { + switch (e.getState()) { // FIXME Review switch states. case Unknown: // @todo what to do with these events? 
@@ -428,7 +429,7 @@ } for (Watcher w : watchers) { - + System.err.println("send event: "+e); // send event to any registered watchers. w.process(e); Modified: branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java 2013-10-21 12:45:44 UTC (rev 7463) +++ branches/ZK_DISCONNECT_HANDLING/bigdata-jini/src/test/com/bigdata/zookeeper/TestAll.java 2013-10-21 12:48:38 UTC (rev 7464) @@ -49,6 +49,9 @@ final TestSuite suite = new TestSuite("zookeeper client library"); + // test suite for zookeeper session expiration semantics. + suite.addTestSuite(TestZookeeperSessionSemantics.class); + // test ability to handle an expired session. suite.addTestSuite(TestZookeeperAccessor.class); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
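The FIXMEs above all circle the same ZooKeeper session rule: a Disconnected event may be followed by a reconnect on the same session, so the client must not be closed or replaced on mere disconnect; only Expired means the session (and its ephemeral znodes) is gone and a new ZooKeeper client is required. A minimal sketch of a watcher that keeps that distinction explicit; the reactions are left as comments because the appropriate responses belong in ZooKeeperAccessor/HAClient, not here:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.Watcher.Event.KeeperState;

    public class SessionAwareWatcher implements Watcher {

        @Override
        public void process(final WatchedEvent e) {
            final KeeperState state = e.getState();
            switch (state) {
            case SyncConnected:
                // Connected, or reconnected on the SAME session: no teardown.
                break;
            case Disconnected:
                // Transient. The session may still be recovered, so do NOT
                // close or null out the ZooKeeper client here.
                break;
            case Expired:
                // The session is gone for good. Only now should the old
                // client be discarded and a new ZooKeeper instance created.
                break;
            default:
                // Other states (e.g. AuthFailed) are out of scope for this sketch.
                break;
            }
        }
    }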
From: <tho...@us...> - 2013-12-09 15:26:25
|
Revision: 7621 http://bigdata.svn.sourceforge.net/bigdata/?rev=7621&view=rev Author: thompsonbry Date: 2013-12-09 15:26:17 +0000 (Mon, 09 Dec 2013) Log Message: ----------- Modified forceRemoveService() to use the runActorTask() pattern for better code compatibility and forward support for timeouts on actor actors. See #779. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 15:13:30 UTC (rev 7620) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 15:26:17 UTC (rev 7621) @@ -2343,22 +2343,25 @@ * that will force them to fail rather than block forever. This will * then force the service into an error state if its QuorumActor can not * carry out the requested action within a specified timeout. - * - * @throws InterruptedException */ @Override - final public void forceRemoveService(final UUID psid) - throws InterruptedException { - lock.lockInterruptibly(); - try { + final public void forceRemoveService(final UUID psid) { + runActorTask(new ForceRemoveServiceTask(psid)); + } + + private class ForceRemoveServiceTask extends ActorTask { + private final UUID psid; + ForceRemoveServiceTask(final UUID psid) { + this.psid = psid; + } + @Override + protected void doAction() throws InterruptedException { log.warn("Forcing remove of service" + ": thisService=" + serviceId + ", otherServiceId=" + psid); doMemberRemove(psid); doWithdrawVote(psid); doPipelineRemove(psid); doServiceLeave(psid); - } finally { - lock.unlock(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java 2013-12-09 15:13:30 UTC (rev 7620) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java 2013-12-09 15:26:17 UTC (rev 7621) @@ -219,8 +219,7 @@ * * @param serviceId * The UUID of the service to be removed. - * @throws InterruptedException */ - public void forceRemoveService(UUID serviceId) throws InterruptedException; + public void forceRemoveService(UUID serviceId); } Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 15:13:30 UTC (rev 7620) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 15:26:17 UTC (rev 7621) @@ -2343,25 +2343,28 @@ * that will force them to fail rather than block forever. This will * then force the service into an error state if its QuorumActor can not * carry out the requested action within a specified timeout. 
- * - * @throws InterruptedException */ @Override - final public void forceRemoveService(final UUID psid) - throws InterruptedException { - lock.lockInterruptibly(); - try { + final public void forceRemoveService(final UUID psid) { + runActorTask(new ForceRemoveServiceTask(psid)); + } + + private class ForceRemoveServiceTask extends ActorTask { + private final UUID psid; + ForceRemoveServiceTask(final UUID psid) { + this.psid = psid; + } + @Override + protected void doAction() throws InterruptedException { log.warn("Forcing remove of service" + ": thisService=" + serviceId + ", otherServiceId=" + psid); doMemberRemove(psid); doWithdrawVote(psid); doPipelineRemove(psid); doServiceLeave(psid); - } finally { - lock.unlock(); } } - + /** * Invoked when our client will become the leader to (a) reorganize the * write pipeline such that our client is the first service in the write Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java 2013-12-09 15:13:30 UTC (rev 7620) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java 2013-12-09 15:26:17 UTC (rev 7621) @@ -219,8 +219,7 @@ * * @param serviceId * The UUID of the service to be removed. - * @throws InterruptedException */ - public void forceRemoveService(UUID serviceId) throws InterruptedException; + public void forceRemoveService(UUID serviceId); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
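The change above moves the body of forceRemoveService() into a task run through runActorTask(). The ActorTask base class is not part of this diff, so the sketch below only illustrates the shape of the pattern: lock handling lives in one place, subclasses supply doAction(), and a timeout policy can later be added to the runner without touching any task subclass.

    import java.util.concurrent.locks.ReentrantLock;

    public class ActorTaskSketch {

        private final ReentrantLock lock = new ReentrantLock();

        /** Subclasses provide the actual quorum action. */
        abstract static class Task {
            abstract void doAction() throws InterruptedException;
        }

        void runTask(final Task task) {
            try {
                // A timeout could be introduced here (e.g. tryLock with a
                // deadline) without changing any Task implementation.
                lock.lockInterruptibly();
                try {
                    task.doAction();
                } finally {
                    lock.unlock();
                }
            } catch (InterruptedException ex) {
                // Preserve the interrupt and surface the failure to the caller.
                Thread.currentThread().interrupt();
                throw new RuntimeException(ex);
            }
        }
    }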
From: <tho...@us...> - 2013-12-09 16:04:11
|
Revision: 7624 http://bigdata.svn.sourceforge.net/bigdata/?rev=7624&view=rev Author: thompsonbry Date: 2013-12-09 16:04:03 +0000 (Mon, 09 Dec 2013) Log Message: ----------- Added unit tests to the HA CI test suite for forceRemoveService(). See #779. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-12-09 15:36:35 UTC (rev 7623) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-12-09 16:04:03 UTC (rev 7624) @@ -72,6 +72,7 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; +import com.bigdata.ha.IndexManagerCallable; import com.bigdata.ha.RunState; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.ha.msg.HASnapshotDigestRequest; @@ -1100,6 +1101,34 @@ } + /** + * Debug class to explicitly ask one service to remove another. + * + * This emulates the behaviour of the service in receiving correct notification + * of a target service failure -for example after a wire pull or sure kill. + * + */ + protected static class ForceRemoveService extends IndexManagerCallable<Void> { + + private static final long serialVersionUID = 1L; + private final UUID service; + + ForceRemoveService(final UUID service) { + this.service = service; + } + + @Override + public Void call() throws Exception { + + final HAJournal ha = (HAJournal) this.getIndexManager(); + + ha.getQuorum().getActor().forceRemoveService(service); + + return null; + } + + } + private void safeShutdown(final HAGlue haGlue, final File serviceDir, final ServiceListener serviceListener) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-12-09 15:36:35 UTC (rev 7623) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-12-09 16:04:03 UTC (rev 7624) @@ -44,6 +44,7 @@ import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.journal.jini.ha.HAJournalTest.SpuriousTestException; +import com.bigdata.quorum.QuorumActor; import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.util.ClocksNotSynchronizedException; @@ -705,7 +706,7 @@ } } - public void doBounceFollower() throws Exception { + private void doBounceFollower() throws Exception { final HAGlue serverA = startA(); final HAGlue serverB = startB(); @@ -995,6 +996,295 @@ // } /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B. 
Once + * the quorum meets, we figure out which service is the leader. The leader + * then forces the other service out of the quorum. + */ + public void test_AB_forceRemoveService_B() throws Exception { + + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before quorum break:\n" + dumpZoo()); + } + + /* + * Force the follower out of the quorum. Verify quorum meets again and + * that we can read on all services. + */ + { + + final HAGlue leader = quorum.getClient().getLeader(token1); + + if (leader.equals(serverA)) { + + leader.submit(new ForceRemoveService(getServiceBId()), true).get(); + + } else { + + leader.submit(new ForceRemoveService(getServiceAId()), true).get(); + + } + + // Thread.sleep(100000); // sleep to allow thread dump for analysis + // Okay, is the problem that the quorum doesn't break? + // assertFalse(quorum.isQuorumMet()); + + // Right so the Quorum is not met, but the follower deosn't seem to know it's broken + + // Wait for the quorum to break and then meet again. + final long token2 = awaitNextQuorumMeet(token1); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum meet:\n" + dumpZoo()); + } + + /* + * Bouncing the connection broke the quorun, so verify that the + * quorum token was advanced. + */ + assertEquals(token1 + 1, token2); + + // The leader MAY have changed (since the quorum broke). + final HAGlue leader2 = quorum.getClient().getLeader(token2); + + // Verify leader self-reports in new role. + awaitHAStatus(leader2, HAStatusEnum.Leader); + +// final UUID leaderId2 = leader2.getServiceId(); +// +// assertFalse(leaderId1.equals(leaderId2)); + + /* + * Verify we can read on the KB on both nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B + C in + * strict order. Wait until the quorum is fully met and the initial KB + * create transaction is done. The leader then forces B out of the quorum. + * We verify that the quorum fully meets again, that B is now the last + * service in the pipeline order, and that the quorum did not break (same + * token). 
+ */ + public void test_ABC_forceRemoveService_B() throws Exception { + + final ABC services = new ABC(true/*sequential*/); + final HAGlue serverA = services.serverA; + final HAGlue serverB = services.serverB; + final HAGlue serverC = services.serverC; + + final long token1 = awaitFullyMetQuorum(); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + doNSSStatusRequest(serverC); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + awaitHAStatus(serverC, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before forcing service remove:\n" + dumpZoo()); + } + + /* + * Bounce the 1st follower out of the quorum. Verify quorum meets again + * and that we can read on all services. + */ + { + + serverA.submit(new ForceRemoveService(getServiceBId()), true).get(); + + // Wait for the quorum to fully meet again. + final long token2 = awaitFullyMetQuorum(); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum fully met again:\n" + dumpZoo()); + } + + /* + * The quorum did not break. The token is unchanged. + */ + assertEquals(token1, token2); + + /* + * Service B came back in at the end of the pipeline. + */ + awaitPipeline(new HAGlue[] { serverA, serverC, serverB }); + + /* + * Verify we can read on the KB on all nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB, serverC }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B + C in + * strict order. Wait until the quorum is fully met and the initial KB + * create transaction is done. The leader then forces C out of the quorum. + * We verify that the quorum fully meets again, that C is again the last + * service in the pipeline order, and that the quorum did not break (same + * token). + */ + public void test_ABC_forceRemoveService_C() throws Exception { + + final ABC services = new ABC(true/*sequential*/); + final HAGlue serverA = services.serverA; + final HAGlue serverB = services.serverB; + final HAGlue serverC = services.serverC; + + final long token1 = awaitFullyMetQuorum(); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + doNSSStatusRequest(serverC); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. 
+ awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + awaitHAStatus(serverC, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before forcing service remove:\n" + dumpZoo()); + } + + /* + * Bounce the 1st follower out of the quorum. Verify quorum meets again + * and that we can read on all services. + */ + { + + serverA.submit(new ForceRemoveService(getServiceCId()), true).get(); + + // Wait for the quorum to fully meet again. + final long token2 = awaitFullyMetQuorum(); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum fully met again:\n" + dumpZoo()); + } + + /* + * The quorum did not break. The token is unchanged. + */ + assertEquals(token1, token2); + + /* + * Service C came back in at the end of the pipeline (i.e., the + * pipeline is unchanged). + */ + awaitPipeline(new HAGlue[] { serverA, serverB, serverC }); + + /* + * Verify we can read on the KB on all nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB, serverC }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** * Verify ability to stop and restart the zookeeper process under test * control. * Modified: branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java =================================================================== --- branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java 2013-12-09 15:36:35 UTC (rev 7623) +++ branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java 2013-12-09 16:04:03 UTC (rev 7624) @@ -1,21 +1,12 @@ package com.bigdata.journal.jini.ha; -import java.util.UUID; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; -import com.bigdata.ha.HAGlue; -import com.bigdata.ha.IndexManagerCallable; -import com.bigdata.ha.QuorumService; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; -import com.bigdata.quorum.Quorum; - import net.jini.config.Configuration; -import junit.framework.TestCase; +import com.bigdata.ha.HAGlue; + public class TestHA3JustKills extends AbstractHA3JournalServerTestCase { @@ -78,7 +69,7 @@ // FIXME: in the face of no implemented error propagation we can explicitly // tell the leader to remove the killed service! 
- startup.serverA.submit(new ForceRemoveService(getServiceCId()), true); + startup.serverA.submit(new ForceRemoveService(getServiceCId()), true).get(); awaitPipeline(20, TimeUnit.SECONDS, new HAGlue[] {startup.serverA, startup.serverB}); @@ -143,7 +134,7 @@ kill(startup.serverB); // FIXME: temporary call to explicitly remove the service prior to correct protocol - startup.serverA.submit(new ForceRemoveService(getServiceBId()), true); + startup.serverA.submit(new ForceRemoveService(getServiceBId()), true).get(); awaitPipeline(10, TimeUnit.SECONDS, new HAGlue[] {startup.serverA, startup.serverC}); Modified: branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-12-09 15:36:35 UTC (rev 7623) +++ branches/MGC_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-12-09 16:04:03 UTC (rev 7624) @@ -44,6 +44,7 @@ import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.journal.jini.ha.HAJournalTest.SpuriousTestException; +import com.bigdata.quorum.QuorumActor; import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.util.ClocksNotSynchronizedException; @@ -705,7 +706,7 @@ } } - public void doBounceFollower() throws Exception { + private void doBounceFollower() throws Exception { final HAGlue serverA = startA(); final HAGlue serverB = startB(); @@ -995,6 +996,295 @@ // } /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B. Once + * the quorum meets, we figure out which service is the leader. The leader + * then forces the other service out of the quorum. + */ + public void test_AB_forceRemoveService_B() throws Exception { + + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before quorum break:\n" + dumpZoo()); + } + + /* + * Force the follower out of the quorum. Verify quorum meets again and + * that we can read on all services. + */ + { + + final HAGlue leader = quorum.getClient().getLeader(token1); + + if (leader.equals(serverA)) { + + leader.submit(new ForceRemoveService(getServiceBId()), true).get(); + + } else { + + leader.submit(new ForceRemoveService(getServiceAId()), true).get(); + + } + + // Thread.sleep(100000); // sleep to allow thread dump for analysis + // Okay, is the problem that the quorum doesn't break? 
+ // assertFalse(quorum.isQuorumMet()); + + // Right so the Quorum is not met, but the follower deosn't seem to know it's broken + + // Wait for the quorum to break and then meet again. + final long token2 = awaitNextQuorumMeet(token1); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum meet:\n" + dumpZoo()); + } + + /* + * Bouncing the connection broke the quorun, so verify that the + * quorum token was advanced. + */ + assertEquals(token1 + 1, token2); + + // The leader MAY have changed (since the quorum broke). + final HAGlue leader2 = quorum.getClient().getLeader(token2); + + // Verify leader self-reports in new role. + awaitHAStatus(leader2, HAStatusEnum.Leader); + +// final UUID leaderId2 = leader2.getServiceId(); +// +// assertFalse(leaderId1.equals(leaderId2)); + + /* + * Verify we can read on the KB on both nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B + C in + * strict order. Wait until the quorum is fully met and the initial KB + * create transaction is done. The leader then forces B out of the quorum. + * We verify that the quorum fully meets again, that B is now the last + * service in the pipeline order, and that the quorum did not break (same + * token). + */ + public void test_ABC_forceRemoveService_B() throws Exception { + + final ABC services = new ABC(true/*sequential*/); + final HAGlue serverA = services.serverA; + final HAGlue serverB = services.serverB; + final HAGlue serverC = services.serverC; + + final long token1 = awaitFullyMetQuorum(); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + doNSSStatusRequest(serverC); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + awaitHAStatus(serverC, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before forcing service remove:\n" + dumpZoo()); + } + + /* + * Bounce the 1st follower out of the quorum. Verify quorum meets again + * and that we can read on all services. + */ + { + + serverA.submit(new ForceRemoveService(getServiceBId()), true).get(); + + // Wait for the quorum to fully meet again. + final long token2 = awaitFullyMetQuorum(); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum fully met again:\n" + dumpZoo()); + } + + /* + * The quorum did not break. The token is unchanged. + */ + assertEquals(token1, token2); + + /* + * Service B came back in at the end of the pipeline. + */ + awaitPipeline(new HAGlue[] { serverA, serverC, serverB }); + + /* + * Verify we can read on the KB on all nodes. 
+ * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB, serverC }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** + * Test of {@link QuorumActor#forceRemoveService(UUID)}. Start A + B + C in + * strict order. Wait until the quorum is fully met and the initial KB + * create transaction is done. The leader then forces C out of the quorum. + * We verify that the quorum fully meets again, that C is again the last + * service in the pipeline order, and that the quorum did not break (same + * token). + */ + public void test_ABC_forceRemoveService_C() throws Exception { + + final ABC services = new ABC(true/*sequential*/); + final HAGlue serverA = services.serverA; + final HAGlue serverB = services.serverB; + final HAGlue serverC = services.serverC; + + final long token1 = awaitFullyMetQuorum(); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + doNSSStatusRequest(serverC); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB, serverC); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + awaitHAStatus(serverC, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before forcing service remove:\n" + dumpZoo()); + } + + /* + * Bounce the 1st follower out of the quorum. Verify quorum meets again + * and that we can read on all services. + */ + { + + serverA.submit(new ForceRemoveService(getServiceCId()), true).get(); + + // Wait for the quorum to fully meet again. + final long token2 = awaitFullyMetQuorum(); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum fully met again:\n" + dumpZoo()); + } + + /* + * The quorum did not break. The token is unchanged. + */ + assertEquals(token1, token2); + + /* + * Service C came back in at the end of the pipeline (i.e., the + * pipeline is unchanged). + */ + awaitPipeline(new HAGlue[] { serverA, serverB, serverC }); + + /* + * Verify we can read on the KB on all nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB, serverC }) { + + awaitNSSAndHAReady(service); + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** * Verify ability to stop and restart the zookeeper process under test * control. * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-12-09 16:06:52
|
Revision: 7625 http://bigdata.svn.sourceforge.net/bigdata/?rev=7625&view=rev Author: thompsonbry Date: 2013-12-09 16:06:45 +0000 (Mon, 09 Dec 2013) Log Message: ----------- Removed the no longer used memberRemoveInterruptably() method. This method used to be used in AbstractQuorum.terminate(). It was probably removed from use when we did the ZK disconnect refactor. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 16:04:03 UTC (rev 7624) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 16:06:45 UTC (rev 7625) @@ -1860,22 +1860,22 @@ } } - /** - * An interruptable version of {@link #memberRemove()}. - * <p> - * Note: This is used by {@link AbstractQuorum#terminate()}. That code - * is already holding the lock in the caller's thread. Therefore it - * needs to run these operations in the same thread to avoid a deadlock - * with itself. - */ - protected void memberRemoveInterruptable() throws InterruptedException { - if (!lock.isHeldByCurrentThread()) - throw new IllegalMonitorStateException(); - conditionalServiceLeaveImpl(); - conditionalPipelineRemoveImpl(); - conditionalWithdrawVoteImpl(); - conditionalMemberRemoveImpl(); - } +// /** +// * An interruptable version of {@link #memberRemove()}. +// * <p> +// * Note: This is used by {@link AbstractQuorum#terminate()}. That code +// * is already holding the lock in the caller's thread. Therefore it +// * needs to run these operations in the same thread to avoid a deadlock +// * with itself. +// */ +// protected void memberRemoveInterruptable() throws InterruptedException { +// if (!lock.isHeldByCurrentThread()) +// throw new IllegalMonitorStateException(); +// conditionalServiceLeaveImpl(); +// conditionalPipelineRemoveImpl(); +// conditionalWithdrawVoteImpl(); +// conditionalMemberRemoveImpl(); +// } @Override final public void withdrawVote() { Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 16:04:03 UTC (rev 7624) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-12-09 16:06:45 UTC (rev 7625) @@ -1860,22 +1860,22 @@ } } - /** - * An interruptable version of {@link #memberRemove()}. - * <p> - * Note: This is used by {@link AbstractQuorum#terminate()}. That code - * is already holding the lock in the caller's thread. Therefore it - * needs to run these operations in the same thread to avoid a deadlock - * with itself. - */ - protected void memberRemoveInterruptable() throws InterruptedException { - if (!lock.isHeldByCurrentThread()) - throw new IllegalMonitorStateException(); - conditionalServiceLeaveImpl(); - conditionalPipelineRemoveImpl(); - conditionalWithdrawVoteImpl(); - conditionalMemberRemoveImpl(); - } +// /** +// * An interruptable version of {@link #memberRemove()}. +// * <p> +// * Note: This is used by {@link AbstractQuorum#terminate()}. That code +// * is already holding the lock in the caller's thread. 
Therefore it +// * needs to run these operations in the same thread to avoid a deadlock +// * with itself. +// */ +// protected void memberRemoveInterruptable() throws InterruptedException { +// if (!lock.isHeldByCurrentThread()) +// throw new IllegalMonitorStateException(); +// conditionalServiceLeaveImpl(); +// conditionalPipelineRemoveImpl(); +// conditionalWithdrawVoteImpl(); +// conditionalMemberRemoveImpl(); +// } @Override final public void withdrawVote() { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
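The removed helper existed only so terminate(), which already held the quorum lock, could run the member-remove steps inline in its own thread; handing them to another thread would have blocked forever on the same lock. A minimal sketch of that guard, with doRemoveSteps() standing in for the conditional*Impl() calls:

    import java.util.concurrent.locks.ReentrantLock;

    public class InlineWhenLockedSketch {

        private final ReentrantLock lock = new ReentrantLock();

        void removeInline() {
            // Caller must already hold the lock (as terminate() did).
            if (!lock.isHeldByCurrentThread())
                throw new IllegalMonitorStateException();
            doRemoveSteps(); // runs in the caller's thread: no self-deadlock
        }

        private void doRemoveSteps() {
            // service leave, pipeline remove, withdraw vote, member remove
        }
    }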
From: <tho...@us...> - 2014-03-13 16:21:36
|
Revision: 7956 http://sourceforge.net/p/bigdata/code/7956 Author: thompsonbry Date: 2014-03-13 16:21:32 +0000 (Thu, 13 Mar 2014) Log Message: ----------- Bug fix for the RemoteRepository. - http://trac.bigdata.com/ticket/854 (Allow override of maximum length before converting an HTTP GET to an HTTP POST) - http://trac.bigdata.com/ticket/852 (RemoteRepository.cancel() does not consume the HTTP response entity) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 15:52:26 UTC (rev 7955) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:21:32 UTC (rev 7956) @@ -152,6 +152,16 @@ static private final String DEFAULT_QUERY_METHOD = "POST"; /** + * The name of the system property that may be used to specify the maximum + * length (in characters) for a requestURL associated with an HTTP GET + * before it is automatically converted to an HTTP POST. + * + * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of + * maximum length before converting an HTTP GET to an HTTP POST </a> + */ + static public final String MAX_REQUEST_URL_LENGTH = "maxRequestURLLength"; + + /** * The default maximum limit on a requestURL before the request is converted * into a POST using a <code>application/x-www-form-urlencoded</code> * request entity. @@ -182,7 +192,9 @@ * The maximum requestURL length before the request is converted into a POST * using a <code>application/x-www-form-urlencoded</code> request entity. */ - private volatile int maxRequestURLLength = DEFAULT_MAX_REQUEST_URL_LENGTH; + private volatile int maxRequestURLLength = Integer.parseInt(System + .getProperty(MAX_REQUEST_URL_LENGTH, + Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH))); /** * The HTTP verb that will be used for a QUERY (versus a UPDATE or other @@ -538,7 +550,20 @@ opts.addRequestParam("queryId", queryId.toString()); - checkResponseCode(doConnect(opts)); + HttpResponse response = null; + try { + // Issue request, check response status code. + checkResponseCode(response = doConnect(opts)); + } finally { + /* + * Ensure that the http response entity is consumed so that the http + * connection will be released in a timely fashion. 
+ */ + try { + EntityUtils.consume(response.getEntity()); + } catch (IOException ex) { + } + } } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 15:52:26 UTC (rev 7955) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:21:32 UTC (rev 7956) @@ -150,6 +150,16 @@ * @see #setQueryMethod(String) */ static private final String DEFAULT_QUERY_METHOD = "POST"; + + /** + * The name of the system property that may be used to specify the maximum + * length (in characters) for a requestURL associated with an HTTP GET + * before it is automatically converted to an HTTP POST. + * + * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of + * maximum length before converting an HTTP GET to an HTTP POST </a> + */ + static public final String MAX_REQUEST_URL_LENGTH = "maxRequestURLLength"; /** * The default maximum limit on a requestURL before the request is converted @@ -161,7 +171,7 @@ * having a request URL that is 2000 characters long should go through with * a GET. 1000 is a safe value but it could reduce http caching. */ - static private final int DEFAULT_MAX_REQUEST_URL_LENGTH = 1000; + static public final int DEFAULT_MAX_REQUEST_URL_LENGTH = 1000; /** * The service end point for the default data set. @@ -182,7 +192,9 @@ * The maximum requestURL length before the request is converted into a POST * using a <code>application/x-www-form-urlencoded</code> request entity. */ - private volatile int maxRequestURLLength = DEFAULT_MAX_REQUEST_URL_LENGTH; + private volatile int maxRequestURLLength = Integer.parseInt(System + .getProperty(MAX_REQUEST_URL_LENGTH, + Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH))); /** * The HTTP verb that will be used for a QUERY (versus a UPDATE or other @@ -584,7 +596,20 @@ opts.addRequestParam("queryId", queryId.toString()); - checkResponseCode(doConnect(opts)); + HttpResponse response = null; + try { + // Issue request, check response status code. + checkResponseCode(response = doConnect(opts)); + } finally { + /* + * Ensure that the http response entity is consumed so that the http + * connection will be released in a timely fashion. + */ + try { + EntityUtils.consume(response.getEntity()); + } catch (IOException ex) { + } + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
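Both fixes above are visible from the client side: the new maxRequestURLLength system property controls when a long GET is converted to a POST, and cancel() now always drains the HTTP response entity so the pooled connection is returned promptly. A minimal sketch of the consume-in-finally idiom with Apache HttpClient 4.x; the endpoint URL is a placeholder, not the actual NSS cancel URL:

    import java.io.IOException;

    import org.apache.http.HttpResponse;
    import org.apache.http.client.HttpClient;
    import org.apache.http.client.methods.HttpPost;
    import org.apache.http.impl.client.DefaultHttpClient;
    import org.apache.http.util.EntityUtils;

    public class ConsumeEntitySketch {

        public static void main(final String[] args) throws IOException {
            final HttpClient client = new DefaultHttpClient();
            final HttpPost post = new HttpPost("http://localhost:8080/sparql"); // placeholder
            HttpResponse response = null;
            try {
                response = client.execute(post);
                if (response.getStatusLine().getStatusCode() >= 300) {
                    throw new IOException("Unexpected status: "
                            + response.getStatusLine());
                }
            } finally {
                if (response != null) {
                    try {
                        // Drain the entity so the connection goes back to the pool.
                        EntityUtils.consume(response.getEntity());
                    } catch (IOException ex) {
                        // Best-effort cleanup; ignore.
                    }
                }
            }
        }
    }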
From: <tho...@us...> - 2014-03-13 16:57:22
|
Revision: 7957 http://sourceforge.net/p/bigdata/code/7957 Author: thompsonbry Date: 2014-03-13 16:57:19 +0000 (Thu, 13 Mar 2014) Log Message: ----------- Examination of #853. Unable to replicate the problem. Ticket is closed. I did identify some problems with the ability to specify GET versus POST for a SPARQL QUERY. That issue has been fixed. I wrote unit tests which verify that POST and GET for a SPARQL QUERY are both allowed on the follower. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-03-13 16:21:32 UTC (rev 7956) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-03-13 16:57:19 UTC (rev 7957) @@ -49,6 +49,7 @@ import com.bigdata.ha.halog.IHALogReader; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.Quorum; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; @@ -3477,4 +3478,65 @@ } } + /** + * Test verifies that we can POST a SPARQL query to a follower. + * + * @see <a href="http://trac.bigdata.com/ticket/853"> Follower does not + * accept POST of idempotent operations (HA) </a> + */ + public void test_postQueryOnFollowers() throws Exception { + + final ABC abc = new ABC(false/*sequential*/); // simultaneous start. + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + // Verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + /* + * Verify that query on all nodes is allowed. + */ + for (RemoteRepository r : repos) { + + r.setQueryMethod("GET"); + + // Should be empty. + assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + // Change the maximum length of a GET for a Query. + for(RemoteRepository r : repos) { + + r.setMaxRequestURLLength(1); + + } + + // Run with the new length. All requests should be POSTs. + for (RemoteRepository r : repos) { + + r.setQueryMethod("POST"); + + // Should be empty. 
+ assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:21:32 UTC (rev 7956) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:57:19 UTC (rev 7957) @@ -141,15 +141,28 @@ static protected final String UTF8 = "UTF-8"; /** - * Note: The default is <code>false</code>. This supports use cases where - * the end points are read/write databases and http caching must be defeated - * in order to gain access to the most recent committed state of the end - * point. + * The name of the system property that may be used to specify the default + * HTTP method (GET or POST) for a SPARQL QUERY or other indempotent + * request. * + * @see #DEFAULT_QUERY_METHOD + * + * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of + * maximum length before converting an HTTP GET to an HTTP POST </a> + */ + static public final String QUERY_METHOD = RemoteRepository.class + .getName() + ".queryMethod"; + + /** + * Note: The default is {@value #DEFAULT_QUERY_METHOD}. This supports use + * cases where the end points are read/write databases and http caching must + * be defeated in order to gain access to the most recent committed state of + * the end point. + * * @see #getQueryMethod() * @see #setQueryMethod(String) */ - static private final String DEFAULT_QUERY_METHOD = "POST"; + static public final String DEFAULT_QUERY_METHOD = "POST"; /** * The name of the system property that may be used to specify the maximum @@ -159,7 +172,8 @@ * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of * maximum length before converting an HTTP GET to an HTTP POST </a> */ - static public final String MAX_REQUEST_URL_LENGTH = "maxRequestURLLength"; + static public final String MAX_REQUEST_URL_LENGTH = RemoteRepository.class + .getName() + ".maxRequestURLLength"; /** * The default maximum limit on a requestURL before the request is converted @@ -192,15 +206,13 @@ * The maximum requestURL length before the request is converted into a POST * using a <code>application/x-www-form-urlencoded</code> request entity. */ - private volatile int maxRequestURLLength = Integer.parseInt(System - .getProperty(MAX_REQUEST_URL_LENGTH, - Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH))); + private volatile int maxRequestURLLength; /** * The HTTP verb that will be used for a QUERY (versus a UPDATE or other * mutation operation). */ - private volatile String queryMethod = DEFAULT_QUERY_METHOD; + private volatile String queryMethod; /** * Return the maximum requestURL length before the request is converted into @@ -228,7 +240,7 @@ /** * Return the HTTP verb that will be used for a QUERY (versus an UPDATE or - * other mutation operations) (default {@value #DEFAULT_IS_GET}). POST can + * other mutation operations) (default {@value #DEFAULT_QUERY_METHOD}). 
POST can * often handle larger queries than GET due to limits at the HTTP client * layer and will defeat http caching and thus provide a current view of the * committed state of the SPARQL end point when the end point is a @@ -312,6 +324,12 @@ this.executor = executor; + setMaxRequestURLLength(Integer.parseInt(System.getProperty( + MAX_REQUEST_URL_LENGTH, + Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH)))); + + setQueryMethod(System.getProperty(QUERY_METHOD, DEFAULT_QUERY_METHOD)); + } @Override @@ -971,7 +989,7 @@ */ protected void setupConnectOptions() { - opts.method = "POST"; + opts.method = getQueryMethod(); if(update) { Modified: branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-03-13 16:21:32 UTC (rev 7956) +++ branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-03-13 16:57:19 UTC (rev 7957) @@ -3477,4 +3477,65 @@ } } + /** + * Test verifies that we can POST a SPARQL query to a follower. + * + * @see <a href="http://trac.bigdata.com/ticket/853"> Follower does not + * accept POST of idempotent operations (HA) </a> + */ + public void test_postQueryOnFollowers() throws Exception { + + final ABC abc = new ABC(false/*sequential*/); // simultaneous start. + + final HAGlue serverA = abc.serverA, serverB = abc.serverB, serverC = abc.serverC; + + // Verify quorum is FULLY met. + awaitFullyMetQuorum(); + + // await the KB create commit point to become visible on each service. + awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC }); + + // Verify binary equality of ALL journals. + assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); + + final RemoteRepository[] repos = new RemoteRepository[3]; + repos[0] = getRemoteRepository(serverA); + repos[1] = getRemoteRepository(serverB); + repos[2] = getRemoteRepository(serverC); + + /* + * Verify that query on all nodes is allowed. + */ + for (RemoteRepository r : repos) { + + r.setQueryMethod("GET"); + + // Should be empty. + assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + // Change the maximum length of a GET for a Query. + for(RemoteRepository r : repos) { + + r.setMaxRequestURLLength(1); + + } + + // Run with the new length. All requests should be POSTs. + for (RemoteRepository r : repos) { + + r.setQueryMethod("POST"); + + // Should be empty. + assertEquals(0L, + countResults(r.prepareTupleQuery("SELECT * {?a ?b ?c}") + .evaluate())); + + } + + } + } Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:21:32 UTC (rev 7956) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2014-03-13 16:57:19 UTC (rev 7957) @@ -141,15 +141,28 @@ static protected final String UTF8 = "UTF-8"; /** - * Note: The default is <code>false</code>. This supports use cases where - * the end points are read/write databases and http caching must be defeated - * in order to gain access to the most recent committed state of the end - * point. 
+ * The name of the system property that may be used to specify the default + * HTTP method (GET or POST) for a SPARQL QUERY or other indempotent + * request. * + * @see #DEFAULT_QUERY_METHOD + * + * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of + * maximum length before converting an HTTP GET to an HTTP POST </a> + */ + static public final String QUERY_METHOD = RemoteRepository.class + .getName() + ".queryMethod"; + + /** + * Note: The default is {@value #DEFAULT_QUERY_METHOD}. This supports use + * cases where the end points are read/write databases and http caching must + * be defeated in order to gain access to the most recent committed state of + * the end point. + * * @see #getQueryMethod() * @see #setQueryMethod(String) */ - static private final String DEFAULT_QUERY_METHOD = "POST"; + static public final String DEFAULT_QUERY_METHOD = "POST"; /** * The name of the system property that may be used to specify the maximum @@ -159,7 +172,8 @@ * @see <a href="http://trac.bigdata.com/ticket/854"> Allow overrride of * maximum length before converting an HTTP GET to an HTTP POST </a> */ - static public final String MAX_REQUEST_URL_LENGTH = "maxRequestURLLength"; + static public final String MAX_REQUEST_URL_LENGTH = RemoteRepository.class + .getName() + ".maxRequestURLLength"; /** * The default maximum limit on a requestURL before the request is converted @@ -192,15 +206,13 @@ * The maximum requestURL length before the request is converted into a POST * using a <code>application/x-www-form-urlencoded</code> request entity. */ - private volatile int maxRequestURLLength = Integer.parseInt(System - .getProperty(MAX_REQUEST_URL_LENGTH, - Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH))); + private volatile int maxRequestURLLength; /** * The HTTP verb that will be used for a QUERY (versus a UPDATE or other * mutation operation). */ - private volatile String queryMethod = DEFAULT_QUERY_METHOD; + private volatile String queryMethod; /** * Return the maximum requestURL length before the request is converted into @@ -228,7 +240,7 @@ /** * Return the HTTP verb that will be used for a QUERY (versus an UPDATE or - * other mutation operations) (default {@value #DEFAULT_IS_GET}). POST can + * other mutation operations) (default {@value #DEFAULT_QUERY_METHOD}). POST can * often handle larger queries than GET due to limits at the HTTP client * layer and will defeat http caching and thus provide a current view of the * committed state of the SPARQL end point when the end point is a @@ -312,6 +324,12 @@ this.executor = executor; + setMaxRequestURLLength(Integer.parseInt(System.getProperty( + MAX_REQUEST_URL_LENGTH, + Integer.toString(DEFAULT_MAX_REQUEST_URL_LENGTH)))); + + setQueryMethod(System.getProperty(QUERY_METHOD, DEFAULT_QUERY_METHOD)); + } @Override @@ -1017,7 +1035,7 @@ */ protected void setupConnectOptions() { - opts.method = "POST"; + opts.method = getQueryMethod(); if(update) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
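As an aside on the client API touched by r7957, the sketch below (Java, not part of the commit) shows how the new query-method knobs might be exercised from application code. It assumes an already-constructed RemoteRepository instance (its constructor arguments are not shown in this diff) and that prepareTupleQuery(...).evaluate() yields an openrdf TupleQueryResult, as the countResults() test helper above implies; the setter names and system property constants come directly from the patch.

    import org.openrdf.query.TupleQueryResult;

    import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

    public class QueryMethodSketch {

        /**
         * Run the same query twice, once as an HTTP GET and once as a POST.
         * (Hypothetical helper; "repo" must be obtained elsewhere, e.g. the
         * HA test harness uses getRemoteRepository(server).)
         */
        public static void demo(final RemoteRepository repo) throws Exception {

            // Per-instance override of the HTTP verb used for SPARQL QUERY.
            repo.setQueryMethod("GET");
            runQuery(repo);

            // Forcing a tiny URL limit converts idempotent requests into a
            // POST with an application/x-www-form-urlencoded entity.
            repo.setMaxRequestURLLength(1);
            repo.setQueryMethod("POST");
            runQuery(repo);
        }

        private static void runQuery(final RemoteRepository repo) throws Exception {
            final TupleQueryResult result = repo.prepareTupleQuery(
                    "SELECT * {?a ?b ?c}").evaluate();
            try {
                long n = 0;
                while (result.hasNext()) {
                    result.next();
                    n++;
                }
                System.out.println("solutions=" + n);
            } finally {
                result.close();
            }
        }
    }

Because the constructor now reads both system properties, the same defaults could instead be set process-wide, e.g. -Dcom.bigdata.rdf.sail.webapp.client.RemoteRepository.queryMethod=GET or -Dcom.bigdata.rdf.sail.webapp.client.RemoteRepository.maxRequestURLLength=1000 (example value).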
From: <tho...@us...> - 2014-03-14 21:39:16
Revision: 7969 http://sourceforge.net/p/bigdata/code/7969 Author: thompsonbry Date: 2014-03-14 21:39:12 +0000 (Fri, 14 Mar 2014) Log Message: ----------- Modified build.xml in the main branch and the RDR branch to fix the javadoc build. Removed a character in both branches in the PR.java file that was causing problems with javadoc generation (non-UTF-8). See #810 (Expose GAS Service) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java branches/BIGDATA_RELEASE_1_3_0/build.xml branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java branches/RDR/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-03-14 21:23:52 UTC (rev 7968) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-03-14 21:39:12 UTC (rev 7969) @@ -54,7 +54,7 @@ * <dd>sum( neighbor_value / neighbor_num_out_edges ) over the in-edges of the * graph.</dd> * <dt>Apply</dt> - * <dd>value = <i>resetProb</i> + (1.0 \xD0 <i>resetProb</i>) * gatherSum</dd> + * <dd>value = <i>resetProb</i> + (1.0 - <i>resetProb</i>) * gatherSum</dd> * <dt>Scatter</dt> * <dd>if (a) value has significantly changed <code>(fabs(old-new) GT * <i>epsilon</i>)</code>; or (b) iterations LT limit</dd> Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-03-14 21:23:52 UTC (rev 7968) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-03-14 21:39:12 UTC (rev 7969) @@ -385,9 +385,6 @@ overview="${bigdata.dir}/overview.html" windowtitle="bigdata® v${build.ver}" classpathref="build.classpath" - package="true" - protected="true" - public="true" private="false" > <arg value="-J-Xmx1000m" /> @@ -401,6 +398,7 @@ <packageset dir="${bigdata.dir}/bigdata-sails/src/samples" /> <packageset dir="${bigdata.dir}/bigdata-gom/src/java" /> <packageset dir="${bigdata.dir}/bigdata-gom/src/samples" /> + <packageset dir="${bigdata.dir}/bigdata-gas/src/java" /> <packageset dir="${bigdata.dir}/ctc-striterators/src/java" /> <doctitle> <![CDATA[<h1>bigdata® v${build.ver}</h1>]]></doctitle> Modified: branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java =================================================================== --- branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-03-14 21:23:52 UTC (rev 7968) +++ branches/RDR/bigdata-gas/src/java/com/bigdata/rdf/graph/analytics/PR.java 2014-03-14 21:39:12 UTC (rev 7969) @@ -55,7 +55,7 @@ * <dd>sum( neighbor_value / neighbor_num_out_edges ) over the in-edges of the * graph.</dd> * <dt>Apply</dt> - * <dd>value = <i>resetProb</i> + (1.0 \xD0 <i>resetProb</i>) * gatherSum</dd> + * <dd>value = <i>resetProb</i> + (1.0 - <i>resetProb</i>) * gatherSum</dd> * <dt>Scatter</dt> * <dd>if (a) value has significantly changed <code>(fabs(old-new) GT * <i>epsilon</i>)</code>; or (b) iterations LT limit</dd> Modified: branches/RDR/build.xml =================================================================== --- branches/RDR/build.xml 2014-03-14 21:23:52 UTC (rev 7968) +++ branches/RDR/build.xml 2014-03-14 21:39:12 UTC (rev 7969) @@ -394,9 +394,6 @@ overview="${bigdata.dir}/overview.html" windowtitle="bigdata® v${build.ver}" 
classpathref="build.classpath" - package="true" - protected="true" - public="true" private="false" > <arg value="-J-Xmx1000m" /> @@ -410,6 +407,7 @@ <packageset dir="${bigdata.dir}/bigdata-sails/src/samples" /> <packageset dir="${bigdata.dir}/bigdata-gom/src/java" /> <packageset dir="${bigdata.dir}/bigdata-gom/src/samples" /> + <packageset dir="${bigdata.dir}/bigdata-gas/src/java" /> <packageset dir="${bigdata.dir}/ctc-striterators/src/java" /> <doctitle> <![CDATA[<h1>bigdata® v${build.ver}</h1>]]></doctitle> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
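The javadoc repaired here documents the PageRank GAS stages (Gather, Apply, Scatter). For concreteness, a small standalone sketch of the arithmetic it describes; the resetProb, epsilon and neighbor figures below are invented for illustration and are not values taken from PR.java.

    public class PageRankApplySketch {

        public static void main(final String[] args) {

            final double resetProb = 0.15; // assumed, not taken from PR.java
            final double epsilon = 0.01;   // assumed convergence threshold

            // Gather: sum(neighbor_value / neighbor_num_out_edges) over the
            // in-edges. Two hypothetical in-neighbors with values 0.5 and 1.0
            // and out-degrees 2 and 3.
            final double gatherSum = 0.5 / 2 + 1.0 / 3;

            // Apply: the line repaired in this commit (the minus sign had been
            // a non-UTF-8 character that broke the javadoc build).
            final double oldValue = 1.0;
            final double newValue = resetProb + (1.0 - resetProb) * gatherSum;

            // Scatter: propagate only if the value changed significantly.
            final boolean scatter = Math.abs(oldValue - newValue) > epsilon;

            System.out.println("newValue=" + newValue + ", scatter=" + scatter);
        }
    }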
From: <mrp...@us...> - 2014-05-08 18:24:22
Revision: 8233 http://sourceforge.net/p/bigdata/code/8233 Author: mrpersonick Date: 2014-05-08 18:24:19 +0000 (Thu, 08 May 2014) Log Message: ----------- New branch for blueprints. Modified Paths: -------------- branches/BLUEPRINTS/bigdata/src/resources/logging/log4j.properties branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket275.java Added Paths: ----------- branches/BLUEPRINTS/ branches/BLUEPRINTS/bigdata-war/src/WEB-INF/GraphStore.properties branches/BLUEPRINTS/graph-example-1.xml Index: branches/BLUEPRINTS =================================================================== --- branches/BIGDATA_RELEASE_1_3_0 2014-05-08 18:09:18 UTC (rev 8232) +++ branches/BLUEPRINTS 2014-05-08 18:24:19 UTC (rev 8233) Property changes on: branches/BLUEPRINTS ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,31 ## +ant-build +src +bin +bigdata*.jar +ant-release +standalone +test* +countersfinal.xml +events.jnl +.settings +*.jnl +TestInsertRate.out +SYSTAP-BBT-result.txt +U10load+query +*.hprof +com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv +commit-log.txt +eventLog +dist +bigdata-test +com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv +DIST.bigdata-*.tgz +REL.bigdata-*.tgz +queryLog* +queryRunState* +sparql.txt +benchmark +CI +bsbm10-dataset.nt.gz +bsbm10-dataset.nt.zip +benchmark* Added: svn:mergeinfo ## -0,0 +1,20 ## +/branches/BIGDATA_MGC_HA1_HA5:8025-8122 +/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 +/branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BTREE_BUFFER_BRANCH:2004-2045 +/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 +/branches/INT64_BRANCH:4486-4522 +/branches/JOURNAL_HA_BRANCH:2596-4066 +/branches/LARGE_LITERALS_REFACTOR:4175-4387 +/branches/LEXICON_REFACTOR_BRANCH:2633-3304 +/branches/MGC_1_3_0:7609-7752 +/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 +/branches/RDR:7665-8159 +/branches/READ_CACHE:7215-7271 +/branches/RWSTORE_1_1_0_DEBUG:5896-5935 +/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 +/branches/ZK_DISCONNECT_HANDLING:7465-7484 +/branches/bugfix-btm:2594-3237 +/branches/dev-btm:2574-2730 +/branches/fko:3150-3194 +/trunk:3392-3437,3656-4061 \ No newline at end of property Modified: branches/BLUEPRINTS/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/logging/log4j.properties 2014-05-08 18:09:18 UTC (rev 8232) +++ branches/BLUEPRINTS/bigdata/src/resources/logging/log4j.properties 2014-05-08 18:24:19 UTC (rev 8233) @@ -16,7 +16,9 @@ log4j.logger.com.bigdata.rdf.store.DataLoader=INFO log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO -#log4j.logger.com.bigdata.rdf.rio.StatementBuffer=ALL +log4j.logger.com.bigdata.rdf.sail.webapp.HALoadBalancerServlet=ERROR + +#log4j.logger.com.bigdata.blueprints=ALL #log4j.logger.com.bigdata.rdf.sail.TestProvenanceQuery=ALL #log4j.logger.com.bigdata.rdf.sail.TestSids=ALL #log4j.logger.com.bigdata.rdf.sail.ProxyBigdataSailTestCase=ALL Modified: branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq 2014-05-08 18:09:18 UTC (rev 8232) +++ 
branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq 2014-05-08 18:24:19 UTC (rev 8233) @@ -4,14 +4,11 @@ ?user <http://arvados.org/schema/api_token> <token:ckedd> . { ?user <http://arvados.org/schema/user_is_admin> true . - ?s ?p ?o . - FILTER strStarts(str(?s), "http://arvados.org/schema/modified") . } union { - ?user <http://arvados.org/schema/user_is_admin> false . ?user <http://arvados.org/schema/permission/can_read> ?s . +} ?s ?p ?o . FILTER strStarts(str(?s), "http://arvados.org/schema/modified") . -} } \ No newline at end of file Modified: branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket275.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket275.java 2014-05-08 18:09:18 UTC (rev 8232) +++ branches/BLUEPRINTS/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket275.java 2014-05-08 18:24:19 UTC (rev 8233) @@ -95,7 +95,14 @@ RDFFormat.TURTLE); conn.commit(); - final String query = "SELECT ?lookup WHERE { ?lookup <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <os:class/Lookup> . ?lookup <os:prop/lookup/majorType> ?majorType . OPTIONAL{?lookup <os:prop/lookup/minorType> ?minorType}. FILTER(STR(?majorType) = ?argMajorType). FILTER(!bound(?minorType))}"; + final String query = "SELECT ?lookup " + + "WHERE { " + + "?lookup <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <os:class/Lookup> . " + + "?lookup <os:prop/lookup/majorType> ?majorType . " + + "OPTIONAL{?lookup <os:prop/lookup/minorType> ?minorType}. " + + "FILTER(STR(?majorType) = ?argMajorType). " + + "FILTER(!bound(?minorType))" + + "}"; final TupleQuery q = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); q.setBinding("argMajorType", conn.getValueFactory() Added: branches/BLUEPRINTS/bigdata-war/src/WEB-INF/GraphStore.properties =================================================================== --- branches/BLUEPRINTS/bigdata-war/src/WEB-INF/GraphStore.properties (rev 0) +++ branches/BLUEPRINTS/bigdata-war/src/WEB-INF/GraphStore.properties 2014-05-08 18:24:19 UTC (rev 8233) @@ -0,0 +1,40 @@ +# +# Note: These options are applied when the journal and the triple store are +# first created. + +## +## Journal options. +## + +# The backing file. This contains all your data. You want to put this someplace +# safe. The default locator will wind up in the directory from which you start +# your servlet container. +com.bigdata.journal.AbstractJournal.file=bigdata.jnl + +# The persistence engine. Use 'Disk' for the WORM or 'DiskRW' for the RWStore. +com.bigdata.journal.AbstractJournal.bufferMode=DiskRW + +# Setup for the RWStore recycler rather than session protection. +com.bigdata.service.AbstractTransactionService.minReleaseAge=1 + +com.bigdata.btree.writeRetentionQueue.capacity=4000 +com.bigdata.btree.BTree.branchingFactor=128 + +# 200M initial extent. +com.bigdata.journal.AbstractJournal.initialExtent=209715200 +com.bigdata.journal.AbstractJournal.maximumExtent=209715200 + +## +## Setup for QUADS mode without the full text index. +## +com.bigdata.rdf.sail.truthMaintenance=false +com.bigdata.rdf.store.AbstractTripleStore.quads=false +com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=false +com.bigdata.rdf.store.AbstractTripleStore.textIndex=true +com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=com.bigdata.rdf.axioms.NoAxioms + +# Bump up the branching factor for the lexicon indices on the default kb. 
+com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=400 + +# Bump up the branching factor for the statement indices on the default kb. +com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=1024 Added: branches/BLUEPRINTS/graph-example-1.xml =================================================================== --- branches/BLUEPRINTS/graph-example-1.xml (rev 0) +++ branches/BLUEPRINTS/graph-example-1.xml 2014-05-08 18:24:19 UTC (rev 8233) @@ -0,0 +1,54 @@ +<?xml version="1.0" encoding="UTF-8"?> +<graphml xmlns="http://graphml.graphdrawing.org/xmlns" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns + http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> + <key id="weight" for="edge" attr.name="weight" attr.type="float"/> + <key id="name" for="node" attr.name="name" attr.type="string"/> + <key id="age" for="node" attr.name="age" attr.type="int"/> + <key id="lang" for="node" attr.name="lang" attr.type="string"/> + <graph id="G" edgedefault="directed"> + <node id="1"> + <data key="name">marko</data> + <data key="age">29</data> + </node> + <node id="2"> + <data key="name">vadas</data> + <data key="age">27</data> + </node> + <node id="3"> + <data key="name">lop</data> + <data key="lang">java</data> + </node> + <node id="4"> + <data key="name">josh</data> + <data key="age">32</data> + </node> + <node id="5"> + <data key="name">ripple</data> + <data key="lang">java</data> + </node> + <node id="6"> + <data key="name">peter</data> + <data key="age">35</data> + </node> + <edge id="7" source="1" target="2" label="knows"> + <data key="weight">0.5</data> + </edge> + <edge id="8" source="1" target="4" label="knows"> + <data key="weight">1.0</data> + </edge> + <edge id="9" source="1" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="10" source="4" target="5" label="created"> + <data key="weight">1.0</data> + </edge> + <edge id="11" source="4" target="3" label="created"> + <data key="weight">0.4</data> + </edge> + <edge id="12" source="6" target="3" label="created"> + <data key="weight">0.2</data> + </edge> + </graph> +</graphml> \ No newline at end of file This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
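The graph-example-1.xml added by r8233 is the stock TinkerPop sample graph, which suggests how the new BLUEPRINTS branch is meant to be driven. The minimal sketch below loads it through the Blueprints 2.x API; it is illustrative only, since the bigdata-backed Graph implementation is not part of this commit, so the in-memory TinkerGraph is used as a stand-in and the GraphMLReader call assumes the standard com.tinkerpop.blueprints.util.io.graphml API.

    import java.io.FileInputStream;
    import java.io.InputStream;

    import com.tinkerpop.blueprints.Graph;
    import com.tinkerpop.blueprints.Vertex;
    import com.tinkerpop.blueprints.impls.tg.TinkerGraph;
    import com.tinkerpop.blueprints.util.io.graphml.GraphMLReader;

    public class GraphMLExampleLoader {

        public static void main(final String[] args) throws Exception {

            // Stand-in graph for illustration; a bigdata-backed Graph would
            // presumably be configured from GraphStore.properties instead.
            final Graph g = new TinkerGraph();

            final InputStream is = new FileInputStream("graph-example-1.xml");
            try {
                GraphMLReader.inputGraph(g, is);
            } finally {
                is.close();
            }

            // The sample file defines 6 vertices (marko, vadas, lop, josh,
            // ripple, peter) and 6 weighted "knows"/"created" edges.
            for (Vertex v : g.getVertices()) {
                System.out.println(v.getId() + ": " + v.getProperty("name"));
            }
        }
    }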
From: <tob...@us...> - 2014-06-05 23:23:46
Revision: 8450 http://sourceforge.net/p/bigdata/code/8450 Author: tobycraig Date: 2014-06-05 23:23:41 +0000 (Thu, 05 Jun 2014) Log Message: ----------- Modified Paths: -------------- branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js Added Paths: ----------- branches/WORKBENCH_QUERY_HISTORY/ Index: branches/WORKBENCH_QUERY_HISTORY =================================================================== --- branches/BIGDATA_RELEASE_1_3_0 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY 2014-06-05 23:23:41 UTC (rev 8450) Property changes on: branches/WORKBENCH_QUERY_HISTORY ___________________________________________________________________ Added: svn:ignore ## -0,0 +1,31 ## +ant-build +src +bin +bigdata*.jar +ant-release +standalone +test* +countersfinal.xml +events.jnl +.settings +*.jnl +TestInsertRate.out +SYSTAP-BBT-result.txt +U10load+query +*.hprof +com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv +commit-log.txt +eventLog +dist +bigdata-test +com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv +DIST.bigdata-*.tgz +REL.bigdata-*.tgz +queryLog* +queryRunState* +sparql.txt +benchmark +CI +bsbm10-dataset.nt.gz +bsbm10-dataset.nt.zip +benchmark* Added: svn:mergeinfo ## -0,0 +1,20 ## +/branches/BIGDATA_MGC_HA1_HA5:8025-8122 +/branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 +/branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BTREE_BUFFER_BRANCH:2004-2045 +/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 +/branches/INT64_BRANCH:4486-4522 +/branches/JOURNAL_HA_BRANCH:2596-4066 +/branches/LARGE_LITERALS_REFACTOR:4175-4387 +/branches/LEXICON_REFACTOR_BRANCH:2633-3304 +/branches/MGC_1_3_0:7609-7752 +/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 +/branches/RDR:7665-8159 +/branches/READ_CACHE:7215-7271 +/branches/RWSTORE_1_1_0_DEBUG:5896-5935 +/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 +/branches/ZK_DISCONNECT_HANDLING:7465-7484 +/branches/bugfix-btm:2594-3237 +/branches/dev-btm:2574-2730 +/branches/fko:3150-3194 +/trunk:3392-3437,3656-4061 \ No newline at end of property Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/css/style.css 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/css/style.css 2014-06-05 23:23:41 UTC (rev 8450) @@ -247,7 +247,7 @@ border: none; } -.advanced-features, #query-response, #query-pagination, #query-explanation, #query-export-container, #update-response, #update-clear-container, #explore-results, #namespace-properties { +.advanced-features, #query-response, #query-pagination, #query-explanation, #query-history, #query-export-container, #update-response, #update-clear-container, #explore-results, #namespace-properties { display: none; } @@ -325,6 +325,10 @@ border: 1px solid #e1e1e1; } +#query-history .query { + white-space: pre; +} + #query-export-container { text-align: right; } Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-06-05 14:48:07 UTC (rev 8449) +++ 
branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/index.html 2014-06-05 23:23:41 UTC (rev 8450) @@ -132,6 +132,19 @@ <div id="query-explanation" class="box"> </div> + <div id="query-history" class="box"> + <table> + <thead> + <tr> + <th>Time</th> + <th>Query</th> + <th>Results</th> + </tr> + </thead> + <tbody></tbody> + </table> + </div> + <div id="query-export-container" class="box"> <button id="query-export">Export</button> <button id="query-response-clear">Clear</button> Modified: branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/js/workbench.js 2014-06-05 14:48:07 UTC (rev 8449) +++ branches/WORKBENCH_QUERY_HISTORY/bigdata-war/src/html/js/workbench.js 2014-06-05 23:23:41 UTC (rev 8450) @@ -250,7 +250,7 @@ data: data, contentType: 'application/xml', success: function() { $('#new-namespace-name').val(''); getNamespaces(); }, - error: function(jqXHR, textStatus, errorThrown) { debugger;alert(jqXHR.responseText); } + error: function(jqXHR, textStatus, errorThrown) { alert(jqXHR.responseText); } }; $.ajax(RW_URL_PREFIX + 'namespace', settings); } @@ -632,6 +632,13 @@ }); EDITORS.query.addKeyMap({'Ctrl-Enter': submitQuery}); +$('#query-history').on('click', '.query', loadHistory); + +function loadHistory() { + EDITORS.query.setValue(this.innerText); + EDITORS.query.focus(); +} + function submitQuery(e) { try { e.preventDefault(); @@ -641,10 +648,38 @@ EDITORS.query.save(); // do nothing if query is empty - if($('#query-box').val().trim() == '') { + var query = $('#query-box').val().trim(); + if(query == '') { return; } + var queryExists = false; + + // see if this query is already in the history + $('#query-history tbody tr').each(function(i, row) { + if($(row).find('.query')[0].innerText == query) { + // clear the old results and set the time to now + $(row).find('.query-time').text(new Date().toISOString()); + $(row).find('.query-results').text('...'); + // move it to the top + $(row).prependTo('#query-history tbody'); + queryExists = true; + return false; + } + }); + + if(!queryExists) { + // add this query to the history + var row = $('<tr>').prependTo($('#query-history tbody')); + row.append('<td class="query-time">' + new Date().toISOString() + '</td>'); + var cell = $('<td class="query">').appendTo(row); + cell.text(query); + cell.html(cell.html().replace('\n', '<br>')); + row.append('<td class="query-results">...</td>'); + } + + $('#query-history').show(); + var url = RO_URL_PREFIX + 'namespace/' + NAMESPACE + '/sparql'; var settings = { type: 'POST', @@ -804,6 +839,10 @@ $('#download-link').remove(); } +function updateResultCount(count) { + $('#query-history tbody tr:first td.query-results').text(count); +} + function showQueryResults(data) { $('#query-response').empty(); $('#query-export-rdf').hide(); @@ -833,6 +872,7 @@ table.append(tr); } } + updateResultCount(rows.length); } else { // JSON // save data for export and pagination @@ -841,6 +881,7 @@ if(typeof(data.boolean) != 'undefined') { // ASK query table.append('<tr><td>' + data.boolean + '</td></tr>').addClass('boolean'); + updateResultCount('' + data.boolean); return; } @@ -882,6 +923,7 @@ table.append(thead); $('#total-results').html(data.results.bindings.length); + updateResultCount(data.results.bindings.length); setNumberOfPages(); showPage(1); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |