You can subscribe to this list here.
2006 |
Jan
|
Feb
|
Mar
(414) |
Apr
(123) |
May
(448) |
Jun
(180) |
Jul
(17) |
Aug
(49) |
Sep
(3) |
Oct
(92) |
Nov
(101) |
Dec
(64) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2007 |
Jan
(132) |
Feb
(230) |
Mar
(146) |
Apr
(146) |
May
|
Jun
|
Jul
(34) |
Aug
(4) |
Sep
(3) |
Oct
(10) |
Nov
(12) |
Dec
(24) |
2008 |
Jan
(6) |
Feb
|
Mar
|
Apr
|
May
(1) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
(11) |
Nov
(4) |
Dec
|
2009 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(1) |
Oct
|
Nov
|
Dec
|
From: Bryan T. <tho...@us...> - 2007-03-27 14:35:14
|
Update of /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6717/src/java/com/bigdata/rdf Modified Files: TempTripleStore.java TripleStore.java RdfKeyBuilder.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: TempTripleStore.java =================================================================== RCS file: /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf/TempTripleStore.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** TempTripleStore.java 17 Feb 2007 21:34:57 -0000 1.2 --- TempTripleStore.java 27 Mar 2007 14:35:08 -0000 1.3 *************** *** 49,52 **** --- 49,53 ---- import java.util.Locale; + import java.util.UUID; import org.apache.log4j.Logger; *************** *** 110,113 **** --- 111,115 ---- ndx = registerIndex(name, new BTree(this, BTree.DEFAULT_BRANCHING_FACTOR, + UUID.randomUUID(), StatementSerializer.INSTANCE)); Index: RdfKeyBuilder.java =================================================================== RCS file: /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf/RdfKeyBuilder.java,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -d -r1.7 -r1.8 *** RdfKeyBuilder.java 9 Feb 2007 20:18:56 -0000 1.7 --- RdfKeyBuilder.java 27 Mar 2007 14:35:08 -0000 1.8 *************** *** 307,311 **** /* ! * @todo if we know that the bnode id is a uuid that we generated * then we should encode that using faster logic that this unicode * conversion and stick the sort key on the bnode so that we do --- 307,311 ---- /* ! 
* @todo if we know that the bnode id is a segmentUUID that we generated * then we should encode that using faster logic that this unicode * conversion and stick the sort key on the bnode so that we do Index: TripleStore.java =================================================================== RCS file: /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf/TripleStore.java,v retrieving revision 1.23 retrieving revision 1.24 diff -C2 -d -r1.23 -r1.24 *** TripleStore.java 15 Mar 2007 16:11:54 -0000 1.23 --- TripleStore.java 27 Mar 2007 14:35:08 -0000 1.24 *************** *** 57,60 **** --- 57,61 ---- import java.util.Locale; import java.util.Properties; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 268,272 **** ndx_termId = ndx = registerIndex(name_termId, new BTree(this, ! BTree.DEFAULT_BRANCHING_FACTOR, TermIdSerializer.INSTANCE)); } --- 269,274 ---- ndx_termId = ndx = registerIndex(name_termId, new BTree(this, ! BTree.DEFAULT_BRANCHING_FACTOR, UUID.randomUUID(), ! TermIdSerializer.INSTANCE)); } *************** *** 286,289 **** --- 288,292 ---- ndx_idTerm = ndx = registerIndex(name_idTerm, new BTree(this, BTree.DEFAULT_BRANCHING_FACTOR, + UUID.randomUUID(), RdfValueSerializer.INSTANCE)); *************** *** 312,315 **** --- 315,319 ---- ndx = registerIndex(name, new BTree(this, BTree.DEFAULT_BRANCHING_FACTOR, + UUID.randomUUID(), StatementSerializer.INSTANCE)); |
From: Bryan T. <tho...@us...> - 2007-03-27 14:35:12
|
Update of /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf/rio In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6717/src/java/com/bigdata/rdf/rio Modified Files: BulkLoaderBuffer.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: BulkLoaderBuffer.java =================================================================== RCS file: /cvsroot/cweb/bigdata-rdf/src/java/com/bigdata/rdf/rio/BulkLoaderBuffer.java,v retrieving revision 1.9 retrieving revision 1.10 diff -C2 -d -r1.9 -r1.10 *** BulkLoaderBuffer.java 15 Feb 2007 22:01:25 -0000 1.9 --- BulkLoaderBuffer.java 27 Mar 2007 14:35:08 -0000 1.10 *************** *** 56,59 **** --- 56,60 ---- import com.bigdata.io.ByteBufferOutputStream; + import com.bigdata.objndx.IIndex; import com.bigdata.objndx.IndexSegment; import com.bigdata.objndx.IndexSegmentBuilder; *************** *** 154,162 **** final long begin = System.currentTimeMillis(); ! new IndexSegmentBuilder(outFile, null, numTerms, ! new TermIdIterator(this), branchingFactor, ! TermIdSerializer.INSTANCE, ! useChecksum, recordCompressor, ! errorRate); final long elapsed = System.currentTimeMillis() - begin; --- 155,162 ---- final long begin = System.currentTimeMillis(); ! new IndexSegmentBuilder(outFile, null, numTerms, new TermIdIterator( ! this), branchingFactor, TermIdSerializer.INSTANCE, useChecksum, ! recordCompressor, errorRate, store.getTermIdIndex() ! .getIndexUUID()); final long elapsed = System.currentTimeMillis() - begin; *************** *** 185,194 **** final long begin = System.currentTimeMillis(); ! new IndexSegmentBuilder(outFile, null, ! numTerms, new TermIterator(this), ! branchingFactor, ! RdfValueSerializer.INSTANCE, ! useChecksum, recordCompressor, ! 
errorRate); final long elapsed = System.currentTimeMillis() - begin; --- 185,192 ---- final long begin = System.currentTimeMillis(); ! new IndexSegmentBuilder(outFile, null, numTerms, ! new TermIterator(this), branchingFactor, ! RdfValueSerializer.INSTANCE, useChecksum, recordCompressor, ! errorRate, store.getIdTermIndex().getIndexUUID()); final long elapsed = System.currentTimeMillis() - begin; *************** *** 210,219 **** final long begin = System.currentTimeMillis(); new IndexSegmentBuilder(outFile, null, numStmts, ! new UnknownStatementIterator(keyOrder,this), ! branchingFactor, ! StatementSerializer.INSTANCE, ! useChecksum, recordCompressor, ! errorRate); final long elapsed = System.currentTimeMillis() - begin; --- 208,230 ---- final long begin = System.currentTimeMillis(); + final IIndex ndx; + switch(keyOrder) { + case SPO: + ndx = store.getSPOIndex(); + break; + case POS: + ndx = store.getPOSIndex(); + break; + case OSP: + ndx = store.getOSPIndex(); + break; + default: + throw new AssertionError("Unknown keyOrder=" + keyOrder); + } + new IndexSegmentBuilder(outFile, null, numStmts, ! new UnknownStatementIterator(keyOrder, this), branchingFactor, ! StatementSerializer.INSTANCE, useChecksum, recordCompressor, ! errorRate, ndx.getIndexUUID()); final long elapsed = System.currentTimeMillis() - begin; |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:58
|
Update of /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/test/com/bigdata/objndx Modified Files: TestIndexSegmentWithBloomFilter.java TestIndexSegmentBuilderWithLargeTrees.java TestIncrementalWrite.java TestReopen.java AbstractBTreeTestCase.java TestRestartSafe.java TestNodeSerializer.java TestTouch.java TestCommit.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: TestIndexSegmentBuilderWithLargeTrees.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestIndexSegmentBuilderWithLargeTrees.java,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -d -r1.13 -r1.14 *** TestIndexSegmentBuilderWithLargeTrees.java 22 Mar 2007 21:11:24 -0000 1.13 --- TestIndexSegmentBuilderWithLargeTrees.java 27 Mar 2007 14:34:21 -0000 1.14 *************** *** 51,54 **** --- 51,55 ---- import java.io.IOException; import java.util.Properties; + import java.util.UUID; import com.bigdata.journal.BufferMode; *************** *** 105,109 **** Journal journal = new Journal(getProperties()); ! BTree btree = new BTree(journal, branchingFactor, SimpleEntry.Serializer.INSTANCE); --- 106,110 ---- Journal journal = new Journal(getProperties()); ! 
BTree btree = new BTree(journal, branchingFactor, UUID.randomUUID(), SimpleEntry.Serializer.INSTANCE); Index: TestIndexSegmentWithBloomFilter.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestIndexSegmentWithBloomFilter.java,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -d -r1.11 -r1.12 *** TestIndexSegmentWithBloomFilter.java 22 Mar 2007 21:11:24 -0000 1.11 --- TestIndexSegmentWithBloomFilter.java 27 Mar 2007 14:34:21 -0000 1.12 *************** *** 53,58 **** import java.io.IOException; import java.util.Properties; - import com.bigdata.cache.HardReferenceQueue; import com.bigdata.journal.BufferMode; import com.bigdata.journal.Journal; --- 53,58 ---- import java.io.IOException; import java.util.Properties; + import java.util.UUID; import com.bigdata.journal.BufferMode; import com.bigdata.journal.Journal; *************** *** 139,152 **** Journal journal = new Journal(properties); ! // A modest leaf queue capacity. ! final int leafQueueCapacity = 500; ! ! final int nscan = 10; ! ! BTree btree = new BTree(journal, branchingFactor, ! new HardReferenceQueue<PO>(new DefaultEvictionListener(), ! leafQueueCapacity, nscan), ! SimpleEntry.Serializer.INSTANCE, null // no record compressor ! ); return btree; --- 139,144 ---- Journal journal = new Journal(properties); ! BTree btree = new BTree(journal, branchingFactor, UUID.randomUUID(), ! 
SimpleEntry.Serializer.INSTANCE); return btree; Index: TestReopen.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestReopen.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** TestReopen.java 15 Mar 2007 16:11:09 -0000 1.1 --- TestReopen.java 27 Mar 2007 14:34:21 -0000 1.2 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.objndx; + import java.util.UUID; + import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.SimpleMemoryRawStore; *************** *** 110,113 **** --- 112,117 ---- IRawStore store = new SimpleMemoryRawStore(); + final UUID indexUUID = UUID.randomUUID(); + /* * The btree under test. *************** *** 117,121 **** * by the default fixture factory). */ ! BTree btree = new BTree(store, 3, SimpleEntry.Serializer.INSTANCE); /* --- 121,125 ---- * by the default fixture factory). */ ! BTree btree = new BTree(store, 3, indexUUID, SimpleEntry.Serializer.INSTANCE); /* *************** *** 126,130 **** * and leaves onto the store. */ ! BTree groundTruth = new BTree(store, 3, SimpleEntry.Serializer.INSTANCE); final int limit = 10000; --- 130,134 ---- * and leaves onto the store. */ ! 
BTree groundTruth = new BTree(store, 3, indexUUID, SimpleEntry.Serializer.INSTANCE); final int limit = 10000; Index: TestTouch.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestTouch.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** TestTouch.java 9 Feb 2007 16:13:18 -0000 1.5 --- TestTouch.java 27 Mar 2007 14:34:21 -0000 1.6 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.objndx; + import java.util.UUID; + import com.bigdata.rawstore.SimpleMemoryRawStore; *************** *** 102,105 **** --- 104,108 ---- new SimpleMemoryRawStore(), branchingFactor, + UUID.randomUUID(), leafQueue, SimpleEntry.Serializer.INSTANCE, *************** *** 173,176 **** --- 176,180 ---- new SimpleMemoryRawStore(), branchingFactor, + UUID.randomUUID(), leafQueue, SimpleEntry.Serializer.INSTANCE, *************** *** 246,249 **** --- 250,254 ---- new SimpleMemoryRawStore(), branchingFactor, + UUID.randomUUID(), leafQueue, valSer, Index: TestCommit.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestCommit.java,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -d -r1.13 -r1.14 *** TestCommit.java 11 Mar 2007 11:42:50 -0000 1.13 --- TestCommit.java 27 Mar 2007 14:34:21 -0000 1.14 *************** *** 48,54 **** package com.bigdata.objndx; import junit.framework.TestCase2; - import com.bigdata.cache.HardReferenceQueue; import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.SimpleMemoryRawStore; --- 48,55 ---- package com.bigdata.objndx; + import java.util.UUID; + import junit.framework.TestCase2; import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.SimpleMemoryRawStore; *************** *** 59,64 **** * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ - * - * FIXME This suite should verify that */ public class 
TestCommit extends TestCase2 { --- 60,63 ---- *************** *** 94,105 **** { ! BTree btree = new BTree(store, ! branchingFactor, ! new HardReferenceQueue<PO>(new DefaultEvictionListener(), ! BTree.DEFAULT_HARD_REF_QUEUE_CAPACITY, ! BTree.DEFAULT_HARD_REF_QUEUE_SCAN), ! SimpleEntry.Serializer.INSTANCE, ! null // no record compressor ! ); assertTrue(btree.root.isDirty()); --- 93,98 ---- { ! BTree btree = new BTree(store, branchingFactor, UUID.randomUUID(), ! SimpleEntry.Serializer.INSTANCE); assertTrue(btree.root.isDirty()); Index: AbstractBTreeTestCase.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/AbstractBTreeTestCase.java,v retrieving revision 1.31 retrieving revision 1.32 diff -C2 -d -r1.31 -r1.32 *** AbstractBTreeTestCase.java 8 Mar 2007 18:14:05 -0000 1.31 --- AbstractBTreeTestCase.java 27 Mar 2007 14:34:21 -0000 1.32 *************** *** 52,55 **** --- 52,56 ---- import java.util.Random; import java.util.TreeMap; + import java.util.UUID; import junit.framework.AssertionFailedError; *************** *** 492,496 **** final int nscan = 10; ! BTree btree = new BTree(store, branchingFactor, new HardReferenceQueue<PO>(new NoEvictionListener(), leafQueueCapacity, nscan), --- 493,497 ---- final int nscan = 10; ! BTree btree = new BTree(store, branchingFactor, UUID.randomUUID(), new HardReferenceQueue<PO>(new NoEvictionListener(), leafQueueCapacity, nscan), *************** *** 1554,1557 **** --- 1555,1560 ---- assert actual != null; + assertEquals("indexUUID",expected.getIndexUUID(), actual.getIndexUUID()); + // The #of entries must agree. 
assertEquals("entryCount",expected.getEntryCount(), actual.getEntryCount()); Index: TestRestartSafe.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestRestartSafe.java,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -d -r1.7 -r1.8 *** TestRestartSafe.java 22 Mar 2007 21:11:24 -0000 1.7 --- TestRestartSafe.java 27 Mar 2007 14:34:21 -0000 1.8 *************** *** 51,54 **** --- 51,55 ---- import java.io.IOException; import java.util.Properties; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 145,149 **** public BTree getBTree(int branchingFactor, Journal journal) { ! BTree btree = new BTree(journal, branchingFactor, SimpleEntry.Serializer.INSTANCE); --- 146,150 ---- public BTree getBTree(int branchingFactor, Journal journal) { ! BTree btree = new BTree(journal, branchingFactor, UUID.randomUUID(), SimpleEntry.Serializer.INSTANCE); Index: TestIncrementalWrite.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestIncrementalWrite.java,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -d -r1.7 -r1.8 *** TestIncrementalWrite.java 9 Feb 2007 16:13:18 -0000 1.7 --- TestIncrementalWrite.java 27 Mar 2007 14:34:21 -0000 1.8 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.objndx; + import java.util.UUID; + import com.bigdata.cache.HardReferenceQueue; import com.bigdata.rawstore.IRawStore; *************** *** 83,86 **** --- 85,89 ---- BTree btree = new BTree(store, branchingFactor, + UUID.randomUUID(), new MyHardReferenceQueue<PO>(new DefaultEvictionListener(), queueCapacity, queueScan), Index: TestNodeSerializer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/objndx/TestNodeSerializer.java,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -d -r1.16 -r1.17 *** 
TestNodeSerializer.java 12 Mar 2007 18:06:12 -0000 1.16 --- TestNodeSerializer.java 27 Mar 2007 14:34:21 -0000 1.17 *************** *** 52,55 **** --- 52,56 ---- import java.util.Set; import java.util.TreeSet; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 278,282 **** final int nscan = 10; ! BTree btree = new BTree(store, branchingFactor, new HardReferenceQueue<PO>(new NoEvictionListener(), leafQueueCapacity, nscan), --- 279,283 ---- final int nscan = 10; ! BTree btree = new BTree(store, branchingFactor, UUID.randomUUID(), new HardReferenceQueue<PO>(new NoEvictionListener(), leafQueueCapacity, nscan), |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:58
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/scaleup Modified Files: MasterJournal.java SlaveJournal.java PartitionedIndexView.java MetadataIndex.java AbstractPartitionTask.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: AbstractPartitionTask.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup/AbstractPartitionTask.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** AbstractPartitionTask.java 11 Mar 2007 11:42:42 -0000 1.2 --- AbstractPartitionTask.java 27 Mar 2007 14:34:22 -0000 1.3 *************** *** 45,48 **** --- 45,49 ---- import java.io.File; + import java.util.UUID; import java.util.concurrent.Executors; *************** *** 107,110 **** --- 108,112 ---- protected final double errorRate; protected final String name; + protected final UUID indexUUID; protected final int partId; protected final byte[] fromKey; *************** *** 172,176 **** */ public AbstractPartitionTask(MasterJournal master, String name, ! int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey) { --- 174,178 ---- */ public AbstractPartitionTask(MasterJournal master, String name, ! UUID indexUUID, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey) { *************** *** 179,182 **** --- 181,185 ---- this.errorRate = errorRate; this.name = name; + this.indexUUID = indexUUID; this.partId = partId; this.fromKey = fromKey; *************** *** 227,235 **** * The output segment identifier. */ ! 
public BuildTask(MasterJournal master, String name, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, IResourceMetadata src, int segId) { ! super(master,name,branchingFactor,errorRate,partId,fromKey,toKey); this.src = src; --- 230,239 ---- * The output segment identifier. */ ! public BuildTask(MasterJournal master, String name, UUID indexUUID, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, IResourceMetadata src, int segId) { ! super(master, name, indexUUID, branchingFactor, errorRate, partId, ! fromKey, toKey); this.src = src; *************** *** 256,260 **** fromKey, toKey), src.rangeIterator(fromKey, toKey), branchingFactor, valSer, useChecksum, recordCompressor, ! errorRate); IResourceMetadata[] resources = new SegmentMetadata[] { new SegmentMetadata( --- 260,264 ---- fromKey, toKey), src.rangeIterator(fromKey, toKey), branchingFactor, valSer, useChecksum, recordCompressor, ! errorRate, indexUUID); IResourceMetadata[] resources = new SegmentMetadata[] { new SegmentMetadata( *************** *** 291,300 **** */ protected AbstractMergeTask(MasterJournal master, String name, ! int branchingFactor, double errorRate, int partId, ! byte[] fromKey, byte[] toKey, int segId, boolean fullCompactingMerge) { ! super(master, name, branchingFactor, errorRate, partId, fromKey, ! toKey); this.segId = segId; --- 295,304 ---- */ protected AbstractMergeTask(MasterJournal master, String name, ! UUID indexUUID, int branchingFactor, double errorRate, ! int partId, byte[] fromKey, byte[] toKey, int segId, boolean fullCompactingMerge) { ! super(master, name, indexUUID, branchingFactor, errorRate, partId, ! fromKey, toKey); this.segId = segId; *************** *** 343,347 **** new IndexSegmentBuilder(outFile, null, mergeItr.nentries, new MergedEntryIterator(mergeItr), branchingFactor, valSer, ! useChecksum, recordCompressor, errorRate); // close the merged leaf iterator (and release its buffer/file). 
--- 347,351 ---- new IndexSegmentBuilder(outFile, null, mergeItr.nentries, new MergedEntryIterator(mergeItr), branchingFactor, valSer, ! useChecksum, recordCompressor, errorRate, indexUUID); // close the merged leaf iterator (and release its buffer/file). *************** *** 429,439 **** * The output segment identifier. */ ! public MergeTask(MasterJournal master, String name, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, IResourceMetadata[] resources, int segId) { ! super(master, name, branchingFactor, errorRate, partId, fromKey, ! toKey, segId, false); this.resources = resources; --- 433,443 ---- * The output segment identifier. */ ! public MergeTask(MasterJournal master, String name, UUID indexUUID, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, IResourceMetadata[] resources, int segId) { ! super(master, name, indexUUID, branchingFactor, errorRate, partId, ! fromKey, toKey, segId, false); this.resources = resources; *************** *** 483,492 **** * merge operation. */ ! public FullMergeTask(MasterJournal master, String name, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, long commitTime, int segId) { ! super(master, name, branchingFactor, errorRate, partId, fromKey, ! toKey, segId, true); this.commitTime = commitTime; --- 487,496 ---- * merge operation. */ ! public FullMergeTask(MasterJournal master, String name, UUID indexUUID, int branchingFactor, double errorRate, int partId, byte[] fromKey, byte[] toKey, long commitTime, int segId) { ! super(master, name, indexUUID, branchingFactor, errorRate, partId, ! 
fromKey, toKey, segId, true); this.commitTime = commitTime; Index: MetadataIndex.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup/MetadataIndex.java,v retrieving revision 1.6 retrieving revision 1.7 diff -C2 -d -r1.6 -r1.7 *** MetadataIndex.java 11 Mar 2007 11:42:42 -0000 1.6 --- MetadataIndex.java 27 Mar 2007 14:34:22 -0000 1.7 *************** *** 44,47 **** --- 44,55 ---- package com.bigdata.scaleup; + import java.io.Externalizable; + import java.io.IOException; + import java.io.ObjectInput; + import java.io.ObjectOutput; + import java.util.UUID; + + import org.CognitiveWeb.extser.LongPacker; + import com.bigdata.isolation.IsolatedBTree; import com.bigdata.journal.Journal; *************** *** 49,52 **** --- 57,61 ---- import com.bigdata.objndx.BTree; import com.bigdata.objndx.BTreeMetadata; + import com.bigdata.objndx.IIndex; import com.bigdata.objndx.IndexSegment; import com.bigdata.rawstore.IRawStore; *************** *** 59,65 **** * The values are {@link PartitionMetadata} objects. * - * @todo locator logic on a cluster (a socket address in addition to the other - * information). - * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ --- 68,71 ---- *************** *** 72,85 **** * transaction remains which can read those data. This metadata must be * restart-safe so that resources are eventually deleted. - * - * @todo define a UUID so that is at least possible to rename a partitioned - * index? the uuid would be store in the metadata record for the metadata - * index and in each index segment generated for that metadata index. we - * could also define a partition uuid. finally, each btree and index - * segment could have its own uuid. the index segment would also carry the - * uuid of the partition and the partitioned index. 
This would also make - * it possible to determine which index segments belong to which - * partitions of which partitioned indices and effectively reconstruct the - * metadata index for a partitioned index from the data on the ground. */ public class MetadataIndex extends BTree { --- 78,81 ---- *************** *** 88,91 **** --- 84,89 ---- * The name of the metadata index, which is the always the same as the name * under which the corresponding {@link PartitionedIndexView} was registered. + * + * @todo rename as managedIndexName (and the access method as well). */ private final String name; *************** *** 102,105 **** --- 100,137 ---- /** + * The unique identifier for the index whose data metadata is managed by + * this {@link MetadataIndex}. + * <p> + * When using a scale-out index the same <i>indexUUID</i> MUST be assigned + * to each mutable and immutable B+Tree having data for any partition of + * that scale-out index. This makes it possible to work backwards from the + * B+Tree data structures and identify the index to which they belong. This + * field is that UUID. Note that the inherited {@link #getIndexUUID()} + * method provides the UUID of the metadata index NOT the managed index! + * + * @see IIndex#getIndexUUID() + */ + protected final UUID managedIndexUUID; + + /** + * The unique identifier for the index whose data metadata is managed by + * this {@link MetadataIndex}. + * <p> + * When using a scale-out index the same <i>indexUUID</i> MUST be assigned + * to each mutable and immutable B+Tree having data for any partition of + * that scale-out index. This makes it possible to work backwards from the + * B+Tree data structures and identify the index to which they belong. This + * field is that UUID. Note that the inherited {@link #getIndexUUID()} + * method provides the UUID of the metadata index NOT the managed index! 
+ * + * @see IIndex#getIndexUUID() + */ + public UUID getManagedIndexUUID() { + + return managedIndexUUID; + + } + + /** * Create a new {@link MetadataIndex}. * *************** *** 108,121 **** * @param branchingFactor * The branching factor. ! * @param name ! * The name of the metadata index - this MUST be the name under ! * which the corresponding {@link PartitionedIndexView} was ! * registered. */ ! public MetadataIndex(IRawStore store, int branchingFactor, String name) { ! super(store, branchingFactor, PartitionMetadata.Serializer.INSTANCE); ! this.name = name; } --- 140,160 ---- * @param branchingFactor * The branching factor. ! * @param indexUUID ! * The unique identifier for the metadata index. ! * @param managedIndexUUID ! * The unique identifier for the managed scale-out index. ! * @param managedIndexName ! * The name of the managed scale out index. */ ! public MetadataIndex(IRawStore store, int branchingFactor, UUID indexUUID, ! UUID managedIndexUUID, String managedIndexName) { ! super(store, branchingFactor, indexUUID, ! PartitionMetadata.Serializer.INSTANCE); ! this.name = managedIndexName; ! ! // ! this.managedIndexUUID = managedIndexUUID; } *************** *** 125,129 **** super(store, metadata); ! name = ((MetadataIndexMetadata)metadata).name; } --- 164,170 ---- super(store, metadata); ! name = ((MetadataIndexMetadata)metadata).getName(); ! ! managedIndexUUID = ((MetadataIndexMetadata)metadata).getManagedIndexUUID(); } *************** *** 142,154 **** * @version $Id$ */ ! public static class MetadataIndexMetadata extends BTreeMetadata { private static final long serialVersionUID = -7309267778881420043L; /** * The name of the metadata index, which is the always the same as the name * under which the corresponding {@link PartitionedIndexView} was registered. */ ! public final String name; /** --- 183,226 ---- * @version $Id$ */ ! 
public static class MetadataIndexMetadata extends BTreeMetadata implements Externalizable { private static final long serialVersionUID = -7309267778881420043L; + private String name; + private UUID managedIndexUUID; + /** * The name of the metadata index, which is the always the same as the name * under which the corresponding {@link PartitionedIndexView} was registered. */ ! public final String getName() { ! ! return name; ! ! } ! ! /** ! * The unique identifier for the index whose data metadata is managed by ! * this {@link MetadataIndex}. ! * <p> ! * When using a scale-out index the same <i>indexUUID</i> MUST be assigned ! * to each mutable and immutable B+Tree having data for any partition of ! * that scale-out index. This makes it possible to work backwards from the ! * B+Tree data structures and identify the index to which they belong. This ! * field is that UUID. Note that the inherited {@link #getIndexUUID()} ! * method provides the UUID of the metadata index NOT the managed index! ! */ ! public final UUID getManagedIndexUUID() { ! ! return managedIndexUUID; ! ! } ! ! /** ! * De-serialization constructor. ! */ ! public MetadataIndexMetadata() { ! ! } /** *************** *** 159,166 **** super(mdi); ! this.name = mdi.name; } } --- 231,274 ---- super(mdi); ! this.name = mdi.getName(); ! ! 
this.managedIndexUUID = mdi.getManagedIndexUUID(); } + private static final transient int VERSION0 = 0x0; + + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + + super.readExternal(in); + + final int version = (int)LongPacker.unpackLong(in); + + if (version != VERSION0) { + + throw new IOException("Unknown version: version=" + version); + + } + + name = in.readUTF(); + + managedIndexUUID = new UUID(in.readLong()/*MSB*/,in.readLong()/*LSB*/); + + } + + public void writeExternal(ObjectOutput out) throws IOException { + + super.writeExternal(out); + + LongPacker.packLong(out,VERSION0); + + out.writeUTF(name); + + out.writeLong(managedIndexUUID.getMostSignificantBits()); + + out.writeLong(managedIndexUUID.getLeastSignificantBits()); + + } + } Index: MasterJournal.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup/MasterJournal.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** MasterJournal.java 17 Mar 2007 23:14:58 -0000 1.3 --- MasterJournal.java 27 Mar 2007 14:34:22 -0000 1.4 *************** *** 777,783 **** */ final BTree newBTree = (oldBTree instanceof UnisolatedBTree ? new UnisolatedBTree( ! newJournal, oldBTree.getBranchingFactor()) : new BTree(newJournal, oldBTree.getBranchingFactor(), ! oldBTree.getNodeSerializer().getValueSerializer())); // Register the btree under the same name on the new slave. --- 777,785 ---- */ final BTree newBTree = (oldBTree instanceof UnisolatedBTree ? new UnisolatedBTree( ! newJournal, oldBTree.getBranchingFactor(), oldBTree ! .getIndexUUID()) : new BTree(newJournal, oldBTree.getBranchingFactor(), ! oldBTree.getIndexUUID(), oldBTree ! .getNodeSerializer().getValueSerializer())); // Register the btree under the same name on the new slave. 
*************** *** 920,924 **** .getEntryCount(), oldIndex.btree.getRoot().entryIterator(), mseg, Value.Serializer.INSTANCE, true/* useChecksum */, ! null/* new RecordCompressor() */, 0d); /* --- 922,927 ---- .getEntryCount(), oldIndex.btree.getRoot().entryIterator(), mseg, Value.Serializer.INSTANCE, true/* useChecksum */, ! null/* new RecordCompressor() */, 0d, oldIndex.btree ! .getIndexUUID()); /* *************** *** 960,968 **** // build the merged index segment. ! IndexSegmentBuilder builder = new IndexSegmentBuilder(outFile, ! null, mergeItr.nentries, new MergedEntryIterator(mergeItr), ! mseg, oldIndex.btree.getNodeSerializer() ! .getValueSerializer(), false/* useChecksum */, ! null/* recordCompressor */, 0d/* errorRate */); // close the merged leaf iterator (and release its buffer/file). --- 963,971 ---- // build the merged index segment. ! new IndexSegmentBuilder(outFile, null, mergeItr.nentries, ! new MergedEntryIterator(mergeItr), mseg, oldIndex.btree ! .getNodeSerializer().getValueSerializer(), ! false/* useChecksum */, null/* recordCompressor */, ! 0d/* errorRate */, oldIndex.btree.getIndexUUID()); // close the merged leaf iterator (and release its buffer/file). Index: SlaveJournal.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup/SlaveJournal.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** SlaveJournal.java 15 Mar 2007 16:11:10 -0000 1.5 --- SlaveJournal.java 27 Mar 2007 14:34:22 -0000 1.6 *************** *** 46,49 **** --- 46,50 ---- import java.io.File; import java.util.Properties; + import java.util.UUID; import com.bigdata.isolation.IIsolatableIndex; *************** *** 188,198 **** public IIndex registerIndex(String name) { ! return registerIndex(name, new UnisolatedBTree(this)); } /** ! * Registers and returns a {@link PartitionedIndexView} under the given name and ! 
* assigns the supplied {@link IIndex} to absorb writes for that * {@link PartitionedIndexView}. * <p> --- 189,199 ---- public IIndex registerIndex(String name) { ! return registerIndex(name, new UnisolatedBTree(this, UUID.randomUUID())); } /** ! * Registers and returns a {@link PartitionedIndexView} under the given name ! * and assigns the supplied {@link IIndex} to absorb writes for that * {@link PartitionedIndexView}. * <p> *************** *** 207,216 **** * Note: You MUST {@link #commit()} before the registered index will be * either restart-safe or visible to new transactions. - * - * @todo use a prototype model so that the registered btree type is - * preserved? (Only the metadata extensions are preserved right now). - * One way to do this is by putting the constructor on the metadata - * object. Another is to make the btree Serializable and then just - * declare everything else as transient. */ public IIndex registerIndex(String name, IIndex btree) { --- 208,211 ---- *************** *** 236,242 **** } ! MetadataIndex mdi = new MetadataIndex(this, ! BTree.DEFAULT_BRANCHING_FACTOR, name); // create the initial partition which can accept any key. --- 231,244 ---- } ! ! /* ! * @todo the assigned random UUID for the metadata index must be used by ! * all B+Tree objects having data for the metadata index so once we ! * support partitions in the metadata index itself this UUID must be ! * propagated to all of those downstream objects. ! */ MetadataIndex mdi = new MetadataIndex(this, ! BTree.DEFAULT_BRANCHING_FACTOR, UUID.randomUUID(), btree ! .getIndexUUID(), name); // create the initial partition which can accept any key. 
Index: PartitionedIndexView.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/scaleup/PartitionedIndexView.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** PartitionedIndexView.java 11 Mar 2007 11:42:38 -0000 1.1 --- PartitionedIndexView.java 27 Mar 2007 14:34:22 -0000 1.2 *************** *** 52,55 **** --- 52,56 ---- import java.util.Map; import java.util.NoSuchElementException; + import java.util.UUID; import com.bigdata.journal.ICommitter; *************** *** 62,70 **** import com.bigdata.objndx.BatchRemove; import com.bigdata.objndx.EmptyEntryIterator; - import com.bigdata.objndx.ReadOnlyFusedView; import com.bigdata.objndx.IEntryIterator; import com.bigdata.objndx.IFusedView; import com.bigdata.objndx.IIndex; import com.bigdata.objndx.IndexSegment; /** --- 63,71 ---- import com.bigdata.objndx.BatchRemove; import com.bigdata.objndx.EmptyEntryIterator; import com.bigdata.objndx.IEntryIterator; import com.bigdata.objndx.IFusedView; import com.bigdata.objndx.IIndex; import com.bigdata.objndx.IndexSegment; + import com.bigdata.objndx.ReadOnlyFusedView; /** *************** *** 347,350 **** --- 348,364 ---- } + + /* + * IIndex + */ + + /** + * The UUID for the scale-out index. + */ + public UUID getIndexUUID() { + + return mdi.getManagedIndexUUID(); + + } /* |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:57
|
Update of /cvsroot/cweb/bigdata/src/test/com/bigdata/journal In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/test/com/bigdata/journal Modified Files: TestReadCommittedTx.java TestNamedIndices.java AbstractTestTxRunState.java AbstractBTreeWithJournalTestCase.java TestConflictResolution.java TestTx.java TestReadOnlyTx.java StressTestConcurrent.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: AbstractBTreeWithJournalTestCase.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/AbstractBTreeWithJournalTestCase.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** AbstractBTreeWithJournalTestCase.java 22 Mar 2007 21:11:24 -0000 1.3 --- AbstractBTreeWithJournalTestCase.java 27 Mar 2007 14:34:20 -0000 1.4 *************** *** 49,52 **** --- 49,53 ---- import java.util.Properties; + import java.util.UUID; import com.bigdata.objndx.AbstractBTreeTestCase; *************** *** 117,121 **** Journal journal = new Journal(getProperties()); ! BTree btree = new BTree(journal, branchingFactor, SimpleEntry.Serializer.INSTANCE); --- 118,122 ---- Journal journal = new Journal(getProperties()); ! 
BTree btree = new BTree(journal, branchingFactor, UUID.randomUUID(), SimpleEntry.Serializer.INSTANCE); Index: TestConflictResolution.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/TestConflictResolution.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** TestConflictResolution.java 15 Mar 2007 16:11:09 -0000 1.5 --- TestConflictResolution.java 27 Mar 2007 14:34:21 -0000 1.6 *************** *** 48,52 **** package com.bigdata.journal; ! import java.util.Properties; import com.bigdata.isolation.IConflictResolver; --- 48,52 ---- package com.bigdata.journal; ! import java.util.UUID; import com.bigdata.isolation.IConflictResolver; *************** *** 142,146 **** */ ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 142,147 ---- */ ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID ! .randomUUID())); journal.commit(); *************** *** 217,221 **** journal.registerIndex(name, new UnisolatedBTree(journal, ! new SingleValueConflictResolver(k1, v1c))); journal.commit(); --- 218,222 ---- journal.registerIndex(name, new UnisolatedBTree(journal, ! UUID.randomUUID(), new SingleValueConflictResolver(k1, v1c))); journal.commit(); Index: TestNamedIndices.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/TestNamedIndices.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** TestNamedIndices.java 11 Mar 2007 11:42:34 -0000 1.3 --- TestNamedIndices.java 27 Mar 2007 14:34:20 -0000 1.4 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.journal; + import java.util.UUID; + import com.bigdata.objndx.BTree; import com.bigdata.objndx.SimpleEntry; *************** *** 90,96 **** Journal journal = new Journal(getProperties()); ! String name = "abc"; ! 
BTree btree = new BTree(journal, 3, SimpleEntry.Serializer.INSTANCE); assertNull(journal.getIndex(name)); --- 92,101 ---- Journal journal = new Journal(getProperties()); ! final String name = "abc"; ! final UUID indexUUID = UUID.randomUUID(); ! ! BTree btree = new BTree(journal, 3, indexUUID, ! SimpleEntry.Serializer.INSTANCE); assertNull(journal.getIndex(name)); *************** *** 120,123 **** --- 125,129 ---- assertNotNull("btree", btree); + assertEquals("indexUUID", indexUUID, btree.getIndexUUID() ); assertEquals("entryCount", 1, btree.getEntryCount()); assertEquals(v0, btree.lookup(k0)); Index: TestTx.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/TestTx.java,v retrieving revision 1.21 retrieving revision 1.22 diff -C2 -d -r1.21 -r1.22 *** TestTx.java 15 Mar 2007 16:11:09 -0000 1.21 --- TestTx.java 27 Mar 2007 14:34:21 -0000 1.22 *************** *** 48,52 **** package com.bigdata.journal; ! import java.util.Properties; import com.bigdata.isolation.IsolatedBTree; --- 48,52 ---- package com.bigdata.journal; ! import java.util.UUID; import com.bigdata.isolation.IsolatedBTree; *************** *** 121,125 **** // register index in unisolated scope, but do not commit yet. ! journal.registerIndex(name, new UnisolatedBTree(journal, 3)); // start tx1. --- 121,126 ---- // register index in unisolated scope, but do not commit yet. ! journal.registerIndex(name, new UnisolatedBTree(journal, 3, UUID ! .randomUUID())); // start tx1. *************** *** 163,167 **** { ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 164,168 ---- { ! journal.registerIndex(name, new UnisolatedBTree(journal,UUID.randomUUID())); journal.commit(); *************** *** 221,225 **** IIndex index = journal.registerIndex(name, new UnisolatedBTree( ! 
journal, branchingFactor)); assertNull(index.insert(k1, v1)); --- 222,226 ---- IIndex index = journal.registerIndex(name, new UnisolatedBTree( ! journal, branchingFactor, UUID.randomUUID())); assertNull(index.insert(k1, v1)); *************** *** 327,331 **** journal.registerIndex(name, new UnisolatedBTree(journal, ! branchingFactor)); assert(journal.commit()!=0L); --- 328,332 ---- journal.registerIndex(name, new UnisolatedBTree(journal, ! branchingFactor, UUID.randomUUID())); assert(journal.commit()!=0L); *************** *** 437,441 **** * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 438,443 ---- * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID ! .randomUUID())); journal.commit(); *************** *** 526,530 **** * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 528,532 ---- * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID.randomUUID())); journal.commit(); *************** *** 633,637 **** * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 635,639 ---- * register an index and commit the journal. */ ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID.randomUUID())); journal.commit(); *************** *** 1160,1164 **** { ! journal.registerIndex(name,new UnisolatedBTree(journal)); journal.commit(); --- 1162,1166 ---- { ! journal.registerIndex(name,new UnisolatedBTree(journal,UUID.randomUUID())); journal.commit(); *************** *** 1248,1252 **** { ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 1250,1254 ---- { ! 
journal.registerIndex(name, new UnisolatedBTree(journal,UUID.randomUUID())); journal.commit(); Index: StressTestConcurrent.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/StressTestConcurrent.java,v retrieving revision 1.10 retrieving revision 1.11 diff -C2 -d -r1.10 -r1.11 *** StressTestConcurrent.java 22 Mar 2007 21:11:24 -0000 1.10 --- StressTestConcurrent.java 27 Mar 2007 14:34:21 -0000 1.11 *************** *** 54,57 **** --- 54,58 ---- import java.util.Properties; import java.util.Random; + import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; *************** *** 147,151 **** { // Setup the named index and commit the journal. ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 148,152 ---- { // Setup the named index and commit the journal. ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID.randomUUID())); journal.commit(); Index: TestReadCommittedTx.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/TestReadCommittedTx.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** TestReadCommittedTx.java 28 Feb 2007 13:59:09 -0000 1.2 --- TestReadCommittedTx.java 27 Mar 2007 14:34:20 -0000 1.3 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.journal; + import java.util.UUID; + import com.bigdata.isolation.UnisolatedBTree; import com.bigdata.objndx.IIndex; *************** *** 91,95 **** */ IIndex ndx = journal.registerIndex(name, new UnisolatedBTree( ! journal)); ndx.insert(k1, v1); --- 93,97 ---- */ IIndex ndx = journal.registerIndex(name, new UnisolatedBTree( ! journal, UUID.randomUUID())); ndx.insert(k1, v1); *************** *** 191,195 **** // register an index and commit the journal. ! 
journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 193,198 ---- // register an index and commit the journal. ! journal.registerIndex(name, new UnisolatedBTree(journal, ! UUID.randomUUID())); journal.commit(); Index: AbstractTestTxRunState.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/AbstractTestTxRunState.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** AbstractTestTxRunState.java 15 Mar 2007 16:11:09 -0000 1.2 --- AbstractTestTxRunState.java 27 Mar 2007 14:34:20 -0000 1.3 *************** *** 49,52 **** --- 49,53 ---- import java.io.IOException; + import java.util.UUID; import junit.framework.TestSuite; *************** *** 481,485 **** { ! journal.registerIndex(name, new UnisolatedBTree(journal)); journal.commit(); --- 482,487 ---- { ! journal.registerIndex(name, new UnisolatedBTree(journal, UUID ! .randomUUID())); journal.commit(); Index: TestReadOnlyTx.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/journal/TestReadOnlyTx.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** TestReadOnlyTx.java 28 Feb 2007 13:59:09 -0000 1.5 --- TestReadOnlyTx.java 27 Mar 2007 14:34:21 -0000 1.6 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.journal; + import java.util.UUID; + import com.bigdata.isolation.UnisolatedBTree; import com.bigdata.objndx.IIndex; *************** *** 91,95 **** */ IIndex ndx = journal.registerIndex(name, new UnisolatedBTree( ! journal)); ndx.insert(k1, v1); --- 93,97 ---- */ IIndex ndx = journal.registerIndex(name, new UnisolatedBTree( ! journal, UUID.randomUUID())); ndx.insert(k1, v1); |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:56
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/rawstore In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/rawstore Modified Files: Addr.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: Addr.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/rawstore/Addr.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** Addr.java 15 Feb 2007 20:59:21 -0000 1.2 --- Addr.java 27 Mar 2007 14:34:21 -0000 1.3 *************** *** 44,49 **** package com.bigdata.rawstore; ! import java.io.DataInputStream; ! import java.io.DataOutputStream; import java.io.IOException; --- 44,49 ---- package com.bigdata.rawstore; ! import java.io.DataInput; ! import java.io.DataOutput; import java.io.IOException; *************** *** 165,169 **** * @throws IOException */ ! public static void pack(DataOutputStream os,long addr) throws IOException { final int offset = Addr.getOffset(addr); --- 165,169 ---- * @throws IOException */ ! public static void pack(DataOutput os,long addr) throws IOException { final int offset = Addr.getOffset(addr); *************** *** 186,190 **** * @throws IOException */ ! public static long unpack(DataInputStream is) throws IOException { long v = LongPacker.unpackLong(is); --- 186,190 ---- * @throws IOException */ ! public static long unpack(DataInput is) throws IOException { long v = LongPacker.unpackLong(is); |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:33
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/service In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/service Modified Files: IMetadataService.java AbstractServer.java DataServer.java MetadataService.java TransactionService.java IDataService.java DataServiceClient.java Added Files: AbstractClient.java MetadataServer.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: DataServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/DataServer.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** DataServer.java 23 Mar 2007 20:01:25 -0000 1.3 --- DataServer.java 27 Mar 2007 14:34:23 -0000 1.4 *************** *** 52,55 **** --- 52,56 ---- import java.util.Properties; + import com.bigdata.journal.IJournal; import com.sun.jini.start.LifeCycle; *************** *** 100,103 **** --- 101,132 ---- /** + * Extends the behavior to close and delete the journal in use by the data + * service. + */ + protected void destroy() { + + DataService service = (DataService)impl; + + super.destroy(); + + try { + + IJournal journal = service.journal; + + log.info("Closing and deleting: "+journal.getFile()); + + journal.closeAndDelete(); + + log.info("Journal deleted."); + + } catch (Throwable t) { + + log.warn("Could not delete journal: " + t, t); + + } + + } + + /** * Adds jini administration interfaces to the basic {@link DataService}. * *************** *** 144,179 **** public void run() { ! server.shutdownNow(); ! ! 
log.info("Deleting state."); - try { - - journal.closeAndDelete(); - - log.info("Journal deleted."); - - } catch (Throwable t) { - - log.warn("Could not delete journal: " + t, t); - - } - - if (!server.serviceIdFile.delete()) { - - log.warn("Could not delete file: " - + server.serviceIdFile); - - } - - try { - Thread.sleep(3); - } catch (InterruptedException ex) { - } - log.info("Service stopped."); - System.exit(1); - } --- 173,180 ---- public void run() { ! server.destroy(); log.info("Service stopped."); } Index: IDataService.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/IDataService.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** IDataService.java 22 Mar 2007 21:11:24 -0000 1.3 --- IDataService.java 27 Mar 2007 14:34:24 -0000 1.4 *************** *** 82,85 **** --- 82,92 ---- * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * @todo add support for triggers. unisolated triggers must be asynchronous if + * they will take actions with high latency (such as writing on a + * different index partition, which could be remote). Low latency actions + * might include emitting asynchronous messages. transactional triggers + * can have more flexibility since they are under less of a latency + * constraint. */ public interface IDataService extends IRemoteTxCommitProtocol { Index: DataServiceClient.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/DataServiceClient.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** DataServiceClient.java 22 Mar 2007 21:11:24 -0000 1.3 --- DataServiceClient.java 27 Mar 2007 14:34:24 -0000 1.4 *************** *** 84,96 **** } - - /* - * @todo implement remote data service client talking to NIO service - * instance. 
this needs to locate the transaction manager service and - * the metadata service for each index used by the client. - */ - abstract private class NIODataServiceClient implements IDataService { - - } /** --- 84,87 ---- *************** *** 118,125 **** } - // public void map(long tx, String name, byte[] fromKey, byte[] toKey, IMapOp op) throws InterruptedException, ExecutionException { - // delegate.map(tx, name, fromKey, toKey, op); - // } - public RangeQueryResult rangeQuery(long tx, String name, byte[] fromKey, byte[] toKey, int flags) throws InterruptedException, ExecutionException, IOException { return delegate.rangeQuery(tx, name, fromKey, toKey, flags); --- 109,112 ---- --- NEW FILE: AbstractClient.java --- /** The Notice below must appear in each file of the Source Code of any copy you distribute of the Licensed Product. Contributors to any Modifications may add their own copyright notices to identify their own contributions. License: The contents of this file are subject to the CognitiveWeb Open Source License Version 1.1 (the License). You may not copy or use this file, in either source code or executable form, except in compliance with the License. You may obtain a copy of the License from http://www.CognitiveWeb.org/legal/license/ Software distributed under the License is distributed on an AS IS basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. Copyrights: Portions created by or assigned to CognitiveWeb are Copyright (c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact information for CognitiveWeb is available at http://www.CognitiveWeb.org Portions Copyright (c) 2002-2003 Bryan Thompson. Acknowledgements: Special thanks to the developers of the Jabber Open Source License 1.0 (JOSL), from which this License was derived. This License contains terms that differ from JOSL. 
Special thanks to the CognitiveWeb Open Source Contributors for their suggestions and support of the Cognitive Web. Modifications: */ /* * Created on Mar 24, 2007 */ package com.bigdata.service; import java.io.IOException; import java.rmi.Remote; import net.jini.config.Configuration; import net.jini.config.ConfigurationException; import net.jini.config.ConfigurationProvider; import net.jini.core.discovery.LookupLocator; import net.jini.core.lookup.ServiceRegistrar; import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryEvent; import net.jini.discovery.DiscoveryListener; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.LookupDiscovery; import net.jini.discovery.LookupDiscoveryManager; import org.apache.log4j.Logger; import com.bigdata.journal.ITransactionManager; /** * Abstract base class for a bigdata client. * <p> * Clients are configured to perform service lookup with a jini group that * identifies the bigdata federation. Clients begin by discovering the * {@link IMetadataService}. Clients use the {@link IMetadataService} to manage * indices (add/drop/proxy). Once a client has a proxy for an index, it carries * out read and write operations using that proxy. The proxy is responsible for * transparently discovering the {@link IDataService}s on which the index * partitions are located and directing read and write operations appropriately. * <p> * A client may discover and use an {@link ITransactionManager} if needs to use * transactions as opposed to unisolated reads and writes. When the client * requests a transaction, the transaction manager responds with a long integer * containing the transaction identifier - this is simply the unique start time * assigned to that transaction by the transaction manager. The client then * provides that transaction identifier for operations that are isolated within * the transaction. 
When the client is done with the transaction, it must use * the transaction manager to either abort or commit the transaction. * (Transactions that fail to progress may be eventually aborted.) * <p> * When using unisolated operations, the client does not need to resolve or use * the transaction manager and it simply specifies <code>0L</code> as the * transaction identifier for its read and write operations. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ public class AbstractClient implements DiscoveryListener { public static final transient Logger log = Logger .getLogger(AbstractClient.class); /** * The label in the {@link Configuration} file for the service * description. */ protected final static transient String SERVICE_LABEL = "ServiceDescription"; /** * The label in the {@link Configuration} file for the service advertisment * data. */ protected final static transient String ADVERT_LABEL = "AdvertDescription"; private DiscoveryManagement discoveryManager; private Configuration config; /** * The exported proxy for the service implementation object. */ protected Remote proxy; /** * Server startup reads {@link Configuration} data from the file(s) named by * <i>args</i>, starts the service, and advertises the service for * discovery. Aside from the server class to start, the behavior is more or * less entirely parameterized by the {@link Configuration}. * * @param args * The command line arguments. */ protected AbstractClient(String[] args) { // @todo verify that this belongs here. System.setSecurityManager(new SecurityManager()); LookupLocator[] unicastLocators = null; String[] groups = null; try { config = ConfigurationProvider.getInstance(args); /* * Extract how the client will discover services from the * Configuration. 
*/ groups = (String[]) config.getEntry(ADVERT_LABEL, "groups", String[].class, LookupDiscovery.ALL_GROUPS/* default */); unicastLocators = (LookupLocator[]) config .getEntry(ADVERT_LABEL, "unicastLocators", LookupLocator[].class, null/* default */); } catch (ConfigurationException ex) { log.fatal("Configuration error: " + ex, ex); System.exit(1); } try { /* * Note: This class will perform multicast discovery if ALL_GROUPS * is specified and otherwise requires you to specify one or more * unicast locators (URIs of hosts running discovery services). As * an alternative, you can use LookupDiscovery, which always does * multicast discovery. */ discoveryManager = new LookupDiscoveryManager(groups, unicastLocators, this ); // discoveryManager = new LookupDiscovery(groups); } catch (IOException ex) { log.fatal("Lookup service discovery error: " + ex, ex); try { discoveryManager.terminate(); } catch (Throwable t) { /* ignore */ } System.exit(1); } } /** * Return the data service matched on this registrar. * * @param registrar * * @return The data service or <code>null</code> if none was matched. * * @todo this belongs in the metadata service since it needs to discover * data services. It also needs to know when data services start and * stop so it needs updates based on the service template. * * @todo the client on the other hand needs to discover a single metadata * service and a single transaction manager service. if either the * metadata service or the transaction manager service goes down, then * it needs to discover another service so that it can keep working. * * @todo we need to describe the services to be discovered by their primary * interface and only search within a designated group that * corresponds to the bigdata federation of interest - that group is * part of the client configuration. 
*/ public IDataService getDataService(ServiceRegistrar registrar) { Class[] classes = new Class[] {IDataService.class}; ServiceTemplate template = new ServiceTemplate(null, classes, null); IDataService proxy = null; try { proxy = (IDataService) registrar.lookup(template); } catch(java.rmi.RemoteException e) { log.warn(e); } return proxy; } /** * Return an {@link IMetadataService}. * * @param registrar * A service registrar to query. * * @return An {@link IMetadataService} if one was found using that * registrar. */ public IMetadataService getMetadataService(ServiceRegistrar registrar) { Class[] classes = new Class[] {IMetadataService.class}; ServiceTemplate template = new ServiceTemplate(null, classes, null); IMetadataService proxy = null; try { proxy = (IMetadataService) registrar.lookup(template); } catch(java.rmi.RemoteException e) { log.warn(e); } return proxy; } /** * Return an {@link IMetadataService}. * * @param a * An array of registrars to query. * * @return An {@link IMetadataService} if one was found. * * @todo while the client only needs a single metadata service, the data * services themselves must register with all metadata services * discovered in their group (and I must sort out how the determine * primary vs secondary metadata services, e.g., by a status on the * service or some custom api). */ public IMetadataService getMetadataService(ServiceRegistrar[] a) { IMetadataService proxy = null; for(int i=0; i<a.length && proxy == null; i++) { proxy = getMetadataService(a[i]); } return proxy; } /** * Invoked when a lookup service is discarded. */ public void discarded(DiscoveryEvent arg0) { log.info(""+arg0); // TODO Auto-generated method stub } /** * Invoked when a lookup service is discovered. 
*/ public void discovered(DiscoveryEvent arg0) { log.info(""+arg0); // TODO Auto-generated method stub } } Index: TransactionService.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/TransactionService.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** TransactionService.java 17 Mar 2007 23:14:58 -0000 1.2 --- TransactionService.java 27 Mar 2007 14:34:23 -0000 1.3 *************** *** 97,100 **** --- 97,108 ---- * @todo track ground states so that we known when we can release old journals * and index segments? + * + * @todo the transactional model might include a counter for the #of clients + * that have started work on a transaction in order to support distributed + * start/commit protocols. if clients use a workflow model, then they + * could pass the responsibility for the counter along with the + * transaction identifier rather than decrementing the counter themselves. + * It might be good to be able to identify which clients are still working + * on a given transaction. */ public class TransactionService implements ITransactionManager, IServiceShutdown { Index: MetadataService.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/MetadataService.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** MetadataService.java 22 Mar 2007 15:04:15 -0000 1.2 --- MetadataService.java 27 Mar 2007 14:34:23 -0000 1.3 *************** *** 60,63 **** --- 60,89 ---- * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * FIXME Tag each index with a UUID. The UUID needs to appear in the index + * metadata record for each journal and index segment. When it is an named + * (scale-out) index, the UUID of the scale-out index must be used for each + * B+Tree metadata record having data for that index. 
This allows us to map + * backwards from the data structures to the metadata index. Document this in + * the UML model. (I still need to get the correct index UUID to each BTree + * constuctor since they are all using a Random UUID right now.) + * + * @todo Provide a means to reconstruct the metadata index from the journal and + * index segment data files. We tag each journal and index segment with a + * UUID. Each index is also tagged with a UUID, and that UUID is written + * into the metadata record for the index on each journal and index + * segment. Based on those UUIDs we are able to work backwards from the + * data on disk and identify the indices to which they belong. That + * information in combination with the timestamps in the metadata records + * and the first/last keys in the index partition is sufficient to + * regenerate the metadata indices. + * + * @todo A temporal/immortable database can be realized if we never delete old + * journals since they contain the historical committed states of the + * database. The use of index segments would still provide fast read + * performance on recent data, while a suitable twist on the metadata + * index would allow access to those historical states. (E.g., you have to + * be able to access the historical state of the metadata index that + * corresponds to the commit time of interest for the database.) */ public class MetadataService implements IMetadataService, IServiceShutdown { *************** *** 69,73 **** * {@link MetadataIndex} and {@link MasterJournal}. */ ! protected final Journal journal; public MetadataService(Properties properties) { --- 95,99 ---- * {@link MetadataIndex} and {@link MasterJournal}. */ ! protected Journal journal; public MetadataService(Properties properties) { *************** *** 78,89 **** */ ! throw new UnsupportedOperationException(); ! 
} - public static void main(String[] args) { - - } - public InetSocketAddress getDataService(String name,byte[] key) { // TODO Auto-generated method stub --- 104,111 ---- */ ! journal = new Journal(properties); ! } public InetSocketAddress getDataService(String name,byte[] key) { // TODO Auto-generated method stub --- NEW FILE: MetadataServer.java --- /** The Notice below must appear in each file of the Source Code of any copy you distribute of the Licensed Product. Contributors to any Modifications may add their own copyright notices to identify their own contributions. License: The contents of this file are subject to the CognitiveWeb Open Source License Version 1.1 (the License). You may not copy or use this file, in either source code or executable form, except in compliance with the License. You may obtain a copy of the License from http://www.CognitiveWeb.org/legal/license/ Software distributed under the License is distributed on an AS IS basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. Copyrights: Portions created by or assigned to CognitiveWeb are Copyright (c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact information for CognitiveWeb is available at http://www.CognitiveWeb.org Portions Copyright (c) 2002-2003 Bryan Thompson. Acknowledgements: Special thanks to the developers of the Jabber Open Source License 1.0 (JOSL), from which this License was derived. This License contains terms that differ from JOSL. Special thanks to the CognitiveWeb Open Source Contributors for their suggestions and support of the Cognitive Web. 
Modifications: */ /* * Created on Mar 24, 2007 */ package com.bigdata.service; import java.rmi.Remote; import java.rmi.RemoteException; import java.util.Properties; import net.jini.core.lookup.ServiceMatches; import net.jini.core.lookup.ServiceRegistrar; import net.jini.core.lookup.ServiceTemplate; import com.bigdata.journal.IJournal; import com.sun.jini.start.LifeCycle; /** * A metadata server. * <p> * The metadata server is used to manage the life cycles of scale-out indices * and exposes proxies for read and write operations on indices to clients. * Clients use index proxies, which automatically direct reads and writes to the * {@link IDataService} on which specific index partitions are located. * <p> * On startup, the metadata service discovers active data services configured in * the same group. While running, it tracks when data services start and stop so * that it can (re-)allocate index partitions as necessary. * <p> * The metadata server uses a write through pipeline to replicate its data onto * registered secondary metadata servers. If the metadata server fails, clients * will automatically fail over to a secondary metadata server. Only the primary * metadata server actively tracks the state of data services since secondaries * are updated via the write through pipeline to ensure consistency. Secondary * metadata servers will notice if the primary dies and elect a new master. * * @todo note that the service update registration is _persistent_ (assuming * that the service registrar is persistent I suppose) so that will add a * wrinkle to how a bigdata instance must be configured. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ public class MetadataServer extends AbstractServer { /** * @param args */ public MetadataServer(String[] args) { super(args); } /** * @param args * @param lifeCycle */ public MetadataServer(String[] args, LifeCycle lifeCycle) { super(args, lifeCycle); } protected Remote newService(Properties properties) { return new AdministrableMetadataService(this,properties); } /** * Return an {@link IMetadataService}. * * @param registrar * A service registrar to query. * * @return An {@link IMetadataService} if one was found using that * registrar. */ public IMetadataService getMetadataService(ServiceRegistrar registrar) { Class[] classes = new Class[] {IMetadataService.class}; ServiceTemplate template = new ServiceTemplate(null, classes, null); IMetadataService proxy = null; try { proxy = (IMetadataService) registrar.lookup(template); } catch(java.rmi.RemoteException e) { log.warn(e); } return proxy; } /** * Return the data service(s) matched on this registrar. * * @param registrar * * @return The data service or <code>null</code> if none was matched. * * @todo we need to describe the services to be discovered by their primary * interface and only search within a designated group that * corresponds to the bigdata federation of interest - that group is * part of the client configuration. * * @todo how do we ensure that we have seen all data services? If we query * each registrar as it is discovered and then register for updates * there are two ways in which we could miss some instances: (1) new * data services register between the query and the registration for * updates; and (2) the query will not return _ALL_ data services * registered, but only as match as the match limit. 
*/ public ServiceMatches getDataServices(ServiceRegistrar registrar) { Class[] classes = new Class[] {IDataService.class}; ServiceTemplate template = new ServiceTemplate(null, classes, null); try { return registrar.lookup(template,0); } catch(java.rmi.RemoteException e) { log.warn(e); return null; } } /** * Extends the behavior to close and delete the journal in use by the * metadata service. */ protected void destroy() { MetadataService service = (MetadataService)impl; super.destroy(); try { IJournal journal = service.journal; log.info("Closing and deleting: "+journal.getFile()); journal.closeAndDelete(); log.info("Journal deleted."); } catch (Throwable t) { log.warn("Could not delete journal: " + t, t); } } /** * Adds jini administration interfaces to the basic {@link MetadataService}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ protected static class AdministrableMetadataService extends MetadataService implements Remote, RemoteAdministrable, RemoteDestroyAdmin { protected AbstractServer server; /** * @param properties */ public AdministrableMetadataService(AbstractServer server, Properties properties) { super(properties); this.server = server; } public Object getAdmin() throws RemoteException { log.info(""); return server.proxy; } /* * DestroyAdmin */ /** * Destroy the service and deletes any files containing resources (<em>application data</em>) * that was in use by that service. 
* * @throws RemoteException */ public void destroy() throws RemoteException { log.info(""); new Thread() { public void run() { server.destroy(); log.info("Service stopped."); } }.start(); } } } Index: AbstractServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/AbstractServer.java,v retrieving revision 1.4 retrieving revision 1.5 diff -C2 -d -r1.4 -r1.5 *** AbstractServer.java 23 Mar 2007 20:01:25 -0000 1.4 --- AbstractServer.java 27 Mar 2007 14:34:23 -0000 1.5 *************** *** 178,182 **** * The service implementation object. */ ! private Remote impl; /** * The exported proxy for the service implementation object. --- 178,182 ---- * The service implementation object. */ ! protected Remote impl; /** * The exported proxy for the service implementation object. *************** *** 640,643 **** --- 640,666 ---- } + + /** + * Contract is to shutdown the services and <em>destroys</em> its + * persistent state. This implementation calls {@link #shutdownNow()} and + * then deletes the {@link #serviceIdFile}. + * <p> + * Concrete subclasses SHOULD extend this method to destroy their persistent + * state. 
+ */ + protected void destroy() { + + shutdownNow(); + + log.info("Deleting: "+serviceIdFile); + + if (!serviceIdFile.delete()) { + + log.warn("Could not delete file: " + + serviceIdFile); + + } + + } // /** Index: IMetadataService.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/IMetadataService.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** IMetadataService.java 22 Mar 2007 15:04:15 -0000 1.2 --- IMetadataService.java 27 Mar 2007 14:34:23 -0000 1.3 *************** *** 48,52 **** --- 48,54 ---- package com.bigdata.service; + import java.io.IOException; import java.net.InetSocketAddress; + import java.rmi.Remote; /** *************** *** 54,70 **** * <p> * The metadata service maintains locator information for the data service ! * instances responsible for each partition in the named index. Partitions ! * are automatically split when they overflow (~200M) and joined when they ! * underflow (~50M). * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ ! public interface IMetadataService { /** * The approximate number of entries in the index (non-transactional). */ ! public int getEntryCount(String name); /** --- 56,75 ---- * <p> * The metadata service maintains locator information for the data service ! * instances responsible for each partition in the named index. Partitions are ! * automatically split when they overflow (~200M) and joined when they underflow ! * (~50M). ! * <p> ! * Note: methods on this interface MUST throw {@link IOException} in order to be ! * compatible with RMI. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ ! public interface IMetadataService extends Remote { /** * The approximate number of entries in the index (non-transactional). */ ! public int getEntryCount(String name) throws IOException; /** *************** *** 76,80 **** * @return */ ! 
public int rangeCount(String name,byte[] fromKey,byte[] toKey); /** --- 81,85 ---- * @return */ ! public int rangeCount(String name,byte[] fromKey,byte[] toKey) throws IOException; /** *************** *** 95,99 **** * index partitions surrounding that partition. */ ! public InetSocketAddress getDataService(String name, byte[] key); } --- 100,104 ---- * index partitions surrounding that partition. */ ! public InetSocketAddress getDataService(String name, byte[] key) throws IOException; } |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:33
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/objndx Modified Files: IndexSegmentBuilder.java BTree.java IndexSegmentFileStore.java IIndex.java ReadOnlyFusedView.java AbstractBTree.java IndexSegmentExtensionMetadata.java ReadOnlyIndex.java BTreeMetadata.java IndexSegment.java IndexSegmentMetadata.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: IndexSegmentFileStore.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IndexSegmentFileStore.java,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -d -r1.11 -r1.12 *** IndexSegmentFileStore.java 15 Mar 2007 16:11:08 -0000 1.11 --- IndexSegmentFileStore.java 27 Mar 2007 14:34:22 -0000 1.12 *************** *** 172,176 **** try { ! Class cl = Class.forName(extensionMetadata.className); Constructor ctor = cl --- 172,176 ---- try { ! 
Class cl = Class.forName(extensionMetadata.getClassName()); Constructor ctor = cl Index: BTreeMetadata.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/BTreeMetadata.java,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -d -r1.14 -r1.15 *** BTreeMetadata.java 15 Mar 2007 16:11:08 -0000 1.14 --- BTreeMetadata.java 27 Mar 2007 14:34:22 -0000 1.15 *************** *** 2,7 **** --- 2,13 ---- import java.io.Externalizable; + import java.io.IOException; + import java.io.ObjectInput; + import java.io.ObjectOutput; import java.io.Serializable; import java.nio.ByteBuffer; + import java.util.UUID; + + import org.CognitiveWeb.extser.LongPacker; import com.bigdata.io.SerializerUtil; *************** *** 14,22 **** * be re-loaded from the store. * </p> ! * ! * @todo The metadata record is extensible since it uses default java ! * serialization. That makes it a bit fat, which we could address by ! * implementing {@link Externalizable} but this is probably not much of an ! * issue. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> --- 20,28 ---- * be re-loaded from the store. * </p> ! * <p> ! * Note: Derived classes SHOULD extend the {@link Externalizable} interface and ! * explicitly manage serialization versions so that their metadata may evolve in ! * a backward compatible manner. ! * </p> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> *************** *** 25,57 **** * @see BTree#newMetadata(), which you must override if you subclass this class. */ ! public class BTreeMetadata implements Serializable { private static final long serialVersionUID = 4370669592664382720L; ! /** ! * The address of the root node or leaf. ! */ ! public final long addrRoot; ! public final int branchingFactor; ! public final int height; ! public final int nnodes; ! public final int nleaves; ! public final int nentries; ! public final IValueSerializer valueSer; /** ! 
* The name of the class that will be used to re-load the index. */ ! public final String className; ! public final RecordCompressor recordCompressor; ! public final boolean useChecksum; /** --- 31,113 ---- * @see BTree#newMetadata(), which you must override if you subclass this class. */ ! public class BTreeMetadata implements Serializable, Externalizable { private static final long serialVersionUID = 4370669592664382720L; ! private long addrRoot; ! private int branchingFactor; ! private int height; ! private int nnodes; ! private int nleaves; ! private int nentries; ! private IValueSerializer valueSer; ! ! private String className; ! ! private RecordCompressor recordCompressor; ! ! private boolean useChecksum; ! ! private UUID indexUUID; /** ! * The address of the root node or leaf. */ ! public final long getRootAddr() { ! ! return addrRoot; ! ! } ! public final int getBranchingFactor() {return branchingFactor;} ! ! public final int getHeight() {return height;} ! public final int getNodeCount() {return nnodes;} ! ! public final int getLeafCount() {return nleaves;} ! ! public final int getEntryCount() {return nentries;} ! ! public final IValueSerializer getValueSerializer() {return valueSer;} ! ! /** ! * The name of a class derived from {@link BTree} that will be used to ! * re-load the index. ! */ ! public final String getClassName() {return className;} ! ! /** ! * The object that will handle record (de-)compressor -or- <code>null</code> ! * iff records are not compressed. ! */ ! public final RecordCompressor getRecordCompressor() {return recordCompressor;} ! ! /** ! * True iff node/leaf checksums are in use. ! */ ! public final boolean getUseChecksum() {return useChecksum;} ! ! /** ! * The unique identifier for the index whose data is accessible from this ! * metadata record. ! * <p> ! * All {@link AbstractBTree}s having data for the same index will have the ! * same {@link #indexUUID}. A partitioned index is comprised of mutable ! 
* {@link BTree}s and historical read-only {@link IndexSegment}s, all of ! * which will have the same {@link #indexUUID} if they have data for the ! * same scale-out index. ! */ ! public final UUID getIndexUUID() { ! ! return indexUUID; ! ! } /** *************** *** 73,82 **** } ! // /** ! // * De-serialization constructor. ! // */ ! // public BTreeMetadata() { ! // ! // } /** --- 129,138 ---- } ! /** ! * De-serialization constructor. ! */ ! public BTreeMetadata() { ! ! } /** *************** *** 110,113 **** --- 166,171 ---- this.useChecksum = btree.nodeSer.useChecksum; + this.indexUUID = btree.indexUUID; + /* * Note: This can not be invoked here since a derived class will not *************** *** 169,176 **** --- 227,301 ---- .getClass().getName())); sb.append(", useChecksum=" + useChecksum); + sb.append(", indexUUID="+indexUUID); return sb.toString(); } + + private static transient final int VERSION0 = 0x0; + + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + + final int version = (int)LongPacker.unpackLong(in); + + if (version != VERSION0) { + + throw new IOException("Unknown version: version=" + version); + + } + + addrRoot = Addr.unpack(in); + + branchingFactor = (int)LongPacker.unpackLong(in); + + height = (int)LongPacker.unpackLong(in); + + nnodes = (int)LongPacker.unpackLong(in); + + nleaves = (int)LongPacker.unpackLong(in); + + nentries = (int)LongPacker.unpackLong(in); + + valueSer = (IValueSerializer)in.readObject(); + + className = in.readUTF(); + + recordCompressor = (RecordCompressor)in.readObject(); + + useChecksum = in.readBoolean(); + + indexUUID = new UUID(in.readLong()/*MSB*/,in.readLong()/*LSB*/); + + } + + public void writeExternal(ObjectOutput out) throws IOException { + + LongPacker.packLong(out,VERSION0); + + Addr.pack(out, addrRoot); + + LongPacker.packLong(out, branchingFactor); + + LongPacker.packLong(out, height); + + LongPacker.packLong(out, nnodes); + + LongPacker.packLong(out, nleaves); + + 
LongPacker.packLong(out, nentries); + + out.writeObject(valueSer); + + out.writeUTF(className); + + out.writeObject( recordCompressor ); + + out.writeBoolean(useChecksum); + + out.writeLong(indexUUID.getMostSignificantBits()); + + out.writeLong(indexUUID.getLeastSignificantBits()); + + } } Index: IIndex.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IIndex.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** IIndex.java 8 Feb 2007 21:32:12 -0000 1.1 --- IIndex.java 27 Mar 2007 14:34:22 -0000 1.2 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.objndx; + import java.util.UUID; + /** * <p> *************** *** 59,61 **** --- 61,73 ---- public interface IIndex extends ISimpleBTree, IBatchBTree { + /** + * The unique identifier for the index whose data is stored in this B+Tree + * data structure. When using a scale-out index the same <i>indexUUID</i> + * MUST be assigned to each mutable and immutable B+Tree having data for any + * partition of that scale-out index. This makes it possible to work + * backwards from the B+Tree data structures and identify the index to which + * they belong. 
+ */ + public UUID getIndexUUID(); + } Index: BTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/BTree.java,v retrieving revision 1.39 retrieving revision 1.40 diff -C2 -d -r1.39 -r1.40 *** BTree.java 15 Mar 2007 16:11:08 -0000 1.39 --- BTree.java 27 Mar 2007 14:34:22 -0000 1.40 *************** *** 48,51 **** --- 48,52 ---- import java.lang.reflect.Constructor; + import java.util.UUID; import com.bigdata.cache.HardReferenceQueue; *************** *** 307,316 **** } - public NodeSerializer getNodeSerializer() { - - return nodeSer; - - } - /** * The metadata record used to load the last state of the index that was --- 308,311 ---- *************** *** 354,382 **** protected int nentries; - // public IAbstractNode getRoot() { - // - // return root; - // - // } - /** ! * Constructor for a new btree with a default hard reference queue policy * and no record compression. * * @param store * @param branchingFactor ! * @param valSer */ ! public BTree(IRawStore store, int branchingFactor, IValueSerializer valSer) { ! this(store, branchingFactor, new HardReferenceQueue<PO>( new DefaultEvictionListener(), BTree.DEFAULT_HARD_REF_QUEUE_CAPACITY, ! BTree.DEFAULT_HARD_REF_QUEUE_SCAN), valSer, null/*recordCompressor*/); } /** ! * Constructor for a new btree. * * @param store --- 349,380 ---- protected int nentries; /** ! * Constructor for a new B+Tree with a default hard reference queue policy * and no record compression. * * @param store + * The persistence store. * @param branchingFactor ! * The branching factor. ! * @param indexUUID ! * The unique identifier for the index. All B+Tree objects having ! * data for the same scale-out index MUST have the same ! * indexUUID. Otherwise a {@link UUID#randomUUID()} SHOULD be ! * used. ! * @param valueSer ! * Object that knows how to (de-)serialize the values in a ! * {@link Leaf}. */ ! 
public BTree(IRawStore store, int branchingFactor, UUID indexUUID, IValueSerializer valSer) { ! this(store, branchingFactor, indexUUID, new HardReferenceQueue<PO>( new DefaultEvictionListener(), BTree.DEFAULT_HARD_REF_QUEUE_CAPACITY, ! BTree.DEFAULT_HARD_REF_QUEUE_SCAN), valSer, null/* recordCompressor */); } /** ! * Constructor for a new B+Tree. * * @param store *************** *** 384,387 **** --- 382,390 ---- * @param branchingFactor * The branching factor. + * @param indexUUID + * The unique identifier for the index. All B+Tree objects having + * data for the same scale-out index MUST have the same + * indexUUID. Otherwise a {@link UUID#randomUUID()} SHOULD be + * used. * @param headReferenceQueue * The hard reference queue. The minimum capacity is 2 to avoid *************** *** 401,404 **** --- 404,408 ---- IRawStore store, int branchingFactor, + UUID indexUUID, HardReferenceQueue<PO> hardReferenceQueue, IValueSerializer valueSer, *************** *** 421,425 **** * has never been observed. */ ! !store.isFullyBuffered()/* useChecksum */ ); --- 425,430 ---- * has never been observed. */ ! !store.isFullyBuffered(),/* useChecksum */ ! indexUUID ); *************** *** 442,446 **** /** ! * Constructor for an existing btree. * * @param store --- 447,451 ---- /** ! * Load an existing B+Tree from the store. * * @param store *************** *** 451,471 **** * The hard reference queue for {@link Leaf}s. * ! * @see BTreeMetadata#load(IRawStore, long), which will re-load a ! * {@link BTree} or derived class from its {@link BTreeMetadata} ! * record. * * @see #newMetadata(), which must be overriden if you subclass * {@link BTreeMetadata} */ ! public BTree(IRawStore store, BTreeMetadata metadata, HardReferenceQueue<PO> hardReferenceQueue) { ! super(store, metadata.branchingFactor, 0/* initialBufferCapacity will be estimated */, hardReferenceQueue, PackedAddressSerializer.INSTANCE, ! metadata.valueSer, NodeFactory.INSTANCE, ! metadata.recordCompressor,// ! 
metadata.useChecksum // use checksum iff used on create. ); --- 456,477 ---- * The hard reference queue for {@link Leaf}s. * ! * @see #load(IRawStore, long), which will re-load a {@link BTree} or ! * derived class from the {@link Addr address} of its ! * {@link BTreeMetadata metadata} record. * * @see #newMetadata(), which must be overriden if you subclass * {@link BTreeMetadata} */ ! protected BTree(IRawStore store, BTreeMetadata metadata, HardReferenceQueue<PO> hardReferenceQueue) { ! super(store, metadata.getBranchingFactor(), 0/* initialBufferCapacity will be estimated */, hardReferenceQueue, PackedAddressSerializer.INSTANCE, ! metadata.getValueSerializer(), NodeFactory.INSTANCE, ! metadata.getRecordCompressor(),// ! metadata.getUseChecksum(), // use checksum iff used on create. ! metadata.getIndexUUID() ); *************** *** 474,481 **** // initialize mutable fields from the immutable metadata record. ! this.height = metadata.height; ! this.nnodes = metadata.nnodes; ! this.nleaves = metadata.nleaves; ! this.nentries = metadata.nentries; /* --- 480,487 ---- // initialize mutable fields from the immutable metadata record. ! this.height = metadata.getHeight(); ! this.nnodes = metadata.getNodeCount(); ! this.nleaves = metadata.getLeafCount(); ! this.nentries = metadata.getEntryCount(); /* *************** *** 541,545 **** */ ! root = readNodeOrLeaf(metadata.addrRoot); } --- 547,551 ---- */ ! root = readNodeOrLeaf(metadata.getRootAddr()); } *************** *** 650,654 **** if (metadata != null && (root == null || !root.dirty ! && metadata.addrRoot == root.getIdentity())) { /* --- 656,660 ---- if (metadata != null && (root == null || !root.dirty ! && metadata.getRootAddr() == root.getIdentity())) { /* *************** *** 727,731 **** try { ! Class cl = Class.forName(metadata.className); Constructor ctor = cl.getConstructor(new Class[] { --- 733,737 ---- try { ! 
Class cl = Class.forName(metadata.getClassName()); Constructor ctor = cl.getConstructor(new Class[] { Index: IndexSegmentBuilder.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IndexSegmentBuilder.java,v retrieving revision 1.28 retrieving revision 1.29 diff -C2 -d -r1.28 -r1.29 *** IndexSegmentBuilder.java 15 Mar 2007 16:11:08 -0000 1.28 --- IndexSegmentBuilder.java 27 Mar 2007 14:34:22 -0000 1.29 *************** *** 59,62 **** --- 59,63 ---- import java.text.NumberFormat; import java.util.NoSuchElementException; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 198,201 **** --- 199,212 ---- /** + * The unique identifier for the index whose data is stored in this B+Tree + * data structure. When using a scale-out index the same <i>indexUUID</i> + * MUST be assigned to each mutable and immutable B+Tree having data for any + * partition of that scale-out index. This makes it possible to work + * backwards from the B+Tree data structures and identify the index to which + * they belong. + */ + final protected UUID indexUUID; + + /** * Used to serialize the nodes and leaves of the output tree. */ *************** *** 400,404 **** this(outFile, tmpDir, btree.getEntryCount(), btree.entryIterator(), m, btree.nodeSer.valueSerializer, true/* useChecksum */, ! null/* new RecordCompressor() */, errorRate); } --- 411,415 ---- this(outFile, tmpDir, btree.getEntryCount(), btree.entryIterator(), m, btree.nodeSer.valueSerializer, true/* useChecksum */, ! null/* new RecordCompressor() */, errorRate, btree.indexUUID); } *************** *** 448,451 **** --- 459,471 ---- * option should only be enabled if you know that point access * tests are a hotspot for an index. + * @param indexUUID + * The unique identifier for the index whose data is stored in + * this B+Tree data structure. 
When using a scale-out index the + * same <i>indexUUID</i> MUST be assigned to each mutable and + * immutable B+Tree having data for any partition of that + * scale-out index. This makes it possible to work backwards from + * the B+Tree data structures and identify the index to which + * they belong. See {@link AbstractBTree#getIndexUUID()}. + * * @throws IOException */ *************** *** 458,462 **** IEntryIterator entryIterator, final int m, IValueSerializer valueSerializer, boolean useChecksum, ! RecordCompressor recordCompressor, final double errorRate // , final Map<String, Serializable> metadataMap ) --- 478,483 ---- IEntryIterator entryIterator, final int m, IValueSerializer valueSerializer, boolean useChecksum, ! RecordCompressor recordCompressor, final double errorRate, ! final UUID indexUUID // , final Map<String, Serializable> metadataMap ) *************** *** 469,475 **** --- 490,498 ---- assert valueSerializer != null; assert errorRate >= 0d; + assert indexUUID != null; this.useChecksum = useChecksum; this.recordCompressor = recordCompressor; + this.indexUUID = indexUUID; final long begin = System.currentTimeMillis(); *************** *** 1452,1456 **** plan.nentries, maxNodeOrLeafLength, addrLeaves, addrNodes, addrRoot, addrExtensionMetadata, addrBloom, errorRate, out ! .length(), now); md.write(out); --- 1475,1479 ---- plan.nentries, maxNodeOrLeafLength, addrLeaves, addrNodes, addrRoot, addrExtensionMetadata, addrBloom, errorRate, out ! .length(), indexUUID, now); md.write(out); Index: IndexSegmentMetadata.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IndexSegmentMetadata.java,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -d -r1.11 -r1.12 *** IndexSegmentMetadata.java 8 Mar 2007 18:14:05 -0000 1.11 --- IndexSegmentMetadata.java 27 Mar 2007 14:34:23 -0000 1.12 *************** *** 43,47 **** * MUST be adjusted down from its original value of 256. */ ! 
static final int SIZEOF_UNUSED = 256; /** --- 43,47 ---- * MUST be adjusted down from its original value of 256. */ ! static final int SIZEOF_UNUSED = 240; /** *************** *** 52,56 **** SIZEOF_VERSION + // Bytes.SIZEOF_LONG + // timestamp0 ! Bytes.SIZEOF_UUID + // index segment UUID. SIZEOF_BRANCHING_FACTOR + // branchingFactor SIZEOF_COUNTS * 4 + // height, #leaves, #nodes, #entries --- 52,56 ---- SIZEOF_VERSION + // Bytes.SIZEOF_LONG + // timestamp0 ! Bytes.SIZEOF_UUID + // segment UUID. SIZEOF_BRANCHING_FACTOR + // branchingFactor SIZEOF_COUNTS * 4 + // height, #leaves, #nodes, #entries *************** *** 60,63 **** --- 60,64 ---- Bytes.SIZEOF_DOUBLE + // errorRate Bytes.SIZEOF_LONG + // file size + Bytes.SIZEOF_UUID + // index UUID. SIZEOF_UNUSED + // available bytes for future versions. Bytes.SIZEOF_LONG // timestamp1 *************** *** 75,81 **** /** ! * UUID for this {@link IndexSegment}. */ ! final public UUID uuid; /** --- 76,85 ---- /** ! * UUID for this {@link IndexSegment} (it is a unique identifier for ! * the index segment resource). ! * ! * @see #indexUUID */ ! final public UUID segmentUUID; /** *************** *** 171,174 **** --- 175,192 ---- /** + * The unique identifier for the index whose data is on this + * {@link IndexSegment}. + * <p> + * All {@link AbstractBTree}s having data for the same index will have the + * same {@link #indexUUID}. A partitioned index is comprised of mutable + * {@link BTree}s and historical read-only {@link IndexSegment}s, all of + * which will have the same {@link #indexUUID} if they have data for the + * same scale-out index. + * + * @see #segmentUUID + */ + final public UUID indexUUID; + + /** * Timestamp when the {@link IndexSegment} was generated. */ *************** *** 207,211 **** final long timestamp0 = raf.readLong(); ! uuid = new UUID(raf.readLong()/*MSB*/, raf.readLong()/*LSB*/); branchingFactor = raf.readInt(); --- 225,229 ---- final long timestamp0 = raf.readLong(); ! 
segmentUUID = new UUID(raf.readLong()/*MSB*/, raf.readLong()/*LSB*/); branchingFactor = raf.readInt(); *************** *** 242,245 **** --- 260,265 ---- } + indexUUID = new UUID(raf.readLong()/*MSB*/, raf.readLong()/*LSB*/); + raf.skipBytes(SIZEOF_UNUSED); *************** *** 266,270 **** int maxNodeOrLeafLength, long addrLeaves, long addrNodes, long addrRoot, long addrExtensionMetadata, long addrBloom, ! double errorRate, long length, long timestamp) { assert branchingFactor >= BTree.MIN_BRANCHING_FACTOR; --- 286,290 ---- int maxNodeOrLeafLength, long addrLeaves, long addrNodes, long addrRoot, long addrExtensionMetadata, long addrBloom, ! double errorRate, long length, UUID indexUUID, long timestamp) { assert branchingFactor >= BTree.MIN_BRANCHING_FACTOR; *************** *** 307,311 **** assert timestamp != 0L; ! this.uuid = UUID.randomUUID(); this.branchingFactor = branchingFactor; --- 327,331 ---- assert timestamp != 0L; ! this.segmentUUID = UUID.randomUUID(); this.branchingFactor = branchingFactor; *************** *** 337,340 **** --- 357,362 ---- this.length = length; + this.indexUUID = indexUUID; + this.timestamp = timestamp; *************** *** 359,365 **** raf.writeLong(timestamp); ! raf.writeLong(uuid.getMostSignificantBits()); ! raf.writeLong(uuid.getLeastSignificantBits()); raf.writeInt(branchingFactor); --- 381,387 ---- raf.writeLong(timestamp); ! raf.writeLong(segmentUUID.getMostSignificantBits()); ! raf.writeLong(segmentUUID.getLeastSignificantBits()); raf.writeInt(branchingFactor); *************** *** 390,393 **** --- 412,419 ---- raf.writeLong(length); + + raf.writeLong(indexUUID.getMostSignificantBits()); + + raf.writeLong(indexUUID.getLeastSignificantBits()); raf.skipBytes(SIZEOF_UNUSED); *************** *** 405,409 **** sb.append("magic="+Integer.toHexString(MAGIC)); ! sb.append(", uuid="+uuid); sb.append(", branchingFactor="+branchingFactor); sb.append(", height=" + height); --- 431,435 ---- sb.append("magic="+Integer.toHexString(MAGIC)); ! 
sb.append(", segmentUUID="+segmentUUID); sb.append(", branchingFactor="+branchingFactor); sb.append(", height=" + height); *************** *** 420,423 **** --- 446,450 ---- sb.append(", errorRate=" + errorRate); sb.append(", length=" + length); + sb.append(", indexUUID="+indexUUID); sb.append(", timestamp=" + new Date(timestamp)); Index: IndexSegmentExtensionMetadata.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IndexSegmentExtensionMetadata.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** IndexSegmentExtensionMetadata.java 8 Mar 2007 18:14:05 -0000 1.1 --- IndexSegmentExtensionMetadata.java 27 Mar 2007 14:34:22 -0000 1.2 *************** *** 48,71 **** package com.bigdata.objndx; import java.io.Serializable; import com.bigdata.io.SerializerUtil; /** ! * The base class for variable length metadataMap and extension metadataMap for an ! * {@link IndexSegment} as persisted on an {@link IndexSegmentFileStore}. The ! * {@link IndexSegmentMetadata} class is NOT extensible and is used solely for ! * fixed length metadataMap common to all {@link IndexSegment}s, including the ! * root addresses required to bootstrap the load of an {@link IndexSegment} from ! * a file. In contrast, this class provides for both required variable length ! * metadataMap and arbitrary extension metadataMap for an {@link IndexSegment}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ ! 
public class IndexSegmentExtensionMetadata implements Serializable { private static final long serialVersionUID = 4846316492768402991L; /** * Either {@link IndexSegment} or a derived class that will be instantiated --- 48,92 ---- package com.bigdata.objndx; + import java.io.Externalizable; + import java.io.IOException; + import java.io.ObjectInput; + import java.io.ObjectOutput; import java.io.Serializable; + import java.util.UUID; + + import org.CognitiveWeb.extser.LongPacker; import com.bigdata.io.SerializerUtil; /** ! * <p> ! * The base class for variable length metadata and extension metadata for an ! * {@link IndexSegment} as persisted on an {@link IndexSegmentFileStore}. ! * </p> ! * <p> ! * Note: The {@link IndexSegmentMetadata} class is NOT extensible and is used ! * solely for fixed length metadata common to all {@link IndexSegment}s, ! * including the root addresses required to bootstrap the load of an ! * {@link IndexSegment} from a file. In contrast, this class provides for both ! * required variable length metadata and arbitrary extension metadata for an ! * {@link IndexSegment}. ! * </p> ! * <p> ! * Note: Derived classes SHOULD extend the {@link Externalizable} interface and ! * explicitly manage serialization versions so that their metadata may evolve in ! * a backward compatible manner. ! * </p> * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ ! public class IndexSegmentExtensionMetadata implements Serializable, Externalizable { private static final long serialVersionUID = 4846316492768402991L; + private String className; + private IValueSerializer valSer; + private RecordCompressor recordCompressor; + /** * Either {@link IndexSegment} or a derived class that will be instantiated *************** *** 73,82 **** * {@link IndexSegmentFileStore#load()} */ ! public final String className; /** * The serializer used for the values in the leaves of the index. */ ! 
public final IValueSerializer valSer; /** --- 94,111 ---- * {@link IndexSegmentFileStore#load()} */ ! public final String getClassName() { ! ! return className; ! ! } /** * The serializer used for the values in the leaves of the index. */ ! public final IValueSerializer getValueSerializer() { ! ! return valSer; ! ! } /** *************** *** 86,114 **** * @todo modify to use an interface. */ ! final public RecordCompressor recordCompressor; ! ! // /** ! // * When non-null, a map containing extension metadata. ! // * ! // * @see #getMetadata(String name) ! // */ ! // final private Map<String, Serializable> metadataMap; ! // ! // /** ! // * Return the metadata object stored under the key. ! // * ! // * @param name ! // * The key. ! // * ! // * @return The metadata object or <code>null</code> if there is nothing ! // * stored under that key. ! // */ ! // public Serializable getMetadata(String name) { ! // ! // if(metadataMap==null) return null; ! // ! // return metadataMap.get(name); ! // ! // } /** --- 115,130 ---- * @todo modify to use an interface. */ ! final public RecordCompressor getRecordCompressor() { ! ! return recordCompressor; ! ! } ! ! /** ! * De-serialization constructor. ! */ ! public IndexSegmentExtensionMetadata() { ! ! } /** *************** *** 127,139 **** * write the nodes and leaves of the {@link IndexSegment}. */ - // * - // * @param metadataMap - // * An optional serializable map containing application defined - // * extension metadata. The map will be serialized with the - // * {@link IndexSegmentExtensionMetadata} object as part of the - // * {@link IndexSegmentFileStore}. 
public IndexSegmentExtensionMetadata(Class cl, IValueSerializer valSer, RecordCompressor recordCompressor) { - // Map<String, Serializable> metadataMap) { if( cl == null ) throw new IllegalArgumentException(); --- 143,148 ---- *************** *** 154,159 **** this.recordCompressor = recordCompressor; - // this.metadataMap = metadataMap; - } --- 163,166 ---- *************** *** 177,179 **** --- 184,218 ---- } + private static final transient int VERSION0 = 0x0; + + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + + final int version = (int)LongPacker.unpackLong(in); + + if (version != VERSION0) { + + throw new IOException("Unknown version: version=" + version); + + } + + className = in.readUTF(); + + valSer = (IValueSerializer) in.readObject(); + + recordCompressor = (RecordCompressor) in.readObject(); + + } + + public void writeExternal(ObjectOutput out) throws IOException { + + LongPacker.packLong(out,VERSION0); + + out.writeUTF(className); + + out.writeObject(valSer); + + out.writeObject(recordCompressor); + + } + } Index: AbstractBTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/AbstractBTree.java,v retrieving revision 1.20 retrieving revision 1.21 diff -C2 -d -r1.20 -r1.21 *** AbstractBTree.java 15 Mar 2007 16:11:08 -0000 1.20 --- AbstractBTree.java 27 Mar 2007 14:34:22 -0000 1.21 *************** *** 51,54 **** --- 51,55 ---- import java.nio.ByteBuffer; import java.util.Iterator; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 144,147 **** --- 145,158 ---- /** + * The unique identifier for the index whose data is stored in this B+Tree + * data structure. When using a scale-out index the same <i>indexUUID</i> + * MUST be assigned to each mutable and immutable B+Tree having data for any + * partition of that scale-out index. 
This makes it possible to work + * backwards from the B+Tree data structures and identify the index to which + * they belong. + */ + final protected UUID indexUUID; + + /** * The branching factor for the btree. */ *************** *** 304,307 **** --- 315,326 ---- * reads are against memory which is presumably already parity * checked. + * @param indexUUID + * The unique identifier for the index whose data is stored in + * this B+Tree data structure. When using a scale-out index the + * same <i>indexUUID</i> MUST be assigned to each mutable and + * immutable B+Tree having data for any partition of that + * scale-out index. This makes it possible to work backwards from + * the B+Tree data structures and identify the index to which + * they belong. */ protected AbstractBTree(IRawStore store, int branchingFactor, *************** *** 310,314 **** IAddressSerializer addrSer, IValueSerializer valueSer, INodeFactory nodeFactory, RecordCompressor recordCompressor, ! boolean useChecksum) { assert store != null; --- 329,333 ---- IAddressSerializer addrSer, IValueSerializer valueSer, INodeFactory nodeFactory, RecordCompressor recordCompressor, ! boolean useChecksum, UUID indexUUID) { assert store != null; *************** *** 324,327 **** --- 343,348 ---- assert nodeFactory != null; + if(indexUUID == null) throw new IllegalArgumentException("indexUUID"); + this.store = store; *************** *** 334,337 **** --- 355,360 ---- valueSer, recordCompressor, useChecksum); + this.indexUUID = indexUUID; + } *************** *** 477,481 **** * {@link IIndex}. */ ! public NodeSerializer getNodeSerializer() { return nodeSer; --- 500,504 ---- * {@link IIndex}. */ ! final public NodeSerializer getNodeSerializer() { return nodeSer; *************** *** 484,487 **** --- 507,524 ---- /** + * The unique identifier for the index whose data is stored in this B+Tree + * data structure. 
When using a scale-out index the same <i>indexUUID</i> + * MUST be assigned to each mutable and immutable B+Tree having data for any + * partition of that scale-out index. This makes it possible to work + * backwards from the B+Tree data structures and identify the index to which + * they belong. + */ + final public UUID getIndexUUID() { + + return indexUUID; + + } + + /** * The root of the btree. This is initially a leaf until the leaf is split, * at which point it is replaced by a node. The root is also replaced each Index: IndexSegment.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/IndexSegment.java,v retrieving revision 1.18 retrieving revision 1.19 diff -C2 -d -r1.18 -r1.19 *** IndexSegment.java 15 Mar 2007 16:11:08 -0000 1.18 --- IndexSegment.java 27 Mar 2007 14:34:22 -0000 1.19 *************** *** 132,139 **** new CustomAddressSerializer(Addr .getOffset(fileStore.metadata.addrNodes)), ! fileStore.extensionMetadata.valSer, ImmutableNodeFactory.INSTANCE, ! fileStore.extensionMetadata.recordCompressor, ! fileStore.metadata.useChecksum); // Type-safe reference to the backing store. --- 132,139 ---- new CustomAddressSerializer(Addr .getOffset(fileStore.metadata.addrNodes)), ! fileStore.extensionMetadata.getValueSerializer(), ImmutableNodeFactory.INSTANCE, ! fileStore.extensionMetadata.getRecordCompressor(), ! fileStore.metadata.useChecksum, fileStore.metadata.indexUUID); // Type-safe reference to the backing store. 
Index: ReadOnlyIndex.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/ReadOnlyIndex.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** ReadOnlyIndex.java 17 Feb 2007 21:34:21 -0000 1.1 --- ReadOnlyIndex.java 27 Mar 2007 14:34:22 -0000 1.2 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.objndx; + import java.util.UUID; + /** * A fly-weight wrapper that does not permit write operations and reads *************** *** 66,69 **** --- 68,75 ---- } + + public UUID getIndexUUID() { + return src.getIndexUUID(); + } public boolean contains(byte[] key) { Index: ReadOnlyFusedView.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/objndx/ReadOnlyFusedView.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** ReadOnlyFusedView.java 11 Mar 2007 11:41:45 -0000 1.1 --- ReadOnlyFusedView.java 27 Mar 2007 14:34:22 -0000 1.2 *************** *** 50,53 **** --- 50,54 ---- import java.util.Arrays; import java.util.NoSuchElementException; + import java.util.UUID; /** *************** *** 89,92 **** --- 90,99 ---- * for a given key by lookup() and which value is retained by * rangeQuery(). + * + * @exception IllegalArgumentException + * if a source is used more than once. 
+ * @exception IllegalArgumentException + * unless all sources have the same + * {@link IIndex#getIndexUUID()} */ public ReadOnlyFusedView(final AbstractBTree[] srcs) { *************** *** 116,119 **** --- 123,131 ---- throw new IllegalArgumentException( "source used more than once"); + + if (srcs[i].getIndexUUID().equals(srcs[j].getIndexUUID())) { + throw new IllegalArgumentException( + "Sources have different index UUIDs"); + } } *************** *** 125,128 **** --- 137,144 ---- } + public UUID getIndexUUID() { + return srcs[0].getIndexUUID(); + } + /** * Write operations are not supported on the view. |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:33
|
Update of /cvsroot/cweb/bigdata/src/test/com/bigdata/isolation In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/test/com/bigdata/isolation Modified Files: TestIsolatedBTree.java TestUnisolatedBTree.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: TestUnisolatedBTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/isolation/TestUnisolatedBTree.java,v retrieving revision 1.6 retrieving revision 1.7 diff -C2 -d -r1.6 -r1.7 *** TestUnisolatedBTree.java 11 Mar 2007 11:42:49 -0000 1.6 --- TestUnisolatedBTree.java 27 Mar 2007 14:34:24 -0000 1.7 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.isolation; + import java.util.UUID; + import com.bigdata.objndx.AbstractBTree; import com.bigdata.objndx.AbstractBTreeTestCase; *************** *** 98,102 **** { ! UnisolatedBTree btree = new UnisolatedBTree(store,branchingFactor,null); assertTrue(store==btree.getStore()); --- 100,105 ---- { ! UnisolatedBTree btree = new UnisolatedBTree(store,branchingFactor,UUID.randomUUID(), ! null); assertTrue(store==btree.getStore()); *************** *** 116,120 **** UnisolatedBTree btree = new UnisolatedBTree(store, branchingFactor, ! conflictResolver); assertTrue(store == btree.getStore()); --- 119,123 ---- UnisolatedBTree btree = new UnisolatedBTree(store, branchingFactor, ! UUID.randomUUID(), conflictResolver); assertTrue(store == btree.getStore()); *************** *** 147,151 **** UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, null); /* --- 150,154 ---- UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 
3, UUID.randomUUID(), null); /* *************** *** 224,228 **** UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, null); btree.insert(k3,v3); --- 227,231 ---- UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, UUID.randomUUID(), null); btree.insert(k3,v3); *************** *** 286,290 **** UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, null); /* --- 289,293 ---- UnisolatedBTree btree = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, UUID.randomUUID(), null); /* *************** *** 396,400 **** final byte[] v7a = new byte[]{7,1}; ! UnisolatedBTree btree = new UnisolatedBTree(store, 3, null); /* --- 399,403 ---- final byte[] v7a = new byte[]{7,1}; ! UnisolatedBTree btree = new UnisolatedBTree(store, 3, UUID.randomUUID(), null); /* Index: TestIsolatedBTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/isolation/TestIsolatedBTree.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** TestIsolatedBTree.java 15 Mar 2007 16:11:14 -0000 1.5 --- TestIsolatedBTree.java 27 Mar 2007 14:34:24 -0000 1.6 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.isolation; + import java.util.UUID; + import com.bigdata.journal.TestTx; import com.bigdata.objndx.AbstractBTreeTestCase; *************** *** 124,128 **** { ! UnisolatedBTree src = new UnisolatedBTree(store,branchingFactor,null); IsolatedBTree btree = new IsolatedBTree(store,src); --- 126,131 ---- { ! UnisolatedBTree src = new UnisolatedBTree(store, branchingFactor, ! UUID.randomUUID(), null); IsolatedBTree btree = new IsolatedBTree(store,src); *************** *** 146,150 **** UnisolatedBTree src = new UnisolatedBTree(store, branchingFactor, ! conflictResolver); IsolatedBTree btree = new IsolatedBTree(store,src); --- 149,153 ---- UnisolatedBTree src = new UnisolatedBTree(store, branchingFactor, ! 
UUID.randomUUID(), conflictResolver); IsolatedBTree btree = new IsolatedBTree(store,src); *************** *** 186,190 **** */ UnisolatedBTree src = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, null); src.insert(k3, v3); --- 189,193 ---- */ UnisolatedBTree src = new UnisolatedBTree(new SimpleMemoryRawStore(), ! 3, UUID.randomUUID(), null); src.insert(k3, v3); |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:32
|
Update of /cvsroot/cweb/bigdata/src/test/com/bigdata/scaleup In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/test/com/bigdata/scaleup Modified Files: TestMetadataIndex.java TestPartitionedJournal.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: TestMetadataIndex.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/scaleup/TestMetadataIndex.java,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -d -r1.11 -r1.12 *** TestMetadataIndex.java 22 Mar 2007 21:11:25 -0000 1.11 --- TestMetadataIndex.java 27 Mar 2007 14:34:24 -0000 1.12 *************** *** 52,55 **** --- 52,56 ---- import java.util.Properties; import java.util.Random; + import java.util.UUID; import org.apache.log4j.Level; *************** *** 131,136 **** IRawStore store = new SimpleMemoryRawStore(); // create the metadata index. ! MetadataIndex md = new MetadataIndex(store,3,"abc"); /* --- 132,142 ---- IRawStore store = new SimpleMemoryRawStore(); + final UUID indexUUID = UUID.randomUUID(); + + final UUID managedIndexUUID = UUID.randomUUID(); + // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); /* *************** *** 200,205 **** Journal store = new Journal(properties); // create the metadata index. ! MetadataIndex md = new MetadataIndex(store,3,"abc"); /* --- 206,216 ---- Journal store = new Journal(properties); + final UUID indexUUID = UUID.randomUUID(); + + final UUID managedIndexUUID = UUID.randomUUID(); + // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! 
managedIndexUUID, "abc"); /* *************** *** 291,296 **** IRawStore store = new SimpleMemoryRawStore(); // create the metadata index. ! MetadataIndex md = new MetadataIndex(store,3,"abc"); /* --- 302,312 ---- IRawStore store = new SimpleMemoryRawStore(); + final UUID indexUUID = UUID.randomUUID(); + + final UUID managedIndexUUID = UUID.randomUUID(); + // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); /* *************** *** 400,405 **** Journal store = new Journal(properties); ! // partition metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, "abc"); // define a single partition with no segments. --- 416,426 ---- Journal store = new Journal(properties); ! final UUID indexUUID = UUID.randomUUID(); ! ! final UUID managedIndexUUID = UUID.randomUUID(); ! ! // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); // define a single partition with no segments. *************** *** 407,411 **** // btree to be filled with data. ! BTree btree = new BTree(store, 3, SimpleEntry.Serializer.INSTANCE); /* --- 428,433 ---- // btree to be filled with data. ! BTree btree = new BTree(store, 3, managedIndexUUID, ! SimpleEntry.Serializer.INSTANCE); /* *************** *** 501,506 **** Journal store = new Journal(properties); ! // partition metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, "abc"); // define a single partition with no segments. --- 523,533 ---- Journal store = new Journal(properties); ! final UUID indexUUID = UUID.randomUUID(); ! ! final UUID managedIndexUUID = UUID.randomUUID(); ! ! // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); // define a single partition with no segments. *************** *** 508,512 **** // btree to be filled with data. ! 
BTree btree = new BTree(store, 3, SimpleEntry.Serializer.INSTANCE); /* --- 535,540 ---- // btree to be filled with data. ! BTree btree = new BTree(store, 3, managedIndexUUID, ! SimpleEntry.Serializer.INSTANCE); /* *************** *** 572,576 **** * create a new btree and insert the other keys/values into this btree. */ ! btree = new BTree(store,3,SimpleEntry.Serializer.INSTANCE); btree.insert(new BatchInsert(values2.length, keys2, values2)); --- 600,605 ---- * create a new btree and insert the other keys/values into this btree. */ ! btree = new BTree(store, 3, managedIndexUUID, ! SimpleEntry.Serializer.INSTANCE); btree.insert(new BatchInsert(values2.length, keys2, values2)); *************** *** 597,601 **** new MergedEntryIterator(mergeItr), 100, btree .getNodeSerializer().getValueSerializer(), ! false/* useChecksum */, null/* recordCompressor */, 0d/* errorRate */); /* --- 626,631 ---- new MergedEntryIterator(mergeItr), 100, btree .getNodeSerializer().getValueSerializer(), ! false/* useChecksum */, null/* recordCompressor */, 0d/* errorRate */, ! btree.getIndexUUID()); /* *************** *** 671,676 **** Journal store = new Journal(properties); ! // partition metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, "abc"); // define a single partition with no segments. --- 701,711 ---- Journal store = new Journal(properties); ! final UUID indexUUID = UUID.randomUUID(); ! ! final UUID managedIndexUUID = UUID.randomUUID(); ! ! // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); // define a single partition with no segments. *************** *** 701,705 **** // ground truth btree. ! BTree groundTruth = new BTree(store, mmut, SimpleEntry.Serializer.INSTANCE); // the current index segment and null if there is none yet. --- 736,740 ---- // ground truth btree. ! 
BTree groundTruth = new BTree(store, mmut, managedIndexUUID, SimpleEntry.Serializer.INSTANCE); // the current index segment and null if there is none yet. *************** *** 711,715 **** // test data btree - new tree on each trial! ! BTree testData = new BTree(store, mmut, SimpleEntry.Serializer.INSTANCE); /* --- 746,750 ---- // test data btree - new tree on each trial! ! BTree testData = new BTree(store, mmut, managedIndexUUID, SimpleEntry.Serializer.INSTANCE); /* *************** *** 863,867 **** .getNodeSerializer().getValueSerializer(), false/* useChecksum */, null/* recordCompressor */, ! 0d/* errorRate */); // close the merged leaf iterator (and release its buffer/file). --- 898,902 ---- .getNodeSerializer().getValueSerializer(), false/* useChecksum */, null/* recordCompressor */, ! 0d/* errorRate */, testData.getIndexUUID()); // close the merged leaf iterator (and release its buffer/file). *************** *** 965,970 **** Journal store = new Journal(properties); ! // partition metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, "abc"); // define a single partition with no segments. --- 1000,1010 ---- Journal store = new Journal(properties); ! final UUID indexUUID = UUID.randomUUID(); ! ! final UUID managedIndexUUID = UUID.randomUUID(); ! ! // create the metadata index. ! MetadataIndex md = new MetadataIndex(store, 3, indexUUID, ! managedIndexUUID, "abc"); // define a single partition with no segments. *************** *** 972,976 **** // btree to be filled with data. ! BTree btree = new BTree(store, 3, SimpleEntry.Serializer.INSTANCE); /* --- 1012,1016 ---- // btree to be filled with data. ! BTree btree = new BTree(store, 3, managedIndexUUID, SimpleEntry.Serializer.INSTANCE); /* *************** *** 1036,1040 **** * create a new btree and insert the other keys/values into this btree. */ ! 
btree = new BTree(store,3,SimpleEntry.Serializer.INSTANCE); btree.insert(new BatchInsert(values2.length, keys2, values2)); --- 1076,1080 ---- * create a new btree and insert the other keys/values into this btree. */ ! btree = new BTree(store,3, managedIndexUUID, SimpleEntry.Serializer.INSTANCE); btree.insert(new BatchInsert(values2.length, keys2, values2)); Index: TestPartitionedJournal.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/scaleup/TestPartitionedJournal.java,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -d -r1.7 -r1.8 *** TestPartitionedJournal.java 22 Mar 2007 21:11:25 -0000 1.7 --- TestPartitionedJournal.java 27 Mar 2007 14:34:24 -0000 1.8 *************** *** 51,54 **** --- 51,55 ---- import java.util.Properties; import java.util.Random; + import java.util.UUID; import junit.framework.TestCase2; *************** *** 141,145 **** final String name = "abc"; ! IIndex index = new UnisolatedBTree(journal); assertNull(journal.getIndex(name)); --- 142,146 ---- final String name = "abc"; ! IIndex index = new UnisolatedBTree(journal, UUID.randomUUID()); assertNull(journal.getIndex(name)); *************** *** 205,209 **** final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal))); journal.overflow(); --- 206,210 ---- final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal, UUID.randomUUID()))); journal.overflow(); *************** *** 237,241 **** final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal, 3))); final TestData data = new TestData(journal.migrationThreshold-1); --- 238,243 ---- final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal, ! 3, UUID.randomUUID()))); final TestData data = new TestData(journal.migrationThreshold-1); *************** *** 285,289 **** final String name = "abc"; ! 
assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal))); final TestData data = new TestData(journal.migrationThreshold); --- 287,292 ---- final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal, ! UUID.randomUUID()))); final TestData data = new TestData(journal.migrationThreshold); *************** *** 348,355 **** final String name = "abc"; ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal))); final UnisolatedBTree groundTruth = new UnisolatedBTree( ! new SimpleMemoryRawStore()); final int ntrials = 10; --- 351,361 ---- final String name = "abc"; ! final UUID indexUUID = UUID.randomUUID(); ! ! assertNotNull(journal.registerIndex(name, new UnisolatedBTree(journal, ! indexUUID))); final UnisolatedBTree groundTruth = new UnisolatedBTree( ! new SimpleMemoryRawStore(), indexUUID); final int ntrials = 10; |
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/journal In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/journal Modified Files: TemporaryStore.java Name2Addr.java CommitRecordIndex.java AbstractJournal.java RootBlockView.java ReadCommittedTx.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: ReadCommittedTx.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/ReadCommittedTx.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** ReadCommittedTx.java 15 Mar 2007 16:11:13 -0000 1.3 --- ReadCommittedTx.java 27 Mar 2007 14:34:23 -0000 1.4 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.journal; + import java.util.UUID; + import com.bigdata.isolation.IIsolatableIndex; import com.bigdata.isolation.IIsolatedIndex; *************** *** 244,247 **** --- 246,253 ---- } + public UUID getIndexUUID() { + return getIndex().getIndexUUID(); + } + public boolean contains(byte[] key) { return getIndex().contains(key); Index: Name2Addr.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/Name2Addr.java,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -d -r1.7 -r1.8 *** Name2Addr.java 15 Mar 2007 16:11:12 -0000 1.7 --- Name2Addr.java 27 Mar 2007 14:34:23 -0000 1.8 *************** *** 8,11 **** --- 8,12 ---- import java.util.Locale; import java.util.Map; + import java.util.UUID; import org.CognitiveWeb.extser.LongPacker; *************** *** 70,74 **** public Name2Addr(IRawStore store) { ! 
super(store, DEFAULT_BRANCHING_FACTOR, ValueSerializer.INSTANCE); // this.journal = store; --- 71,76 ---- public Name2Addr(IRawStore store) { ! super(store, DEFAULT_BRANCHING_FACTOR, UUID.randomUUID(), ! ValueSerializer.INSTANCE); // this.journal = store; Index: CommitRecordIndex.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** CommitRecordIndex.java 12 Mar 2007 18:06:11 -0000 1.3 --- CommitRecordIndex.java 27 Mar 2007 14:34:23 -0000 1.4 *************** *** 4,7 **** --- 4,8 ---- import java.io.DataOutputStream; import java.io.IOException; + import java.util.UUID; import org.CognitiveWeb.extser.LongPacker; *************** *** 50,54 **** public CommitRecordIndex(IRawStore store) { ! super(store, DEFAULT_BRANCHING_FACTOR, ValueSerializer.INSTANCE); } --- 51,56 ---- public CommitRecordIndex(IRawStore store) { ! super(store, DEFAULT_BRANCHING_FACTOR, UUID.randomUUID(), ! ValueSerializer.INSTANCE); } Index: AbstractJournal.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/AbstractJournal.java,v retrieving revision 1.6 retrieving revision 1.7 diff -C2 -d -r1.6 -r1.7 *** AbstractJournal.java 23 Mar 2007 20:01:26 -0000 1.6 --- AbstractJournal.java 27 Mar 2007 14:34:23 -0000 1.7 *************** *** 1536,1540 **** public IIndex registerIndex(String name) { ! return registerIndex( name, new UnisolatedBTree(this)); } --- 1536,1540 ---- public IIndex registerIndex(String name) { ! 
return registerIndex(name, new UnisolatedBTree(this, UUID.randomUUID())); } Index: TemporaryStore.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/TemporaryStore.java,v retrieving revision 1.8 retrieving revision 1.9 diff -C2 -d -r1.8 -r1.9 *** TemporaryStore.java 12 Mar 2007 18:06:11 -0000 1.8 --- TemporaryStore.java 27 Mar 2007 14:34:23 -0000 1.9 *************** *** 48,51 **** --- 48,53 ---- package com.bigdata.journal; + import java.util.UUID; + import com.bigdata.objndx.BTree; import com.bigdata.objndx.ByteArrayValueSerializer; *************** *** 110,113 **** --- 112,116 ---- return registerIndex(name, new BTree(this, BTree.DEFAULT_BRANCHING_FACTOR, + UUID.randomUUID(), ByteArrayValueSerializer.INSTANCE)); Index: RootBlockView.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/RootBlockView.java,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -d -r1.14 -r1.15 *** RootBlockView.java 22 Mar 2007 21:11:25 -0000 1.14 --- RootBlockView.java 27 Mar 2007 14:34:23 -0000 1.15 *************** *** 150,154 **** * are no historical {@link ICommitRecord}s (this is true when * the store is first created). ! * @param uuid * The unique journal identifier. */ --- 150,154 ---- * are no historical {@link ICommitRecord}s (this is true when * the store is first created). ! * @param segmentUUID * The unique journal identifier. */ *************** *** 392,396 **** sb.append(", commitRecordAddr="+Addr.toString(getCommitRecordAddr())); sb.append(", commitRecordIndexAddr="+Addr.toString(getCommitRecordIndexAddr())); ! sb.append(", uuid="+getUUID()); sb.append("}"); --- 392,396 ---- sb.append(", commitRecordAddr="+Addr.toString(getCommitRecordAddr())); sb.append(", commitRecordIndexAddr="+Addr.toString(getCommitRecordIndexAddr())); ! sb.append(", segmentUUID="+getUUID()); sb.append("}"); |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:29
|
Update of /cvsroot/cweb/bigdata/lib In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/lib Added Files: cweb-commons-1.1-b2-dev.jar Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique identifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. --- NEW FILE: cweb-commons-1.1-b2-dev.jar --- (This appears to be a binary file; contents omitted.) |
From: Bryan T. <tho...@us...> - 2007-03-27 14:34:27
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/isolation In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6186/src/java/com/bigdata/isolation Modified Files: UnisolatedIndexSegment.java IsolatedBTree.java UnisolatedBTree.java Log Message: Added indexUUID to AbstractBTree so that each scale-out index may have a unique indentifier. Modified the BTreeMetadata class and derived classes to use Externalizable, to support explicit versioning of the metadata record, and to have private fields since they can not be final with Externalizable. Index: UnisolatedIndexSegment.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/isolation/UnisolatedIndexSegment.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** UnisolatedIndexSegment.java 15 Mar 2007 16:11:13 -0000 1.2 --- UnisolatedIndexSegment.java 27 Mar 2007 14:34:23 -0000 1.3 *************** *** 75,81 **** * operations). There are no direct tests of this class at this time. * ! * @todo define extension that stores the index name and uuid for the named ! * index to which the segment belongs (add method to {@link AbstractBTree} ! * to allow subclassing {@link IndexSegmentExtensionMetadata}). * * @todo add a boolean flag to mark index segments that are the final result of --- 75,82 ---- * operations). There are no direct tests of this class at this time. * ! * @todo define extension that stores the index name for a named index to which ! * the segment belongs (add method to {@link AbstractBTree} to allow ! * subclassing {@link IndexSegmentExtensionMetadata})? (Note that we already ! * store the indexUUID). * * @todo add a boolean flag to mark index segments that are the final result of *************** *** 88,93 **** * the metadataMap for a distributed index. * ! * @todo examine the format of the uuid. can we use part of it as the unique ! * basis for one up identifiers within a parition? 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> --- 89,94 ---- * the metadataMap for a distributed index. * ! * @todo examine the format of the segmentUUID. can we use part of it as the ! * unique basis for one up identifiers within a parition? * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> Index: UnisolatedBTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/isolation/UnisolatedBTree.java,v retrieving revision 1.9 retrieving revision 1.10 diff -C2 -d -r1.9 -r1.10 *** UnisolatedBTree.java 15 Mar 2007 16:11:13 -0000 1.9 --- UnisolatedBTree.java 27 Mar 2007 14:34:23 -0000 1.10 *************** *** 48,51 **** --- 48,59 ---- package com.bigdata.isolation; + import java.io.Externalizable; + import java.io.IOException; + import java.io.ObjectInput; + import java.io.ObjectOutput; + import java.util.UUID; + + import org.CognitiveWeb.extser.LongPacker; + import com.bigdata.objndx.BTree; import com.bigdata.objndx.BTreeMetadata; *************** *** 162,169 **** * * @param store */ ! public UnisolatedBTree(IRawStore store) { ! this(store, DEFAULT_BRANCHING_FACTOR, null); } --- 170,178 ---- * * @param store + * @param indexUUID */ ! public UnisolatedBTree(IRawStore store, UUID indexUUID) { ! this(store, DEFAULT_BRANCHING_FACTOR, indexUUID, null); } *************** *** 174,182 **** * * @param store * @param conflictResolver */ ! public UnisolatedBTree(IRawStore store, IConflictResolver conflictResolver) { ! this(store, DEFAULT_BRANCHING_FACTOR, conflictResolver); } --- 183,192 ---- * * @param store + * @param indexUUID * @param conflictResolver */ ! public UnisolatedBTree(IRawStore store, UUID indexUUID, IConflictResolver conflictResolver) { ! this(store, DEFAULT_BRANCHING_FACTOR, indexUUID, conflictResolver); } *************** *** 187,194 **** * @param store * @param branchingFactor */ ! public UnisolatedBTree(IRawStore store, int branchingFactor) { ! 
this(store, branchingFactor, null); } --- 197,205 ---- * @param store * @param branchingFactor + * @param indexUUID */ ! public UnisolatedBTree(IRawStore store, int branchingFactor, UUID indexUUID) { ! this(store, branchingFactor, indexUUID, null); } *************** *** 210,216 **** * transactions (aka serialization orders). */ ! public UnisolatedBTree(IRawStore store, int branchingFactor, IConflictResolver conflictResolver ) { ! super(store, branchingFactor, Value.Serializer.INSTANCE ); this.conflictResolver = conflictResolver; --- 221,228 ---- * transactions (aka serialization orders). */ ! public UnisolatedBTree(IRawStore store, int branchingFactor, ! UUID indexUUID, IConflictResolver conflictResolver) { ! super(store, branchingFactor, indexUUID, Value.Serializer.INSTANCE ); this.conflictResolver = conflictResolver; *************** *** 229,233 **** super(store,metadata); ! this.conflictResolver = ((UnisolatedBTreeMetadata) metadata).conflictResolver; } --- 241,245 ---- super(store,metadata); ! this.conflictResolver = ((UnisolatedBTreeMetadata) metadata).getConflictResolver(); } *************** *** 245,253 **** * @version $Id$ */ ! public static class UnisolatedBTreeMetadata extends BTreeMetadata { private static final long serialVersionUID = -4938674944860230200L; ! public final IConflictResolver conflictResolver; /** --- 257,278 ---- * @version $Id$ */ ! public static class UnisolatedBTreeMetadata extends BTreeMetadata implements Externalizable { private static final long serialVersionUID = -4938674944860230200L; ! private IConflictResolver conflictResolver; ! ! public IConflictResolver getConflictResolver() { ! ! return conflictResolver; ! ! } ! ! /** ! * De-serialization constructor. ! */ ! public UnisolatedBTreeMetadata() { ! ! 
} /** *************** *** 262,265 **** --- 287,318 ---- } + private static final transient int VERSION0 = 0x0; + + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + + super.readExternal(in); + + final int version = (int)LongPacker.unpackLong(in); + + if (version != VERSION0) { + + throw new IOException("Unknown version: version=" + version); + + } + + conflictResolver = (IConflictResolver) in.readObject(); + + } + + public void writeExternal(ObjectOutput out) throws IOException { + + super.writeExternal(out); + + LongPacker.packLong(out,VERSION0); + + out.writeObject(conflictResolver); + + } + } Index: IsolatedBTree.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/isolation/IsolatedBTree.java,v retrieving revision 1.9 retrieving revision 1.10 diff -C2 -d -r1.9 -r1.10 *** IsolatedBTree.java 15 Mar 2007 16:11:13 -0000 1.9 --- IsolatedBTree.java 27 Mar 2007 14:34:23 -0000 1.10 *************** *** 139,143 **** public IsolatedBTree(IRawStore store, UnisolatedBTree src) { ! super(store, src.getBranchingFactor(), src.getConflictResolver()); this.src = src; --- 139,144 ---- public IsolatedBTree(IRawStore store, UnisolatedBTree src) { ! super(store, src.getBranchingFactor(), src.getIndexUUID(), src ! .getConflictResolver()); this.src = src; *************** *** 515,518 **** --- 516,520 ---- tmp = new BTree(getStore(), // same store. getBranchingFactor(), // same branching factor + src.getIndexUUID(), // same indexUUID. ByteArrayValueSerializer.INSTANCE // byte[] values. ); |
Update of /cvsroot/cweb/bigdata/src/resources/config/standalone In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv30146/src/resources/config/standalone Added Files: ServerStarter.config README.txt DataServer1.properties DataServer0.properties DataServer1.config DataServer0.config Log Message: Added "standalone" bigdata instance configuration example. This just starts two data servers on the same host right now, but I plan to expand it to a full local bigdata instance. --- NEW FILE: ServerStarter.config --- import java.io.File; import com.sun.jini.start.ServiceDescriptor; import com.sun.jini.start.NonActivatableServiceDescriptor; /* * Sample configuration for starting a service using the Jini * ServiceStarter. * * Note: DO NOT include anything outside of com.sun.jini.start and the * core Java packages or you will get a ClassNotFoundException when * the ServiceStarter tries to resolve the imports! If you see this * anyway, then add "jini-ext.jar" to the classpath and it will * resolve the ConfigurationException class and then show you the * stack trace so that you can figure out what is going on (I find * that it can help tremendously to have "jini-ext.jar" in the * classpath when working on this configuration - and can even appear * to be necessary for the server to start! * * @see http://java.sun.com/products/jini/2.0/doc/api/com/sun/jini/start/ServiceStarter.html */ com.sun.jini.start { /* * Code base for downloadable code exposed by this service. */ private static codebase = "http://proto.cognitiveweb.org/maven-repository/bigdata/jars/"; /* * @todo restrict the policy to what is actually required by the * service. Among other things, we only need access to a * temporary directory and to the directory in which the journals * and index segments will be stored, not general read/write on * the disk. */ private static policy = "policy.all"; /* * The directory containing the various JARs. 
*/ private static libdir = "lib"+File.separator; /* * Declare dependencies for the server here. */ private static classpath = // jini libdir+"jini-core.jar"+File.pathSeparator+ libdir+"jini-ext.jar"+File.pathSeparator+ libdir+"reggie.jar"+File.pathSeparator+ libdir+"sun-util.jar"+File.pathSeparator+ // utility JARs. libdir+"log4j-1.2.8.jar"+File.pathSeparator+ libdir+"ctc_utils-5-4-2005.jar"+File.pathSeparator+ libdir+"lgpl-utils-1.0-b1-dev.jar"+File.pathSeparator+ libdir+"cweb-extser-0.1-b2-dev.jar"+File.pathSeparator+ // ICU (unicode support). libdir+"icu4j-3_6.jar"+File.pathSeparator+ // test suites only! libdir+"junit-3.8.1.jar"+File.pathSeparator+ libdir+"cweb-junit-ext-1.1-b2-dev.jar"+File.pathSeparator+ // main bigdata JAR. //libdir+ "bigdata.jar" ; /* * Server(s) to run. All servers will run in the same VM. * * @todo start metadata server, transaction server, load-balancer, * and job scheduler. */ static serviceDescriptors = new ServiceDescriptor[] { /* * data server(s) */ new NonActivatableServiceDescriptor( codebase, policy, classpath, "com.bigdata.service.DataServer", new String[] { "src/resources/config/standalone/DataServer0.config" }) , new NonActivatableServiceDescriptor( codebase, policy, classpath, "com.bigdata.service.DataServer", new String[] { "src/resources/config/standalone/DataServer1.config" }) }; } --- NEW FILE: DataServer0.properties --- # DataServer configuration. file=DataServer0.jnl --- NEW FILE: DataServer1.config --- import java.io.File; import net.jini.jeri.BasicILFactory; import net.jini.jeri.BasicJeriExporter; import net.jini.jeri.tcp.TcpServerEndpoint; import net.jini.discovery.LookupDiscovery; import net.jini.core.discovery.LookupLocator; import net.jini.core.entry.Entry; import net.jini.lookup.entry.Name; import net.jini.lookup.entry.Comment; import net.jini.lookup.entry.Address; import net.jini.lookup.entry.Location; import net.jini.lookup.entry.ServiceInfo; /* * Declares how the service will provision itself. 
*/ ServiceDescription { /* * This object is used to export the service proxy. The choice here effects * the protocol that will be used for communications between the clients and * the service. * * @todo Explore JERI nio option and customization support for serialization. */ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(0), new BasicILFactory()); /* * The name of the property file containing the configuration information for * the service itself (where it will locate its files, etc). */ propertyFile = new File("src/resources/config/standalone/DataServer1.properties"); /* * The file on which the serviceID will be written. */ serviceIdFile = new File("DataServer1.id"); } /* * Declares how the service will advertise itself. */ AdvertDescription { /* * Entry attributes used to describe the service. */ entries = new Entry[] { new Name("DataService1"), // human facing name. new ServiceInfo("bigdata", // product or package name "SYSTAP,LLC", // manufacturer "SYSTAP,LLC", // vendor "0.1-beta", // version "DataService", // model "serial#" // serialNumber ) }; /* * Note: multicast discovery is always used if LookupDiscovery.ALL_GROUPS is * specified. */ // groups = LookupDiscovery.ALL_GROUPS; groups = new String[]{"bigdata"}; /* * One or more unicast URIs of the form jini://host/ or jini://host:port/. * This MAY be an empty array if you want to use multicast discovery _and_ * you have specified LookupDiscovery.ALL_GROUPS above. */ unicastLocators = new LookupLocator[] { // empty new LookupLocator("jini://localhost/") }; } --- NEW FILE: DataServer1.properties --- # DataServer configuration. file=DataServer1.jnl --- NEW FILE: README.txt --- A sample configuration for a standalone bigdata instance (single host, one or more data servers). 
--- NEW FILE: DataServer0.config --- import java.io.File; import net.jini.jeri.BasicILFactory; import net.jini.jeri.BasicJeriExporter; import net.jini.jeri.tcp.TcpServerEndpoint; import net.jini.discovery.LookupDiscovery; import net.jini.core.discovery.LookupLocator; import net.jini.core.entry.Entry; import net.jini.lookup.entry.Name; import net.jini.lookup.entry.Comment; import net.jini.lookup.entry.Address; import net.jini.lookup.entry.Location; import net.jini.lookup.entry.ServiceInfo; /* * Declares how the service will provision itself. */ ServiceDescription { /* * This object is used to export the service proxy. The choice here effects * the protocol that will be used for communications between the clients and * the service. * * @todo Explore JERI nio option and customization support for serialization. */ exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(0), new BasicILFactory()); /* * The name of the property file containing the configuration information for * the service itself (where it will locate its files, etc). */ propertyFile = new File("src/resources/config/standalone/DataServer0.properties"); /* * The file on which the serviceID will be written. */ serviceIdFile = new File("DataServer0.id"); } /* * Declares how the service will advertise itself. */ AdvertDescription { /* * Entry attributes used to describe the service. */ entries = new Entry[] { new Name("DataService0"), // human facing name. new ServiceInfo("bigdata", // product or package name "SYSTAP,LLC", // manufacturer "SYSTAP,LLC", // vendor "0.1-beta", // version "DataService", // model "serial#" // serialNumber ) }; /* * Note: multicast discovery is always used if LookupDiscovery.ALL_GROUPS is * specified. */ // groups = LookupDiscovery.ALL_GROUPS; groups = new String[]{"bigdata"}; /* * One or more unicast URIs of the form jini://host/ or jini://host:port/. 
* This MAY be an empty array if you want to use multicast discovery _and_ * you have specified LookupDiscovery.ALL_GROUPS above. */ unicastLocators = new LookupLocator[] { // empty new LookupLocator("jini://localhost/") }; } |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:31
|
Update of /cvsroot/cweb/bigdata/src/resources/config In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv30146/src/resources/config Added Files: README.txt Log Message: Added "standalone" bigdata instance configuration example. This just starts two data servers on the same host right now, but I plan to expand it to a full local bigdata instance. --- NEW FILE: README.txt --- This package contains some sample configurations. |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:30
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/service In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv30146/src/java/com/bigdata/service Modified Files: AbstractServer.java DataServer.java Log Message: Added "standalone" bigdata instance configuration example. This just starts two data servers on the same host right now, but I plan to expand it to a full local bigdata instance. Index: DataServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/DataServer.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** DataServer.java 23 Mar 2007 18:42:45 -0000 1.2 --- DataServer.java 23 Mar 2007 20:01:25 -0000 1.3 *************** *** 57,60 **** --- 57,70 ---- * The bigdata data server. * + * @todo reduce the permissions required to start the server with the server + * starter. + * + * @see src/resources/config for sample configurations. + * + * @todo write tests against an standalone installation and then see what it + * looks like when the data services are running on more than one host. + * note that unisolated operations can be tested without a transaction + * server. + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ *************** *** 130,191 **** log.info(""); ! boolean destroyEnabled = true; ! if (destroyEnabled) { ! ! new Thread() { ! ! public void run() { ! server.shutdownNow(); ! try { ! journal.closeAndDelete(); ! log.info("Journal deleted."); ! ! } catch (Throwable t) { ! log.warn("Could not delete persistent state: " + t, ! t); ! } ! if (!server.serviceIdFile.delete()) { ! log.warn("Could not delete file: " ! + server.serviceIdFile); ! } ! try { ! Thread.sleep(3); ! } catch (InterruptedException ex) { ! } ! log.info("Service Stop requested"); ! System.exit(1); ! } ! }.start(); ! ! } else { ! ! throw new RemoteException( ! "Service Destroy Not Enabled, Operation Ignored"); ! } } ! // /* ! // * JoinAdmin ! 
// */ // ! // public void addLookupAttributes(Entry[] arg0) throws RemoteException { // ! // log.info(""); // // // TODO Auto-generated method stub --- 140,192 ---- log.info(""); ! new Thread() { ! public void run() { ! server.shutdownNow(); ! log.info("Deleting state."); ! ! try { ! journal.closeAndDelete(); ! log.info("Journal deleted."); ! } catch (Throwable t) { ! log.warn("Could not delete journal: " + t, t); ! } ! if (!server.serviceIdFile.delete()) { ! log.warn("Could not delete file: " ! + server.serviceIdFile); ! } ! try { ! Thread.sleep(3); ! } catch (InterruptedException ex) { ! } ! log.info("Service stopped."); ! System.exit(1); ! } ! ! }.start(); } ! // /* ! // * JoinAdmin ! // */ // ! // public void addLookupAttributes(Entry[] arg0) throws RemoteException { // ! // log.info(""); // // // TODO Auto-generated method stub Index: AbstractServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/AbstractServer.java,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -d -r1.3 -r1.4 *** AbstractServer.java 23 Mar 2007 19:00:47 -0000 1.3 --- AbstractServer.java 23 Mar 2007 20:01:25 -0000 1.4 *************** *** 95,99 **** * * <pre> ! * java -Djava.security.policy=policy.all -cp lib\jini-ext.jar;lib\start.jar com.sun.jini.start.ServiceStarter src/test/com/bigdata/service/TestServerStarter.config * </pre> * --- 95,99 ---- * * <pre> ! * java -Djava.security.policy=policy.all -cp lib\jini-ext.jar;lib\start.jar com.sun.jini.start.ServiceStarter src/test/com/bigdata/service/TestServerStarter.config * </pre> * *************** *** 136,141 **** * for documentation on how to use the ServiceStarter. * ! * @todo reduce the permissions required to start the server with the server ! * starter. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> --- 136,144 ---- * for documentation on how to use the ServiceStarter. * ! 
* @todo put a lock on the serviceIdFile while the server is running. ! * ! * @todo the {@link DestroyAdmin} implementation on the {@link DataServer} is ! * not working correctly. Untangle the various ways in which things can ! * be stopped vs destroyed. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> *************** *** 181,184 **** --- 184,189 ---- protected Remote proxy; + private boolean open = false; + /** * The object used to inform the hosting environment that the server is *************** *** 314,317 **** --- 319,324 ---- proxy = exporter.export(impl); + open = true; + log.info("Proxy is " + proxy + "(" + proxy.getClass() + ")"); *************** *** 496,501 **** * Shutdown the server taking time only to unregister it from jini. */ ! public void shutdownNow() { /* * Terminate manager threads. --- 503,512 ---- * Shutdown the server taking time only to unregister it from jini. */ ! synchronized public void shutdownNow() { + if(!open) return; + + open = false; + /* * Terminate manager threads. *************** *** 504,509 **** try { ! log.info("Terminating manager threads."); /* * Hand-shaking with the NonActivableServiceDescriptor. --- 515,524 ---- try { ! log.info("Terminating service management threads."); + joinManager.terminate(); + + discoveryManager.terminate(); + /* * Hand-shaking with the NonActivableServiceDescriptor. *************** *** 511,518 **** lifeCycle.unregister(this); - joinManager.terminate(); - - discoveryManager.terminate(); - } catch (Exception ex) { --- 526,529 ---- *************** *** 622,626 **** public void run() { ! log.info("Runing shutdown."); server.shutdownNow(); --- 633,637 ---- public void run() { ! log.info("Running shutdown."); server.shutdownNow(); |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:30
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/journal In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv30146/src/java/com/bigdata/journal Modified Files: AbstractJournal.java Log Message: Added "standalone" bigdata instance configuration example. This just starts two data servers on the same host right now, but I plan to expand it to a full local bigdata instance. Index: AbstractJournal.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/journal/AbstractJournal.java,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -d -r1.5 -r1.6 *** AbstractJournal.java 22 Mar 2007 21:11:25 -0000 1.5 --- AbstractJournal.java 23 Mar 2007 20:01:26 -0000 1.6 *************** *** 781,784 **** --- 781,786 ---- assertOpen(); + log.info(""); + // force the commit thread to quit immediately. writeService.shutdownNow(); *************** *** 811,814 **** --- 813,818 ---- protected void _delete() { + log.info(""); + if (_bufferStrategy.isOpen()) { *************** *** 826,829 **** --- 830,835 ---- public void close() { + log.info(""); + _close(); *************** *** 831,834 **** --- 837,842 ---- public void closeAndDelete() { + + log.info(""); _close(); |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:30
|
Update of /cvsroot/cweb/bigdata/src/resources/logging In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv30146/src/resources/logging Modified Files: log4j.properties Log Message: Added "standalone" bigdata instance configuration example. This just starts two data servers on the same host right now, but I plan to expand it to a full local bigdata instance. Index: log4j.properties =================================================================== RCS file: /cvsroot/cweb/bigdata/src/resources/logging/log4j.properties,v retrieving revision 1.8 retrieving revision 1.9 diff -C2 -d -r1.8 -r1.9 *** log4j.properties 22 Mar 2007 21:11:25 -0000 1.8 --- log4j.properties 23 Mar 2007 20:01:25 -0000 1.9 *************** *** 16,19 **** --- 16,20 ---- log4j.logger.com.bigdata.objndx.IndexSegmentBuilder=INFO log4j.logger.com.bigdata.objndx.AbstractBTreeTestCase=INFO + #log4j.logger.com.bigdata.journal=DEBUG log4j.logger.com.bigdata.service=DEBUG |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:28
|
Update of /cvsroot/cweb/bigdata/src/resources/config/standalone In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv29982/src/resources/config/standalone Log Message: Directory /cvsroot/cweb/bigdata/src/resources/config/standalone added to the repository |
From: Bryan T. <tho...@us...> - 2007-03-23 20:01:28
|
Update of /cvsroot/cweb/bigdata/src/resources/config In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv29982/src/resources/config Log Message: Directory /cvsroot/cweb/bigdata/src/resources/config added to the repository |
From: Bryan T. <tho...@us...> - 2007-03-23 19:00:56
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/service In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv6976/src/java/com/bigdata/service Modified Files: AbstractServer.java Log Message: A little more tuning of the service starter example. Index: AbstractServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/AbstractServer.java,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -d -r1.2 -r1.3 *** AbstractServer.java 23 Mar 2007 18:42:45 -0000 1.2 --- AbstractServer.java 23 Mar 2007 19:00:47 -0000 1.3 *************** *** 89,97 **** /** - * Abstract base class for configurable services discoverable using JINI. * <p> * The recommended way to start a server is using the {@link ServiceStarter}. * <p> * The server MAY be started using a <code>main</code> routine: * * <pre> --- 89,106 ---- /** * <p> + * Abstract base class for configurable services discoverable using JINI. + * </p> * The recommended way to start a server is using the {@link ServiceStarter}. + * + * <pre> + * java -Djava.security.policy=policy.all -cp lib\jini-ext.jar;lib\start.jar com.sun.jini.start.ServiceStarter src/test/com/bigdata/service/TestServerStarter.config + * </pre> + * + * Other command line options MAY be recommended depending on the server that + * you are starting, e.g., <code>-server -XX:MaxDirectMemorySize=256M </code>. * <p> * The server MAY be started using a <code>main</code> routine: + * </p> * * <pre> *************** *** 103,111 **** * </pre> * ! * The service may be <em>terminated</em> by terminating the server process. * <p> * Services are <em>destroyed</em> using {@link DestroyAdmin}, e.g., through * the Jini service browser. Note that this tends to imply that all persistent * data associated with that service is also destroyed! * * @see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6380355, which --- 112,130 ---- * </pre> * ! * <p> ! 
* The service may be <em>terminated</em> by terminating the server process. A ! * {@link Runtime#addShutdownHook(Thread)} is installed by the server so that ! * you can also stop the server using ^C (Windows) and possibly ! * <code>kill</code> <i>pid</i> (Un*x). You can record the PID of the process ! * running the server when you start it under Un*x using a shell script. Note ! * that if you are starting multiple services at once with the ! * {@link ServiceStarter} then these methods will take down all servers running ! * in the same VM. ! * </p> * <p> * Services are <em>destroyed</em> using {@link DestroyAdmin}, e.g., through * the Jini service browser. Note that this tends to imply that all persistent * data associated with that service is also destroyed! + * </p> * * @see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6380355, which *************** *** 117,120 **** --- 136,142 ---- * for documentation on how to use the ServiceStarter. * + * @todo reduce the permissions required to start the server with the server + * starter. + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ *************** *** 361,364 **** --- 383,392 ---- } + /* + * The runtime shutdown hook appears to be a robust way to handle ^C by + * providing a clean service termination. + */ + Runtime.getRuntime().addShutdownHook(new ShutdownThread(this)); + } *************** *** 507,517 **** /** ! * Run the server (this should be invoked from <code>main</code>. You can ! * stop the server using ^C (Windows) and possibly <code>kill</code> (Un*x). ! * You can record the PID of the process running the server when you start ! * it under Un*x using a shell script. ! * <p> ! * Note: If you want to DESTROY a service (and its state), then you can do ! * that from the Jini Service Browser. */ protected void run() { --- 535,539 ---- /** ! * Run the server (this should be invoked from <code>main</code>. 
*/ protected void run() { *************** *** 521,525 **** /* * Note: I have found the Runtime shutdown hook to be much more robust ! * than attempting to install a signal handler. */ --- 543,549 ---- /* * Note: I have found the Runtime shutdown hook to be much more robust ! * than attempting to install a signal handler. It is installed by ! * the server constructor rather than here so that it will be used ! * when the server is run by the ServiceStarter as well as from main(). */ *************** *** 556,564 **** /* - * The runtime shutdown hook appears to be a bit more robust. - */ - Runtime.getRuntime().addShutdownHook(new ShutdownThread(this)); - - /* * Wait until the server is terminated. */ --- 580,583 ---- |
From: Bryan T. <tho...@us...> - 2007-03-23 18:42:57
|
Update of /cvsroot/cweb/bigdata/src/test/com/bigdata/service In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv32506/src/test/com/bigdata/service Modified Files: ITestService.java TestServer.java TestServer.config Added Files: TestPersistentJiniService.java TestServerStarter.config Log Message: Working on service admin and service starter. --- NEW FILE: TestPersistentJiniService.java --- /** The Notice below must appear in each file of the Source Code of any copy you distribute of the Licensed Product. Contributors to any Modifications may add their own copyright notices to identify their own contributions. License: The contents of this file are subject to the CognitiveWeb Open Source License Version 1.1 (the License). You may not copy or use this file, in either source code or executable form, except in compliance with the License. You may obtain a copy of the License from http://www.CognitiveWeb.org/legal/license/ Software distributed under the License is distributed on an AS IS basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. Copyrights: Portions created by or assigned to CognitiveWeb are Copyright (c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact information for CognitiveWeb is available at http://www.CognitiveWeb.org Portions Copyright (c) 2002-2003 Bryan Thompson. Acknowledgements: Special thanks to the developers of the Jabber Open Source License 1.0 (JOSL), from which this License was derived. This License contains terms that differ from JOSL. Special thanks to the CognitiveWeb Open Source Contributors for their suggestions and support of the Cognitive Web. 
Modifications: */ /* * Created on Mar 23, 2007 */ package com.bigdata.service; import java.io.IOException; import net.jini.config.Configuration; import net.jini.config.ConfigurationException; import org.wonderly.jini2.PersistentJiniService; /** * @todo Work through an alternative to {@link TestServer} based on wonderly's * <code>startnow</code> JAR. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ public class TestPersistentJiniService extends PersistentJiniService { /** * @throws IOException * @throws ConfigurationException */ public TestPersistentJiniService() throws IOException, ConfigurationException { // TODO Auto-generated constructor stub } /** * @param arg0 * @throws IOException * @throws ConfigurationException */ public TestPersistentJiniService(Configuration arg0) throws IOException, ConfigurationException { super(arg0); // TODO Auto-generated constructor stub } /** * @param arg0 * @throws IOException * @throws ConfigurationException */ public TestPersistentJiniService(String[] arg0) throws IOException, ConfigurationException { super(arg0); // TODO Auto-generated constructor stub } } --- NEW FILE: TestServerStarter.config --- import java.io.File; import com.sun.jini.start.ServiceDescriptor; import com.sun.jini.start.NonActivatableServiceDescriptor; /* * Sample configuration for starting a service using the Jini ServiceStarter. * * Note: DO NOT include anything outside of com.sun.jini.start and the core * Java packages or you will get a ClassNotFoundException when the ServiceStarter * tries to resolve the imports! If you see this anyway, then add "jini-ext.jar" * to the classpath and it will resolve the ConfigurationException class and then * show you the stack trace so that you can figure out what is going on (I find * that it can help tremendously to have "jini-ext.jar" in the classpath when * working on this configuration - and can even appear to be necessary for the * server to start! 
* * @see http://java.sun.com/products/jini/2.0/doc/api/com/sun/jini/start/ServiceStarter.html */ com.sun.jini.start { /* * Code base for downloadable code exposed by this service. */ private static codebase = "http://proto.cognitiveweb.org/maven-repository/bigdata/jars/"; /* * @todo restrict the policy to what is actually required by the service. * Among other things, we only need access to a temporary directory and * to the directory in which the journals and index segments will be * stored, not general read/write on the disk. */ private static policy = "policy.all"; /* * The directory containing the various JARs. */ private static libdir = "lib"+File.separator; /* * Declare dependencies for the server here. */ private static classpath = // jini libdir+"jini-core.jar"+File.pathSeparator+ libdir+"jini-ext.jar"+File.pathSeparator+ libdir+"reggie.jar"+File.pathSeparator+ libdir+"sun-util.jar"+File.pathSeparator+ // utility JARs. libdir+"log4j-1.2.8.jar"+File.pathSeparator+ libdir+"ctc_utils-5-4-2005.jar"+File.pathSeparator+ libdir+"lgpl-utils-1.0-b1-dev.jar"+File.pathSeparator+ libdir+"cweb-extser-0.1-b2-dev.jar"+File.pathSeparator+ // ICU (unicode support). libdir+"icu4j-3_6.jar"+File.pathSeparator+ // test suites only! libdir+"junit-3.8.1.jar"+File.pathSeparator+ libdir+"cweb-junit-ext-1.1-b2-dev.jar"+File.pathSeparator+ // main bigdata JAR. //libdir+ "bigdata.jar" ; static serviceDescriptors = new ServiceDescriptor[] { new NonActivatableServiceDescriptor( codebase, policy, classpath, "com.bigdata.service.DataServer", new String[] { "src/test/com/bigdata/service/TestServer.config" }) }; } Index: TestServer.config =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/service/TestServer.config,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** TestServer.config 22 Mar 2007 21:11:24 -0000 1.1 --- TestServer.config 23 Mar 2007 18:42:47 -0000 1.2 *************** *** 1,3 **** ! 
import java.io.*; import net.jini.jeri.BasicILFactory; --- 1,3 ---- ! import java.io.File; import net.jini.jeri.BasicILFactory; *************** *** 5,10 **** import net.jini.jeri.tcp.TcpServerEndpoint; - import net.jini.core.discovery.LookupLocator; import net.jini.discovery.LookupDiscovery; import net.jini.core.entry.Entry; import net.jini.lookup.entry.Name; --- 5,10 ---- import net.jini.jeri.tcp.TcpServerEndpoint; import net.jini.discovery.LookupDiscovery; + import net.jini.core.discovery.LookupLocator; import net.jini.core.entry.Entry; import net.jini.lookup.entry.Name; *************** *** 14,23 **** import net.jini.lookup.entry.ServiceInfo; - import java.io.File; - import com.sun.jini.config.ConfigUtil; - - import com.sun.jini.start.ServiceDescriptor; - import com.sun.jini.start.NonActivatableServiceDescriptor; - /* * Declares how the service will provision itself. --- 14,17 ---- *************** *** 34,79 **** exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(0), new BasicILFactory()); ! ! /* ! * Code base for downloadable code exposed by this service. ! */ ! private static codebase = ! "http://proto.cognitiveweb.org/maven-repository/bigdata/jars/"; ! ! /* @todo restrict the policy to what is actually required by the service. ! * Among other things, we only need access to a temporary directory and ! * to the directory in which the journals and index segments will be ! * stored, not general read/write on the disk. ! */ ! private static policy = "policy.all"; ! ! /* ! * The directory containing the various JARs. ! */ ! private static libdir = "ant-deploy"+File.separator; ! ! /* ! * Declare dependencies for the server here. ! */ ! private static classpath = ! // jini ! libdir+"reggie.jar"+File.pathSeparator+ ! libdir+"jini-core.jar"+File.pathSeparator+ ! libdir+"jini-ext.jar"+File.pathSeparator+ ! libdir+"sun-util.jar"+File.pathSeparator+ ! // utility JARs. ! libdir+"log4j-1.2.8.jar"+File.pathSeparator+ ! 
libdir+"ctc_utils-5-4-2005.jar"+File.pathSeparator+ ! libdir+"lgpl-utils-1.0-b1-dev.jar"+File.pathSeparator+ ! libdir+"cweb-extser-0.1-b2-dev.jar"+File.pathSeparator+ ! // ICU (unicode support). ! libdir+"icu4j-3_6.jar"+File.pathSeparator+ ! // test suites only! ! libdir+"junit-3.8.1.jar"+File.pathSeparator+ ! libdir+"cweb-junit-ext-1.1-b2-dev.jar"+File.pathSeparator+ ! // main bigdata JAR. ! libdir+"bigdata.jar" ! ; ! /* * The name of the property file containing the configuration information for --- 28,32 ---- exporter = new BasicJeriExporter(TcpServerEndpoint.getInstance(0), new BasicILFactory()); ! /* * The name of the property file containing the configuration information for *************** *** 82,91 **** propertyFile = new File("src/test/com/bigdata/service/TestService.properties"); ! static serviceDescriptors = new ServiceDescriptor[] { ! new NonActivatableServiceDescriptor( ! codebase, policy, classpath, ! "config.FileClassifierServerConfig", ! new String[] { propertyFile}) ! }; } --- 35,42 ---- propertyFile = new File("src/test/com/bigdata/service/TestService.properties"); ! /* ! * The file on which the serviceID will be written. ! */ ! serviceIdFile = new File("TestService.id"); } *************** *** 131,138 **** }; - /* - * The file on which the serviceID will be written. 
- */ - serviceIdFile = new File("TestService.id"); - } --- 82,84 ---- Index: TestServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/service/TestServer.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** TestServer.java 22 Mar 2007 21:11:24 -0000 1.1 --- TestServer.java 23 Mar 2007 18:42:47 -0000 1.2 *************** *** 56,62 **** --- 56,64 ---- import java.io.InputStream; import java.rmi.Remote; + import java.rmi.RemoteException; import java.rmi.server.ExportException; import java.util.Properties; + import net.jini.admin.Administrable; import net.jini.admin.JoinAdmin; import net.jini.config.Configuration; *************** *** 74,77 **** --- 76,80 ---- import net.jini.lease.LeaseRenewalEvent; import net.jini.lease.LeaseRenewalManager; + import net.jini.lookup.DiscoveryAdmin; import net.jini.lookup.JoinManager; import net.jini.lookup.ServiceIDListener; *************** *** 80,83 **** --- 83,88 ---- import com.bigdata.journal.Journal; + import com.sun.jini.admin.DestroyAdmin; + import com.sun.jini.admin.StorageLocationAdmin; import com.sun.jini.start.ServiceStarter; *************** *** 96,100 **** * * <pre> ! * java -Djava.security.policy=policy.all -classpath ant-deploy\reggie.jar;ant-deploy\jini-core.jar;ant-deploy\jini-ext.jar;ant-deploy\sun-util.jar;ant-deploy\bigdata.jar -jar ant-deploy\start.jar src\test\org\CognitiveWeb\bigdata\jini\TestServer.config * </pre> * --- 101,105 ---- * * <pre> ! * java -Djava.security.policy=policy.all -classpath ant-deploy\reggie.jar;ant-deploy\jini-core.jar;ant-deploy\jini-ext.jar;ant-deploy\sun-util.jar;ant-deploy\bigdata.jar -jar ant-deploy\start.jar src\test\org\CognitiveWeb\bigdata\jini\TestServer.config * </pre> * *************** *** 102,105 **** --- 107,123 ---- * transfer). Research how heavy mashalling is and what options exist to * make it faster and lighter. + * + * @todo Support JASS. 
+ * + * @todo {@link JoinAdmin} is not actually implemented (it is declared as a work + * around for {@link DestroyAdmin} which otherwise crashes the Jini + * ServiceBrowser with a NPE when you try to destroy the service). + * Consider using Wonderly's <code>startNow</code> PersistentJiniService + * as a base rather than rolling our own. That might be more robust -- see + * {@link TestPersistentJiniService}. + * + * @todo consider {@link StorageLocationAdmin} + * + * @todo consider {@link DiscoveryAdmin} */ public class TestServer implements LeaseListener, ServiceIDListener *************** *** 116,120 **** private Exporter exporter; private ITestService proxy; ! private File serviceIdFile = null; /** --- 134,138 ---- private Exporter exporter; private ITestService proxy; ! protected File serviceIdFile = null; /** *************** *** 158,162 **** Entry[].class, null/* default */); ! serviceIdFile = (File) config.getEntry(ADVERT_LABEL, "serviceIdFile", File.class); // default --- 176,192 ---- Entry[].class, null/* default */); ! /* ! * Extract how the service will provision itself from the ! * Configuration. ! */ ! ! // use the configuration to construct an exporter ! exporter = (Exporter) config.getEntry(// ! SERVICE_LABEL, // component ! "exporter", // name ! Exporter.class // type (of the return object) ! ); ! ! serviceIdFile = (File) config.getEntry(SERVICE_LABEL, "serviceIdFile", File.class); // default *************** *** 183,198 **** /* - * Extract how the service will provision itself from the - * Configuration. - */ - - // use the configuration to construct an exporter - exporter = (Exporter) config.getEntry(// - SERVICE_LABEL, // component - "exporter", // name - Exporter.class // type (of the return object) - ); - - /* * Access the properties file used to configure the service. */ --- 213,216 ---- *************** *** 221,225 **** // create the service object. ! impl = new TestServiceImpl(properties); // export a proxy object for this service instance. 
--- 239,243 ---- // create the service object. ! impl = new TestServiceImpl(this,properties); // export a proxy object for this service instance. *************** *** 447,450 **** --- 465,480 ---- log.warn(ex); } + testServer.shutdownNow(); + // /* + // * Note: The reference to the service instance here forces a hard + // * reference to remain for the test server. If you comment out this log + // * statement, then you need to do something else to hold onto the hard + // * reference. + // */ + // log.info("Server will die: "+testServer); + } + + void shutdownNow() { + /* * Terminate manager threads. *************** *** 452,457 **** try { log.info("Terminating manager threads."); ! testServer.joinManager.terminate(); ! testServer.discoveryManager.terminate(); } catch (Exception ex) { log.error("Could not terminate: "+ex, ex); --- 482,487 ---- try { log.info("Terminating manager threads."); ! joinManager.terminate(); ! discoveryManager.terminate(); } catch (Exception ex) { log.error("Could not terminate: "+ex, ex); *************** *** 464,476 **** */ log.info("Unexporting the service proxy."); ! testServer.unexport(true); - // /* - // * Note: The reference to the service instance here forces a hard - // * reference to remain for the test server. If you comment out this log - // * statement, then you need to do something else to hold onto the hard - // * reference. - // */ - // log.info("Server will die: "+testServer); } --- 494,499 ---- */ log.info("Unexporting the service proxy."); ! unexport(true); } *************** *** 564,569 **** * </a> */ ! public static class TestServiceImpl implements ITestService { /** --- 587,601 ---- * </a> */ ! public static class TestServiceImpl implements ITestService, RemoteAdministrable, RemoteDestroyAdmin, RemoteJoinAdmin { + + private TestServer server; + private Journal journal; + + public Journal getJournal() { + + return journal; + + } /** *************** *** 572,580 **** * @param properties */ ! 
public TestServiceImpl(Properties properties) { log.info("Created: " + this ); ! new Journal(properties); } --- 604,614 ---- * @param properties */ ! public TestServiceImpl(TestServer server,Properties properties) { log.info("Created: " + this ); ! journal = new Journal(properties); ! ! this.server = server; } *************** *** 586,589 **** --- 620,805 ---- } + public Object getAdmin() throws RemoteException { + + log.info(""); + + return server.proxy; + + } + + /* + * DestroyAdmin + */ + + /** + * Destroy the service (including its persistent state). + * + * @throws RemoteException + */ + public void destroy() throws RemoteException { + + log.info(""); + + boolean destroyEnabled = true; + + if (destroyEnabled) { + + new Thread() { + + public void run() { + + server.shutdownNow(); + + try { + + getJournal().closeAndDelete(); + + log.info("Journal deleted."); + + } catch (Throwable t) { + + log.warn("Could not delete persistent state: " + t, + t); + + } + + if (!server.serviceIdFile.delete()) { + + log.warn("Could not delete file: " + + server.serviceIdFile); + + } + + try { + Thread.sleep(3); + } catch (InterruptedException ex) { + } + + log.info("Service Stop requested"); + + System.exit(1); + + } + + }.start(); + } else { + throw new RemoteException( + "Service Destroy Not Enabled, Operation Ignored"); + } + + } + + /* + * JoinAdmin + */ + + public void addLookupAttributes(Entry[] arg0) throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + + } + + public void addLookupGroups(String[] arg0) throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + + } + + public void addLookupLocators(LookupLocator[] arg0) throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + + } + + public Entry[] getLookupAttributes() throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + return null; + } + + public String[] getLookupGroups() throws RemoteException { + + 
log.info(""); + + // TODO Auto-generated method stub + return null; + } + + public LookupLocator[] getLookupLocators() throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + return null; + } + + public void modifyLookupAttributes(Entry[] arg0, Entry[] arg1) throws RemoteException { + + log.info(""); + + // TODO Auto-generated method stub + + } + + public void removeLookupGroups(String[] arg0) throws RemoteException { + log.info(""); + + // TODO Auto-generated method stub + + } + + public void removeLookupLocators(LookupLocator[] arg0) throws RemoteException { + log.info(""); + + // TODO Auto-generated method stub + + } + + public void setLookupGroups(String[] arg0) throws RemoteException { + log.info(""); + + // TODO Auto-generated method stub + + } + + public void setLookupLocators(LookupLocator[] arg0) throws RemoteException { + log.info(""); + + // TODO Auto-generated method stub + + } + + } + + /* + * Note: You need to extend Remote in order for these APIs to be exported! 
+ */ + + public static interface RemoteAdministrable extends Remote, Administrable { + + } + + public static interface RemoteDestroyAdmin extends Remote, DestroyAdmin { + + } + + public static interface RemoteJoinAdmin extends Remote, JoinAdmin { + + } + + public static interface RemoteDiscoveryAdmin extends Remote, DiscoveryAdmin { + + } + + public static interface RemoteStorageLocationAdmin extends Remote, StorageLocationAdmin { + } Index: ITestService.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/test/com/bigdata/service/ITestService.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** ITestService.java 22 Mar 2007 21:11:24 -0000 1.1 --- ITestService.java 23 Mar 2007 18:42:47 -0000 1.2 *************** *** 52,55 **** --- 52,59 ---- import java.rmi.RemoteException; + import com.sun.jini.admin.DestroyAdmin; + + import net.jini.admin.Administrable; + /** * The public interface for a test service. *************** *** 65,69 **** * @download */ ! public interface ITestService extends Remote { --- 69,73 ---- * @download */ ! public interface ITestService extends Remote { |
From: Bryan T. <tho...@us...> - 2007-03-23 18:42:53
|
Update of /cvsroot/cweb/bigdata/LEGAL In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv32506/LEGAL Added Files: startnow-license.txt Log Message: Working on service admin and service starter. --- NEW FILE: startnow-license.txt --- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. |
From: Bryan T. <tho...@us...> - 2007-03-23 18:42:53
|
Update of /cvsroot/cweb/bigdata/src/java/com/bigdata/service In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv32506/src/java/com/bigdata/service Modified Files: AbstractServer.java DataServer.java Log Message: Working on service admin and service starter. Index: DataServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/DataServer.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** DataServer.java 22 Mar 2007 21:11:23 -0000 1.1 --- DataServer.java 23 Mar 2007 18:42:45 -0000 1.2 *************** *** 49,54 **** --- 49,57 ---- import java.rmi.Remote; + import java.rmi.RemoteException; import java.util.Properties; + import com.sun.jini.start.LifeCycle; + /** * The bigdata data server. *************** *** 67,70 **** --- 70,79 ---- } + + public DataServer(String[] args, LifeCycle lifeCycle) { + + super( args, lifeCycle ); + + } public static void main(String[] args) { *************** *** 76,80 **** protected Remote newService(Properties properties) { ! return new DataService(properties); } --- 85,271 ---- protected Remote newService(Properties properties) { ! return new AdministrableDataService(this,properties); ! ! } ! ! /** ! * Adds jini administration interfaces to the basic {@link DataService}. ! * ! * @author <a href="mailto:tho...@us...">Bryan Thompson</a> ! * @version $Id$ ! */ ! public static class AdministrableDataService extends DataService implements ! RemoteAdministrable, RemoteDestroyAdmin { ! ! protected AbstractServer server; ! ! public AdministrableDataService(AbstractServer server,Properties properties) { ! ! super(properties); ! ! this.server = server; ! ! } ! ! public Object getAdmin() throws RemoteException { ! ! log.info(""); ! ! return server.proxy; ! ! } ! ! /* ! * DestroyAdmin ! */ ! ! /** ! * Destroy the service and deletes any files containing resources (<em>application data</em>) ! * that was in use by that service. ! * ! 
* @throws RemoteException ! */ ! public void destroy() throws RemoteException { ! ! log.info(""); ! ! boolean destroyEnabled = true; ! ! if (destroyEnabled) { ! ! new Thread() { ! ! public void run() { ! ! server.shutdownNow(); ! ! try { ! ! journal.closeAndDelete(); ! ! log.info("Journal deleted."); ! ! } catch (Throwable t) { ! ! log.warn("Could not delete persistent state: " + t, ! t); ! ! } ! ! if (!server.serviceIdFile.delete()) { ! ! log.warn("Could not delete file: " ! + server.serviceIdFile); ! ! } ! ! try { ! Thread.sleep(3); ! } catch (InterruptedException ex) { ! } ! ! log.info("Service Stop requested"); ! ! System.exit(1); ! ! } ! ! }.start(); ! ! } else { ! ! throw new RemoteException( ! "Service Destroy Not Enabled, Operation Ignored"); ! } ! ! } ! ! // /* ! // * JoinAdmin ! // */ ! // ! // public void addLookupAttributes(Entry[] arg0) throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public void addLookupGroups(String[] arg0) throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public void addLookupLocators(LookupLocator[] arg0) throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public Entry[] getLookupAttributes() throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // return null; ! // } ! // ! // public String[] getLookupGroups() throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // return null; ! // } ! // ! // public LookupLocator[] getLookupLocators() throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // return null; ! // } ! // ! // public void modifyLookupAttributes(Entry[] arg0, Entry[] arg1) throws RemoteException { ! // ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! 
// public void removeLookupGroups(String[] arg0) throws RemoteException { ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public void removeLookupLocators(LookupLocator[] arg0) throws RemoteException { ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public void setLookupGroups(String[] arg0) throws RemoteException { ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } ! // ! // public void setLookupLocators(LookupLocator[] arg0) throws RemoteException { ! // log.info(""); ! // ! // // TODO Auto-generated method stub ! // ! // } } Index: AbstractServer.java =================================================================== RCS file: /cvsroot/cweb/bigdata/src/java/com/bigdata/service/AbstractServer.java,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -d -r1.1 -r1.2 *** AbstractServer.java 22 Mar 2007 21:11:23 -0000 1.1 --- AbstractServer.java 23 Mar 2007 18:42:45 -0000 1.2 *************** *** 60,63 **** --- 60,64 ---- import java.util.Properties; + import net.jini.admin.Administrable; import net.jini.admin.JoinAdmin; import net.jini.config.Configuration; *************** *** 74,77 **** --- 75,79 ---- import net.jini.lease.LeaseRenewalEvent; import net.jini.lease.LeaseRenewalManager; + import net.jini.lookup.DiscoveryAdmin; import net.jini.lookup.JoinManager; import net.jini.lookup.ServiceIDListener; *************** *** 79,97 **** import org.apache.log4j.Logger; ! import sun.misc.Signal; ! import sun.misc.SignalHandler; /** * Abstract base class for configurable services discoverable using JINI. ! * Services are started using a <code>main</code> routine: * <pre> ! public static void main(String[] args) { ! ! new MyServer(args).run(); ! ! } ! * </pre> ! * ! * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ --- 81,120 ---- import org.apache.log4j.Logger; ! import com.sun.jini.admin.DestroyAdmin; ! 
import com.sun.jini.admin.StorageLocationAdmin; ! import com.sun.jini.start.LifeCycle; ! import com.sun.jini.start.NonActivatableServiceDescriptor; ! import com.sun.jini.start.ServiceDescriptor; ! import com.sun.jini.start.ServiceStarter; /** * Abstract base class for configurable services discoverable using JINI. ! * <p> ! * The recommended way to start a server is using the {@link ServiceStarter}. ! * <p> ! * The server MAY be started using a <code>main</code> routine: ! * * <pre> ! * public static void main(String[] args) { ! * ! * new MyServer(args).run(); ! * ! * } ! * </pre> ! * ! * The service may be <em>terminated</em> by terminating the server process. ! * <p> ! * Services are <em>destroyed</em> using {@link DestroyAdmin}, e.g., through ! * the Jini service browser. Note that this tends to imply that all persistent ! * data associated with that service is also destroyed! ! * ! * @see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6380355, which ! * describes a bug in the service browser that will display a ! * "NullPointerException" dialog box if you destroy a service which ! * implements {@link DestroyAdmin} but not {@link JoinAdmin}. ! * ! * @see http://java.sun.com/products/jini/2.0/doc/api/com/sun/jini/start/ServiceStarter.html ! * for documentation on how to use the ServiceStarter. ! * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ *************** *** 102,106 **** public static final transient Logger log = Logger .getLogger(AbstractServer.class); ! private ServiceID serviceID; private DiscoveryManagement discoveryManager; --- 125,141 ---- public static final transient Logger log = Logger .getLogger(AbstractServer.class); ! ! /** ! * The label in the {@link Configuration} file for the service ! * description. ! */ ! protected final static transient String SERVICE_LABEL = "ServiceDescription"; ! ! /** ! * The label in the {@link Configuration} file for the service advertisment ! * data. ! */ ! 
protected final static transient String ADVERT_LABEL = "AdvertDescription"; ! private ServiceID serviceID; private DiscoveryManagement discoveryManager; *************** *** 110,114 **** * The file where the {@link ServiceID} will be written/read. */ ! private File serviceIdFile; /** * Responsible for exporting a proxy for the service. --- 145,149 ---- * The file where the {@link ServiceID} will be written/read. */ ! protected File serviceIdFile; /** * Responsible for exporting a proxy for the service. *************** *** 122,128 **** * The exported proxy for the service implementation object. */ ! private Remote proxy; /** * Server startup reads {@link Configuration} data from the file(s) named by * <i>args</i>, starts the service, and advertises the service for --- 157,171 ---- * The exported proxy for the service implementation object. */ ! protected Remote proxy; /** + * The object used to inform the hosting environment that the server is + * unregistering (terminating). A fake object is used when the server is run + * from the command line, otherwise the object is supplied by the + * {@link NonActivatableServiceDescriptor}. + */ + private LifeCycle lifeCycle; + + /** * Server startup reads {@link Configuration} data from the file(s) named by * <i>args</i>, starts the service, and advertises the service for *************** *** 135,142 **** protected AbstractServer(String[] args) { ! final String SERVICE_LABEL = "ServiceDescription"; ! final String ADVERT_LABEL = "AdvertDescription"; Entry[] entries = null; LookupLocator[] unicastLocators = null; --- 178,205 ---- protected AbstractServer(String[] args) { ! this( args, new FakeLifeCycle() ); ! ! } ! /** ! * Server startup invoked by the ServerStarter ! * ! * @param args ! * Arguments from the {@link ServiceDescriptor}. ! * @param lifeCycle ! * The life cycle object. ! * ! * @see NonActivatableServiceDescriptor ! */ ! protected AbstractServer(String[] args, LifeCycle lifeCycle ) { ! ! if (lifeCycle == null) ! 
throw new IllegalArgumentException(); ! ! this.lifeCycle = lifeCycle; + // @todo verify that this belongs here. + System.setSecurityManager(new SecurityManager()); + Entry[] entries = null; LookupLocator[] unicastLocators = null; *************** *** 148,154 **** /* ! * Extract how the service will perform service discovery. */ groups = (String[]) config.getEntry(ADVERT_LABEL, "groups", String[].class, LookupDiscovery.ALL_GROUPS/* default */); --- 211,221 ---- /* ! * Extract how the service will advertise itself from the ! * Configuration. */ + entries = (Entry[]) config.getEntry(ADVERT_LABEL, "entries", + Entry[].class, null/* default */); + groups = (String[]) config.getEntry(ADVERT_LABEL, "groups", String[].class, LookupDiscovery.ALL_GROUPS/* default */); *************** *** 159,170 **** /* ! * Extract how the service will advertise itself from the * Configuration. */ ! entries = (Entry[]) config.getEntry(ADVERT_LABEL, "entries", ! Entry[].class, null/* default */); ! serviceIdFile = (File) config.getEntry(ADVERT_LABEL, "serviceIdFile", File.class); // default --- 226,242 ---- /* ! * Extract how the service will provision itself from the * Configuration. */ ! // The exporter used to expose the service proxy. ! exporter = (Exporter) config.getEntry(// ! SERVICE_LABEL, // component ! "exporter", // name ! Exporter.class // type (of the return object) ! ); ! // The file on which the ServiceID will be written. ! serviceIdFile = (File) config.getEntry(SERVICE_LABEL, "serviceIdFile", File.class); // default *************** *** 189,208 **** } - - /* - * Extract how the service will provision itself from the - * Configuration. - */ ! // use the configuration to construct an exporter ! exporter = (Exporter) config.getEntry(// ! SERVICE_LABEL, // component ! "exporter", // name ! Exporter.class // type (of the return object) ! ); ! ! /* ! * Access the properties file used to configure the service. ! 
*/ File propertyFile = (File) config.getEntry(SERVICE_LABEL, --- 261,266 ---- } ! // The properties file used to configure the service. File propertyFile = (File) config.getEntry(SERVICE_LABEL, *************** *** 408,465 **** /** - * Run the server (this should be invoked from <code>main</code>. - * - * FIXME work through the install a signal handler that will shutdown the - * service politely when it is invoked. Do we need -Xrs on the command - * line for this to work? Which signals should be trapped? Does this - * vary by OS? - * - * SIGINT Interactive attention (CTRL-C). JVM will exit normally. Yes <br> - * SIGTERM Termination request. JVM will exit normally. Yes <br> - * SIGHUP Hang up. JVM will exit normally. Yes - * - * @see http://www-128.ibm.com/developerworks/java/library/i-signalhandling/ - */ - protected void run() { - - log.info("Started server."); - - /* - * Install signal handlers. - */ - - ServerShutdownSignalHandler.install("SIGINT",this); - - // ServerShutdownSignalHandler.install("SIGTERM",this); - - /* - * Wait until the server is terminated. - */ - - Object keepAlive = new Object(); - - synchronized (keepAlive) { - - try { - - keepAlive.wait(); - - } catch (InterruptedException ex) { - - log.info(""+ex); - - } - - } - - } - - /** * Shutdown the server taking time only to unregister it from jini. - * - * @todo make this extensible? provide for normal shutdown vs this? support - * the jini Admin interface. */ ! private void shutdownNow() { /* --- 466,472 ---- /** * Shutdown the server taking time only to unregister it from jini. */ ! public void shutdownNow() { /* *************** *** 470,473 **** --- 477,485 ---- log.info("Terminating manager threads."); + + /* + * Hand-shaking with the NonActivableServiceDescriptor. + */ + lifeCycle.unregister(this); joinManager.terminate(); *************** *** 495,566 **** /** ! * Signal handler shuts down the server politely. ! * ! * @author <a href="mailto:tho...@us...">Bryan Thompson</a> ! 
* @version $Id$ */ ! static class ServerShutdownSignalHandler implements SignalHandler { ! private final AbstractServer server; ! private SignalHandler oldHandler; ! protected ServerShutdownSignalHandler(AbstractServer server) { ! if(server == null) throw new IllegalArgumentException(); ! this.server = server; } ! /** ! * Install the signal handler. ! */ ! public static SignalHandler install(String signalName, ! AbstractServer server) { ! ! Signal signal = new Signal(signalName); ! ! ServerShutdownSignalHandler newHandler = new ServerShutdownSignalHandler( ! server); ! ! newHandler.oldHandler = Signal.handle(signal, newHandler); ! ! log.info("Installed handler: " + signal + ", oldHandler=" ! + newHandler.oldHandler); ! ! return newHandler; } ! public void handle(Signal sig) { - log.warn("Signal: "+sig); - - /* - * Handle signal. - */ server.shutdownNow(); - try { - - // Chain back to previous handler, if one exists - if ( oldHandler != SIG_DFL && oldHandler != SIG_IGN ) { - - oldHandler.handle(sig); - - } - - } catch (Exception ex) { - - log.fatal("Signal handler failed, reason "+ex); - - System.exit(1); - - } - } } /** --- 507,683 ---- /** ! * Run the server (this should be invoked from <code>main</code>. You can ! * stop the server using ^C (Windows) and possibly <code>kill</code> (Un*x). ! * You can record the PID of the process running the server when you start ! * it under Un*x using a shell script. ! * <p> ! * Note: If you want to DESTROY a service (and its state), then you can do ! * that from the Jini Service Browser. */ ! protected void run() { ! log.info("Started server."); ! ! /* ! * Note: I have found the Runtime shutdown hook to be much more robust ! * than attempting to install a signal handler. ! */ ! // /* ! // * Install signal handlers. ! // * SIGINT Interactive attention (CTRL-C). JVM will exit normally. <br> ! // * SIGTERM Termination request. JVM will exit normally. <br> ! // * SIGHUP Hang up. JVM will exit normally.<br> ! // * ! 
// * @see http://www-128.ibm.com/developerworks/java/library/i-signalhandling/ ! // * ! // * @see http://forum.java.sun.com/thread.jspa?threadID=514860&messageID=2451429 ! // * for the use of {@link Runtime#addShutdownHook(Thread)}. ! // * ! // */ ! // ! // try { ! // ServerShutdownSignalHandler.install("SIGINT",this); ! // } catch(IllegalArgumentException ex) { ! // log.info("Signal handled not installed: "+ex); ! // } ! // ! // try { ! // ServerShutdownSignalHandler.install("SIGTERM",this); ! // } catch(IllegalArgumentException ex) { ! // log.info("Signal handled not installed: "+ex); ! // } ! // ! // try { ! // ServerShutdownSignalHandler.install("SIGHUP",this); ! // } catch(IllegalArgumentException ex) { ! // log.info("Signal handled not installed: "+ex); ! // } ! /* ! * The runtime shutdown hook appears to be a bit more robust. ! */ ! Runtime.getRuntime().addShutdownHook(new ShutdownThread(this)); ! /* ! * Wait until the server is terminated. ! */ ! ! Object keepAlive = new Object(); ! ! synchronized (keepAlive) { ! try { ! ! keepAlive.wait(); ! ! } catch (InterruptedException ex) { ! ! log.info(""+ex); ! ! } } + + } ! /** ! * Runs {@link AbstractServer#shutdownNow()}. ! * ! * @author <a href="mailto:tho...@us...">Bryan Thompson</a> ! * @version $Id$ ! */ ! static class ShutdownThread extends Thread { ! ! final AbstractServer server; ! ! public ShutdownThread(AbstractServer server) { ! ! if (server == null) ! throw new IllegalArgumentException(); ! ! this.server = server; } + + public void run() { ! log.info("Runing shutdown."); server.shutdownNow(); } } + + // /** + // * Signal handler shuts down the server politely. 
+ // * + // * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + // * @version $Id$ + // */ + // static class ServerShutdownSignalHandler implements SignalHandler { + // + // private final AbstractServer server; + // + // private SignalHandler oldHandler; + // + // protected ServerShutdownSignalHandler(AbstractServer server) { + // + // if(server == null) throw new IllegalArgumentException(); + // + // this.server = server; + // + // } + // + // /** + // * Install the signal handler. + // */ + // public static SignalHandler install(String signalName, + // AbstractServer server) { + // + // Signal signal = new Signal(signalName); + // + // ServerShutdownSignalHandler newHandler = new ServerShutdownSignalHandler( + // server); + // + // newHandler.oldHandler = Signal.handle(signal, newHandler); + // + // log.info("Installed handler: " + signal + ", oldHandler=" + // + newHandler.oldHandler); + // + // return newHandler; + // + // } + // + // public void handle(Signal sig) { + // + // log.warn("Processing signal: "+sig); + // + // /* + // * Handle signal. + // */ + // server.shutdownNow(); + // + // try { + // + // // Chain back to previous handler, if one exists + // if ( oldHandler != SIG_DFL && oldHandler != SIG_IGN ) { + // + // oldHandler.handle(sig); + // + // } + // + // } catch (Exception ex) { + // + // log.fatal("Signal handler failed, reason "+ex); + // + // System.exit(1); + // + // } + // + // } + // + // } /** *************** *** 621,623 **** --- 738,777 ---- // } + + /* + * Note: You need to extend Remote in order for these APIs to be exported! 
+ */ + + public static interface RemoteAdministrable extends Remote, Administrable { + + } + + public static interface RemoteDestroyAdmin extends Remote, DestroyAdmin { + + } + + public static interface RemoteJoinAdmin extends Remote, JoinAdmin { + + } + + public static interface RemoteDiscoveryAdmin extends Remote, DiscoveryAdmin { + + } + + public static interface RemoteStorageLocationAdmin extends Remote, StorageLocationAdmin { + + } + + private static class FakeLifeCycle implements LifeCycle { + + public boolean unregister(Object arg0) { + + log.info(""); + + return true; + + } + + } + } |
From: Bryan T. <tho...@us...> - 2007-03-23 18:42:53
|
Update of /cvsroot/cweb/bigdata/lib In directory sc8-pr-cvs4.sourceforge.net:/tmp/cvs-serv32506/lib Added Files: startnow.jar Log Message: Working on service admin and service starter. --- NEW FILE: startnow.jar --- (This appears to be a binary file; contents omitted.) |