From: <tho...@us...> - 2010-07-27 18:37:05
Revision: 3318 http://bigdata.svn.sourceforge.net/bigdata/?rev=3318&view=rev Author: thompsonbry Date: 2010-07-27 18:36:58 +0000 (Tue, 27 Jul 2010) Log Message: ----------- Modified the Checkpoint record to use fixed length fields in the record in order to better support unversioned updates. Visited more of the tuple serializers to make sure that they are also versioned. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/Checkpoint.java trunk/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java trunk/bigdata/src/java/com/bigdata/btree/NOPTupleSerializer.java trunk/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java trunk/bigdata/src/java/com/bigdata/journal/Name2Addr.java trunk/bigdata/src/java/com/bigdata/mdi/MetadataIndex.java trunk/bigdata/src/java/com/bigdata/resources/JournalIndex.java trunk/bigdata/src/java/com/bigdata/service/CommitTimeIndex.java trunk/bigdata/src/java/com/bigdata/sparse/TPSTupleSerializer.java trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java trunk/bigdata/src/test/com/bigdata/journal/TestAll.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermTupleSerializer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationTupleSerializer.java Modified: trunk/bigdata/src/java/com/bigdata/btree/Checkpoint.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/Checkpoint.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/btree/Checkpoint.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -6,8 +6,6 @@ import java.io.ObjectOutput; import java.nio.ByteBuffer; -import org.CognitiveWeb.extser.LongPacker; - import com.bigdata.io.SerializerUtil; import com.bigdata.rawstore.IRawStore; @@ -316,19 +314,24 @@ this.counter = counter; } - + /** * Initial serialization version. + * <p> + * Note: The fields of the {@link Checkpoint} record use fixed length + * representations in order to support the possibility that we might do an + * in place update of a {@link Checkpoint} record as part of a data + * migration strategy. For the same reason, the {@link Checkpoint} record + * includes some unused fields. Those fields are available for future + * version changes without requiring us to change the length of the + * {@link Checkpoint} record. */ private static transient final int VERSION0 = 0x0; - + /** - * This serialization version adds the field recording the address of the - * optional bloom filter. That address defaults to zero (0L) for earlier - * versions, indicating that no bloom filter is stored for the - * {@link Checkpoint}. + * The current version. 
*/ - private static transient final int VERSION1 = 0x1; + private static transient final int VERSION = VERSION0; /** * Write the {@link Checkpoint} record on the store, setting @@ -381,58 +384,57 @@ public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - final int version = (int) LongPacker.unpackLong(in); + final int version = in.readInt(); - if (version != VERSION0 && version != VERSION1) + if (version != VERSION0) throw new IOException("Unknown version: " + version); this.addrMetadata = in.readLong(); this.addrRoot = in.readLong(); - if (version == VERSION1) { - - this.addrBloomFilter = in.readLong(); - - } else { - - this.addrBloomFilter = 0L; - - } + this.addrBloomFilter = in.readLong(); - this.height = (int) LongPacker.unpackLong(in); + this.height = in.readInt(); - this.nnodes = (int) LongPacker.unpackLong(in); + this.nnodes = in.readInt(); - this.nleaves = (int) LongPacker.unpackLong(in); + this.nleaves = in.readInt(); - this.nentries = (int) LongPacker.unpackLong(in); + this.nentries = in.readInt(); - this.counter = LongPacker.unpackLong(in); + this.counter = in.readLong(); + in.readLong(); // unused. + + in.readLong(); // unused. + } public void writeExternal(final ObjectOutput out) throws IOException { - LongPacker.packLong(out, VERSION1); + out.writeInt(VERSION); out.writeLong(addrMetadata); out.writeLong(addrRoot); - // Note: added in VERSION1. out.writeLong(addrBloomFilter); - LongPacker.packLong(out, height); + out.writeInt(height); - LongPacker.packLong(out, nnodes); + out.writeInt(nnodes); - LongPacker.packLong(out, nleaves); + out.writeInt(nleaves); - LongPacker.packLong(out, nentries); + out.writeInt(nentries); - LongPacker.packLong(out, counter); + out.writeLong(counter); + + out.writeLong(0L/*unused*/); + out.writeLong(0L/*unused*/); + } } Modified: trunk/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/btree/DefaultTupleSerializer.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -314,17 +314,17 @@ * Note: Explicit versioning for the {@link DefaultTupleSerializer} was * introduced with inlining of datatype literals for the RDF database. */ - private final static transient int VERSION0 = 0; + private final static transient byte VERSION0 = 0; /** * The current version. 
*/ - private final static transient int VERSION = VERSION0; + private final static transient byte VERSION = VERSION0; public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { - final short version = in.readShort(); + final byte version = in.readByte(); switch (version) { case VERSION0: delegateKeyBuilderFactory = (IKeyBuilderFactory) in.readObject(); @@ -342,7 +342,7 @@ public void writeExternal(final ObjectOutput out) throws IOException { - out.writeShort(VERSION); + out.writeByte(VERSION); out.writeObject(delegateKeyBuilderFactory); Modified: trunk/bigdata/src/java/com/bigdata/btree/NOPTupleSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/NOPTupleSerializer.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/btree/NOPTupleSerializer.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -132,16 +132,39 @@ } - public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + super.readExternal(in); + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + } - public void writeExternal(ObjectOutput out) throws IOException { + public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); + out.writeByte(VERSION); + } } Modified: trunk/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/journal/CommitRecordIndex.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -24,6 +24,8 @@ package com.bigdata.journal; import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.UUID; import com.bigdata.btree.BTree; @@ -579,7 +581,7 @@ /** * */ - private static final long serialVersionUID = -4410874047750277697L; + private static final long serialVersionUID = 1; /** * Used to (de-)serialize {@link Entry}s (NOT thread-safe). @@ -657,6 +659,41 @@ } - } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + /** + * The current version. 
+ */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + + } // CommitRecordIndexTupleSerializer + } Modified: trunk/bigdata/src/java/com/bigdata/journal/Name2Addr.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/journal/Name2Addr.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -30,6 +30,8 @@ import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.lang.ref.WeakReference; import java.util.Arrays; import java.util.Iterator; @@ -1269,6 +1271,41 @@ } - } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + + } // Name2AddrTupleSerializer + } Modified: trunk/bigdata/src/java/com/bigdata/mdi/MetadataIndex.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/mdi/MetadataIndex.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/mdi/MetadataIndex.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -466,8 +466,43 @@ } - } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + + } // PartitionLocatorTupleSerializer + /** * Passes the notice along to the {@link #view}. It caches de-serialized * locators and needs to drop them from its cache if they become stale. 
Modified: trunk/bigdata/src/java/com/bigdata/resources/JournalIndex.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/JournalIndex.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/resources/JournalIndex.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -23,6 +23,9 @@ */ package com.bigdata.resources; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.UUID; import com.bigdata.btree.BTree; @@ -330,6 +333,41 @@ } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + } } Modified: trunk/bigdata/src/java/com/bigdata/service/CommitTimeIndex.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/CommitTimeIndex.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/service/CommitTimeIndex.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -1,5 +1,8 @@ package com.bigdata.service; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.UUID; import com.bigdata.btree.BTree; @@ -290,6 +293,41 @@ } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + } } Modified: trunk/bigdata/src/java/com/bigdata/sparse/TPSTupleSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/sparse/TPSTupleSerializer.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/java/com/bigdata/sparse/TPSTupleSerializer.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -1,5 +1,9 @@ package com.bigdata.sparse; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; + import com.bigdata.btree.BTree; import com.bigdata.btree.DefaultTupleSerializer; import com.bigdata.btree.ITuple; @@ -77,4 +81,39 @@ } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + + /** + * The current version. 
+ */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + } Modified: trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -28,6 +28,9 @@ package com.bigdata.btree; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; import java.util.NoSuchElementException; import java.util.Properties; import java.util.UUID; @@ -208,6 +211,41 @@ } - } + /** + * The initial version (no additional persistent state). + */ + private final static transient byte VERSION0 = 0; + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + + public void readExternal(final ObjectInput in) throws IOException, + ClassNotFoundException { + + super.readExternal(in); + + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + + } + + public void writeExternal(final ObjectOutput out) throws IOException { + + super.writeExternal(out); + + out.writeByte(VERSION); + + } + + } // StringSerializer + } Modified: trunk/bigdata/src/test/com/bigdata/journal/TestAll.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata/src/test/com/bigdata/journal/TestAll.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -108,7 +108,7 @@ // } - suite.addTest( TestDiskJournal.suite() ); +// suite.addTest( TestDiskJournal.suite() ); suite.addTest( TestWORMStrategy.suite() ); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermTupleSerializer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermTupleSerializer.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Id2TermTupleSerializer.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -211,13 +211,13 @@ * namespace:UTF * </pre> */ - static final transient short VERSION0 = 0; + private static final transient byte VERSION0 = 0; - private static final transient short VERSION = VERSION0; + private static final transient byte VERSION = VERSION0; public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); - final short version = in.readShort(); + final byte version = in.readByte(); final String namespace; final String valueFactoryClass; switch (version) { @@ -254,17 +254,9 @@ public void writeExternal(final ObjectOutput out) throws IOException { super.writeExternal(out); - final short version = VERSION; - final String valueFactoryClass = valueFactory.getClass().getName(); - out.writeShort(version); - switch (version) { - case VERSION0: - out.writeUTF(namespace); - 
out.writeUTF(valueFactoryClass); - break; - default: - throw new AssertionError(); - } + out.writeByte(VERSION); + out.writeUTF(namespace); + out.writeUTF(valueFactory.getClass().getName()); } } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationTupleSerializer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationTupleSerializer.java 2010-07-27 18:17:27 UTC (rev 3317) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/JustificationTupleSerializer.java 2010-07-27 18:36:58 UTC (rev 3318) @@ -154,11 +154,31 @@ } + /** + * The initial version. + */ + private final static transient byte VERSION0 = 0; + + /** + * The current version. + */ + private final static transient byte VERSION = VERSION0; + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { super.readExternal(in); + final byte version = in.readByte(); + + switch (version) { + case VERSION0: + break; + default: + throw new UnsupportedOperationException("Unknown version: " + + version); + } + N = in.readByte(); } @@ -167,6 +187,8 @@ super.writeExternal(out); + out.writeByte(VERSION); + out.writeByte(N); }
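
The same versioning idiom recurs throughout this revision: each tuple serializer writes a single version byte after super.writeExternal() and validates it in readExternal(), while the Checkpoint record itself moves to fixed-width fields. The following condensed sketch mirrors the new Checkpoint layout shown above; the class name is hypothetical and the real record also carries nnodes, nleaves and nentries.

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

/**
 * Condensed sketch of the fixed-width Checkpoint layout adopted in r3318.
 * Hypothetical class name; the real record also carries nnodes, nleaves
 * and nentries. Every field uses a fixed-length representation and two
 * unused longs are reserved so that a later version can be rewritten in
 * place over an old record without changing the record length.
 */
public class FixedWidthCheckpointSketch implements Externalizable {

    /** Initial serialization version. */
    private static transient final int VERSION0 = 0x0;

    /** The current version. */
    private static transient final int VERSION = VERSION0;

    private long addrMetadata;
    private long addrRoot;
    private long addrBloomFilter;
    private int height;
    private long counter;

    /** De-serialization ctor (required by Externalizable). */
    public FixedWidthCheckpointSketch() {
    }

    public void readExternal(final ObjectInput in) throws IOException,
            ClassNotFoundException {

        // Fixed 4-byte version field replaces the packed LongPacker encoding.
        final int version = in.readInt();

        if (version != VERSION0)
            throw new IOException("Unknown version: " + version);

        addrMetadata = in.readLong();
        addrRoot = in.readLong();
        addrBloomFilter = in.readLong();
        height = in.readInt();
        counter = in.readLong();

        in.readLong(); // unused, reserved for future versions.
        in.readLong(); // unused, reserved for future versions.

    }

    public void writeExternal(final ObjectOutput out) throws IOException {

        out.writeInt(VERSION);

        out.writeLong(addrMetadata);
        out.writeLong(addrRoot);
        out.writeLong(addrBloomFilter);
        out.writeInt(height);
        out.writeLong(counter);

        out.writeLong(0L/* unused */);
        out.writeLong(0L/* unused */);

    }

}

Because every field has a fixed width, a Checkpoint record occupies the same number of bytes regardless of its values, which is what makes the in-place (unversioned) update mentioned in the log message possible; the two trailing unused longs leave room for future fields without growing the record.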
From: <tho...@us...> - 2010-07-27 21:15:56
Revision: 3326 http://bigdata.svn.sourceforge.net/bigdata/?rev=3326&view=rev Author: thompsonbry Date: 2010-07-27 21:15:49 +0000 (Tue, 27 Jul 2010) Log Message: ----------- Cleaned up IndexMetadata to remove support for IAddressSerializer and ISplitHandler. Those interfaces and their implementations are now gone. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java trunk/bigdata/src/java/com/bigdata/resources/ViewMetadata.java trunk/bigdata/src/java/com/bigdata/search/ReadIndexTask.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/benchmark/ThroughputMaster.java Removed Paths: ------------- trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java Deleted: trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -1,121 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Dec 26, 2006 - */ - -package com.bigdata.btree; - -import java.io.DataInput; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - -import com.bigdata.io.DataOutputBuffer; -import com.bigdata.rawstore.IAddressManager; - -/** - * Serializes each address as a long integer and does not attempt to pack or - * compress the addresses. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @deprecated This class is no longer used. The implementation exists solely to - * facilitate de-serialization of older {@link IndexMetadata} record - * versions. - */ -public class AddressSerializer implements IAddressSerializer, Externalizable { - - private static final long serialVersionUID = -1434032311796654357L; - -// public static final IAddressSerializer INSTANCE = new AddressSerializer(); - - /** - * De-serialization ctor. 
- */ - public AddressSerializer() { - } - - public void putChildAddresses(IAddressManager addressManager, DataOutputBuffer os, long[] childAddr, - int nchildren) throws IOException { - - throw new UnsupportedOperationException(); - -// for (int i = 0; i < nchildren; i++) { -// -// final long addr = childAddr[i]; -// -// /* -// * Children MUST have assigned persistent identity. -// */ -// if (addr == 0L) { -// -// throw new RuntimeException("Child is not persistent: index=" -// + i); -// -// } -// -// os.writeLong(addr); -// -// } - - } - - public void getChildAddresses(IAddressManager addressManager,DataInput is, long[] childAddr, - int nchildren) throws IOException { - - throw new UnsupportedOperationException(); - -// for (int i = 0; i < nchildren; i++) { -// -// final long addr = is.readLong(); -// -// if (addr == 0L) { -// -// throw new RuntimeException( -// "Child does not have persistent address: index=" + i); -// -// } -// -// childAddr[i] = addr; -// -// } - - } - - public void readExternal(ObjectInput arg0) throws IOException, ClassNotFoundException { - - // NOP (no state) - - } - - public void writeExternal(ObjectOutput arg0) throws IOException { - - // NOP (no state) - - } - -} Modified: trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -31,7 +31,6 @@ import java.io.Serializable; import com.bigdata.relation.accesspath.IAccessPath; -import com.bigdata.resources.DefaultSplitHandler; /** * An interface that is used to generate a bloom filter for an @@ -145,16 +144,12 @@ * scale-out indices DO NOT share the same limitation. Each time a scale-out * index is partitioned, it is broken into a mutable {@link BTree} for * absorbing writes for an index partition and zero or more - * {@link IndexSegment}s. The default configuration of the - * {@link DefaultSplitHandler} for a scale-out index caps the #of entries in - * an index partition at ~ 1.5M. However, many of those index entries are - * going to migrate to the {@link IndexSegment}s, so the #of index entries - * in the {@link BTree} is never that large. Finally, #of index entries in - * an {@link IndexSegment} is always known when the {@link IndexSegment} is + * {@link IndexSegment}s. Each time an overflow occurs, index entries are + * migrated to the {@link IndexSegment}s, so the #of index entries in the + * {@link BTree} is never that large. Finally, #of index entries in an + * {@link IndexSegment} is always known when the {@link IndexSegment} is * built, so the {@link BloomFilter} for an {@link IndexSegment} is always a * perfect fit. - * - * @see DefaultSplitHandler */ public static final transient BloomFilterFactory DEFAULT = new BloomFilterFactory( DEFAULT_N, DEFAULT_ERROR_RATE, DEFAULT_MAX_ERROR_RATE); @@ -234,15 +229,15 @@ * Create and return a new (empty) bloom filter for a {@link BTree} or * {@link IndexSegment}. * <p> - * The bloom filter can be provisioned with reference to - * {@link src/architecture/bloomfilter.xls}. Let <code>p</code> be the - * probability of a false positive (aka the error rate) and <code>n</code> - * be the #of index entries. The values p=.02 and n=1M result in a space - * requirement of 8656171 bits or approximately 1mb and uses ~ 8.6 bits per - * element. 
In order to achieve the same error rate with n=10M, the size - * requirements of the bloom filter will be approximately 10mb since the - * filter will still use ~ 8.6 bits per element for that error rate, or - * roughly one byte per index entry. + * The bloom filter can be provisioned with reference to {@link src + * /architecture/bloomfilter.xls}. Let <code>p</code> be the probability of + * a false positive (aka the error rate) and <code>n</code> be the #of index + * entries. The values p=.02 and n=1M result in a space requirement of + * 8656171 bits or approximately 1mb and uses ~ 8.6 bits per element. In + * order to achieve the same error rate with n=10M, the size requirements of + * the bloom filter will be approximately 10mb since the filter will still + * use ~ 8.6 bits per element for that error rate, or roughly one byte per + * index entry. * <p> * The maximum record length for the backing store can easily be exceeded by * a large bloom filter, large bloom filters will require significant time @@ -252,12 +247,12 @@ * While the scale-out architecture uses group commits and hence can be * expected to perform more commits during a bulk data load, it also uses * one bloom filter per {@link AbstractBTree} so the #of index entries is - * bounded by the configured {@link ISplitHandler}. On the other hand, the - * bloom filter performance will degrade as a scale-up index grows in size - * since the bloom filter can not be made very large for a scale-up store - * (the maximum record size is reduced in order to permit more records) and - * large indices will therefore experience increasing false positive rates - * as they grow. + * bounded by the configured {@link ISimpleSplitHandler} in an application + * dependent manner. On the other hand, the bloom filter performance will + * degrade as a scale-up index grows in size since the bloom filter can not + * be made very large for a scale-up store (the maximum record size is + * reduced in order to permit more records) and large indices will therefore + * experience increasing false positive rates as they grow. * <p> * Whether or not a bloom filter is useful depends on the application. The * bloom filter will ONLY be used for point tests such as contains(), Deleted: trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -1,79 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Dec 26, 2006 - */ - -package com.bigdata.btree; - -import java.io.DataInput; -import java.io.IOException; -import java.io.Serializable; - -import com.bigdata.io.DataOutputBuffer; -import com.bigdata.rawstore.IAddressManager; - -/** - * Interface for (de-)serialization of addresses of child nodes and leaves as - * recorded on a {@link Node}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @deprecated This interface is no longer used. It exists solely to facilitate - * de-serialization of older {@link IndexMetadata} record versions. - */ -public interface IAddressSerializer extends Serializable { - - /** - * De-serialize the child addresses for a node. - * - * @param is - * The input stream. - * @param childAddr - * The array into which the addresses must be written. - * @param nchildren - * The #of valid values in the array. The values in indices - * [0:n-1] are defined and must be read from the buffer and - * written on the array. - */ - public void getChildAddresses(IAddressManager addressManager, DataInput is, - long[] childAddr, int nchildren) throws IOException; - - /** - * Serialize the child addresses for a node. - * - * @param os - * The output stream. - * @param childAddr - * The array of child addresses to be written. - * @param nchildren - * The #of valid values in the array. The values in indices - * [0:n-1] are defined and must be written. - */ - public void putChildAddresses(IAddressManager addressManager, - DataOutputBuffer os, long[] childAddr, int nchildren) - throws IOException; - -} Deleted: trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -1,119 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Feb 12, 2008 - */ - -package com.bigdata.btree; - -import java.io.Serializable; - -import com.bigdata.sparse.SparseRowStore; - -/** - * An interface used to decide when and index partition is overcapacity and - * should be split, including the split point(s), and when an index partition is - * undercapacity and should be joined with its right sibling. - * <p> - * Note: applications frequency must constrain the allowable separator keys when - * splitting an index partition into two or more index partitions. 
For example, - * the {@link SparseRowStore} must to maintain an guarantee of atomic operations - * for a logical row, which is in turn defined as the ordered set of index - * entries sharing the same primary key. You can use this interface to impose - * application specific constraints such that the index partition boundaries - * only fall on acceptable separator keys. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @deprecated by {@link ISimpleSplitHandler}. This is only kept around to - * deserialize existing instances. - */ -public interface ISplitHandler extends Serializable { - -// /** -// * Return <code>true</code> if a cursory examination of an index partition -// * suggests that it SHOULD be split into 2 or more index partitions. -// * -// * @param rangeCount -// * A fast range count (may overestimate). -// * -// * @return <code>true</code> if the index partition should be split. -// */ -// public boolean shouldSplit(long rangeCount); -// -// /** -// * Return the percentage of a single nominal split that would be satisified -// * by an index partition based on the specified range count. If the index -// * partition has exactly the desired number of tuples, then return ONE -// * (1.0). If the index partition has 50% of the desired #of tuples, then -// * return <code>.5</code>. If the index partition could be used to build -// * two splits, then return TWO (2.0), etc. -// * -// * @param rangeCount -// * A fast range count (may overestimate). -// * -// * @return The percentage of a split per above. -// */ -// public double percentOfSplit(long rangeCount); -// -// /** -// * Return <code>true</code> if a cursory examination of an index partition -// * suggests that it SHOULD be joined with either its left or right sibling. -// * The basic determination is that the index partition is "undercapacity". -// * Normally this is decided in terms of the range count of the index -// * partition. -// * -// * @param rangeCount -// * A fast range count (may overestimate). -// * -// * @return <code>true</code> if the index partition should be joined. -// */ -// public boolean shouldJoin(long rangeCount); -// -// /** -// * Choose a set of splits that completely span the key range of the index -// * view. The first split MUST use the leftSeparator of the index view as its -// * leftSeparator. The last split MUST use the rightSeparator of the index -// * view as its rightSeparator. The #of splits SHOULD be chosen such that the -// * resulting index partitions are each at least 50% full. -// * -// * @param partitionIdFactory -// * -// * @param ndx -// * The source index partition. -// * -// * @return A {@link Split}[] array contains everything that we need to -// * define the new index partitions -or- <code>null</code> if a more -// * detailed examination reveals that the index SHOULD NOT be split -// * at this time. -// */ -//// * @param btreeCounters -//// * Performance counters for the index partition view collected -//// * since the last overflow. 
-// public Split[] getSplits(IPartitionIdFactory partitionIdFactory, -// ILocalBTreeView ndx);//, BTreeCounters btreeCounters); - -} Modified: trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -2377,86 +2377,87 @@ */ private static transient final int VERSION0 = 0x0; - /** - * This version introduced the {@link #asynchronousIndexWriteConfiguration}. - * Reads of an earlier version create a instance of that field based on a - * default configuration. - */ - private static transient final int VERSION1 = 0x1; - - /** - * This version introduced the {@link #scatterSplitConfiguration}. Reads of - * an earlier version create a instance of that field based on a default - * configuration. - */ - private static transient final int VERSION2 = 0x2; +// /** +// * This version introduced the {@link #asynchronousIndexWriteConfiguration}. +// * Reads of an earlier version create a instance of that field based on a +// * default configuration. +// */ +// private static transient final int VERSION1 = 0x1; +// +// /** +// * This version introduced the {@link #scatterSplitConfiguration}. Reads of +// * an earlier version create a instance of that field based on a default +// * configuration. +// */ +// private static transient final int VERSION2 = 0x2; +// +// /** +// * This version introduced {@link #indexSegmentLeafCacheTimeout}. Reads of +// * an earlier version use the +// * {@link Options#DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT} for this field. +// */ +// private static transient final int VERSION3 = 0x3; +// +// /** +// * This version introduced {@link #btreeRecordCompressorFactory} and +// * {@link #indexSegmentRecordCompressorFactory}. Both of these fields are +// * optional, which implies no compression provider. Reads of prior versions +// * set these fields to <code>null</code>. +// * +// * @see Options#BTREE_RECORD_COMPRESSOR_FACTORY +// * @see Options#INDEX_SEGMENT_RECORD_COMPRESSOR_FACTORY +// */ +// private static transient final int VERSION4 = 0x04; +// +// /** +// * This version introduced {@link #childLocks}. Reads of prior versions set +// * this field to <code>true</code>. +// * +// * @see Options#CHILD_LOCKS +// */ +// private static transient final int VERSION5 = 0x05; +// +// /** +// * This version introduced {@link #versionTimestampFilters}. Reads of prior +// * versions set this field to <code>false</code>. +// */ +// private static transient final int VERSION6 = 0x06; +// +// /** +// * This version gets rid of the read-retention queue capacity and nscan +// * properties and the index segment leaf cache capacity and timeout +// * properties. +// */ +// private static transient final int VERSION7 = 0x07; +// +// /** +// * This version gets rid of the IAddressSerializer interface used by the +// * older {@link NodeSerializer} class to (de-)serialize the child addresses +// * for a {@link Node}. +// */ +// private static transient final int VERSION8 = 0x08; +// +// /** +// * The childLocks feature was dropped in this version. +// */ +// private static transient final int VERSION9 = 0x09; +// +// /** +// * The split handler was changed from an implementation based on the #of +// * tuples to one based on the size on disk of an index segment after a +// * compacting merge. 
The old split handlers are replaced by a +// * <code>null</code> reference when they are de-serialized. +// * +// * @see ISplitHandler +// * @see ISimpleSplitHandler +// */ +// private static transient final int VERSION10 = 0x10; /** - * This version introduced {@link #indexSegmentLeafCacheTimeout}. Reads of - * an earlier version use the - * {@link Options#DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT} for this field. - */ - private static transient final int VERSION3 = 0x3; - - /** - * This version introduced {@link #btreeRecordCompressorFactory} and - * {@link #indexSegmentRecordCompressorFactory}. Both of these fields are - * optional, which implies no compression provider. Reads of prior versions - * set these fields to <code>null</code>. - * - * @see Options#BTREE_RECORD_COMPRESSOR_FACTORY - * @see Options#INDEX_SEGMENT_RECORD_COMPRESSOR_FACTORY - */ - private static transient final int VERSION4 = 0x04; - - /** - * This version introduced {@link #childLocks}. Reads of prior versions set - * this field to <code>true</code>. - * - * @see Options#CHILD_LOCKS - */ - private static transient final int VERSION5 = 0x05; - - /** - * This version introduced {@link #versionTimestampFilters}. Reads of prior - * versions set this field to <code>false</code>. - */ - private static transient final int VERSION6 = 0x06; - - /** - * This version gets rid of the read-retention queue capacity and nscan - * properties and the index segment leaf cache capacity and timeout - * properties. - */ - private static transient final int VERSION7 = 0x07; - - /** - * This version gets rid of the IAddressSerializer interface used by the - * older {@link NodeSerializer} class to (de-)serialize the child addresses - * for a {@link Node}. - */ - private static transient final int VERSION8 = 0x08; - - /** - * The childLocks feature was dropped in this version. - */ - private static transient final int VERSION9 = 0x09; - - /** - * The split handler was changed from an implementation based on the #of - * tuples to one based on the size on disk of an index segment after a - * compacting merge. The old split handlers are replaced by a - * <code>null</code> reference when they are de-serialized. - * - * @see ISplitHandler - * @see ISimpleSplitHandler - */ - private static transient final int VERSION10 = 0x10; - - /** * The version that will be serialized by this class. */ - private static transient final int CURRENT_VERSION = VERSION10; + private static transient final int CURRENT_VERSION = VERSION0; +// private static transient final int CURRENT_VERSION = VERSION10; /** * @todo review generated record for compactness. 
@@ -2468,16 +2469,16 @@ switch (version) { case VERSION0: - case VERSION1: - case VERSION2: - case VERSION3: - case VERSION4: - case VERSION5: - case VERSION6: - case VERSION7: - case VERSION8: - case VERSION9: - case VERSION10: +// case VERSION1: +// case VERSION2: +// case VERSION3: +// case VERSION4: +// case VERSION5: +// case VERSION6: +// case VERSION7: +// case VERSION8: +// case VERSION9: +// case VERSION10: break; default: throw new IOException("Unknown version: version=" + version); @@ -2499,94 +2500,94 @@ writeRetentionQueueScan = (int)LongPacker.unpackLong(in); - if (version < VERSION7) { - - /* btreeReadRetentionQueueCapacity = (int) */LongPacker - .unpackLong(in); +// if (version < VERSION7) { +// +// /* btreeReadRetentionQueueCapacity = (int) */LongPacker +// .unpackLong(in); +// +// /* btreeReadRetentionQueueScan = (int) */LongPacker.unpackLong(in); +// +// } - /* btreeReadRetentionQueueScan = (int) */LongPacker.unpackLong(in); - - } - pmd = (LocalPartitionMetadata)in.readObject(); btreeClassName = in.readUTF(); checkpointClassName = in.readUTF(); - if (version < VERSION8) { +// if (version < VERSION8) { +// +// // Read and discard the IAddressSerializer object. +// in.readObject(); +// +// } - // Read and discard the IAddressSerializer object. - in.readObject(); - - } - nodeKeysCoder = (IRabaCoder) in.readObject(); tupleSer = (ITupleSerializer) in.readObject(); - if (version < VERSION4) { +// if (version < VERSION4) { +// +// btreeRecordCompressorFactory = null; +// +// } else { - btreeRecordCompressorFactory = null; - - } else { - btreeRecordCompressorFactory = (IRecordCompressorFactory) in .readObject(); - } +// } conflictResolver = (IConflictResolver)in.readObject(); - if (version < VERSION5 || version >= VERSION9) { - -// childLocks = true; - - } else { - -// childLocks = - in.readBoolean(); - - } +// if (version < VERSION5 || version >= VERSION9) { +// +//// childLocks = true; +// +// } else { +// +//// childLocks = +// in.readBoolean(); +// +// } deleteMarkers = in.readBoolean(); versionTimestamps = in.readBoolean(); - if (version < VERSION6) { +// if (version < VERSION6) { +// +// versionTimestampFilters = false; +// +// } else { - versionTimestampFilters = false; - - } else { - versionTimestampFilters = in.readBoolean(); - - } +// +// } bloomFilterFactory = (BloomFilterFactory) in.readObject(); overflowHandler = (IOverflowHandler)in.readObject(); - if (version < VERSION10) { +// if (version < VERSION10) { +// +// /* +// * The old style of split handler is discarded. The default behavior +// * for the new style of split handler covers all known uses of the +// * old style split handler. While some indices (the sparse row store +// * for example) will have to register a new split handler for +// * safety, those indices were not safe for splits historically. +// */ +// +// // read and discard the old split handler. +// in.readObject(); +// +// splitHandler2 = null; +// +// } else { - /* - * The old style of split handler is discarded. The default behavior - * for the new style of split handler covers all known uses of the - * old style split handler. While some indices (the sparse row store - * for example) will have to register a new split handler for - * safety, those indices were not safe for splits historically. - */ - - // read and discard the old split handler. - in.readObject(); - - splitHandler2 = null; - - } else { - splitHandler2 = (ISimpleSplitHandler) in.readObject(); - } +// } /* * IndexSegment. 
@@ -2594,118 +2595,118 @@ indexSegmentBranchingFactor = (int) LongPacker.unpackLong(in); - if (version < VERSION7) { +// if (version < VERSION7) { +// +// /* indexSegmentLeafCacheCapacity = (int) */LongPacker +// .unpackLong(in); +// +// if (version < VERSION3) { +// +// /* +// * indexSegmentLeafCacheTimeout = Long +// * .parseLong(Options.DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT); +// */ +// +// } else { +// +// /* indexSegmentLeafCacheTimeout = (long) */LongPacker +// .unpackLong(in); +// +// } +// +// } - /* indexSegmentLeafCacheCapacity = (int) */LongPacker - .unpackLong(in); - - if (version < VERSION3) { - - /* - * indexSegmentLeafCacheTimeout = Long - * .parseLong(Options.DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT); - */ - - } else { - - /* indexSegmentLeafCacheTimeout = (long) */LongPacker - .unpackLong(in); - - } - - } - indexSegmentBufferNodes = in.readBoolean(); - if (version < VERSION4) { +// if (version < VERSION4) { +// +// indexSegmentRecordCompressorFactory = null; +// +// } else { - indexSegmentRecordCompressorFactory = null; - - } else { - indexSegmentRecordCompressorFactory = (IRecordCompressorFactory) in .readObject(); - } +// } - if (version < VERSION1) { - - /* - * Use the default configuration since not present in the serialized - * form before VERSION1. - */ - - final int masterQueueCapacity = Integer - .parseInt(Options.DEFAULT_MASTER_QUEUE_CAPACITY); - - final int masterChunkSize = Integer - .parseInt(Options.DEFAULT_MASTER_CHUNK_SIZE); - - final long masterChunkTimeoutNanos = Long - .parseLong(Options.DEFAULT_MASTER_CHUNK_TIMEOUT_NANOS); - - final long sinkIdleTimeoutNanos = Long - .parseLong(Options.DEFAULT_SINK_IDLE_TIMEOUT_NANOS); - - final long sinkPollTimeoutNanos = Long - .parseLong(Options.DEFAULT_SINK_POLL_TIMEOUT_NANOS); - - final int sinkQueueCapacity = Integer - .parseInt(Options.DEFAULT_SINK_QUEUE_CAPACITY); - - final int sinkChunkSize = Integer - .parseInt(Options.DEFAULT_SINK_CHUNK_SIZE); - - final long sinkChunkTimeoutNanos = Long - .parseLong(Options.DEFAULT_SINK_CHUNK_TIMEOUT_NANOS); - - asynchronousIndexWriteConfiguration = new AsynchronousIndexWriteConfiguration( - masterQueueCapacity,// - masterChunkSize,// - masterChunkTimeoutNanos,// - sinkIdleTimeoutNanos,// - sinkPollTimeoutNanos,// - sinkQueueCapacity,// - sinkChunkSize,// - sinkChunkTimeoutNanos// - ); - - } else { +// if (version < VERSION1) { +// +// /* +// * Use the default configuration since not present in the serialized +// * form before VERSION1. 
+// */ +// +// final int masterQueueCapacity = Integer +// .parseInt(Options.DEFAULT_MASTER_QUEUE_CAPACITY); +// +// final int masterChunkSize = Integer +// .parseInt(Options.DEFAULT_MASTER_CHUNK_SIZE); +// +// final long masterChunkTimeoutNanos = Long +// .parseLong(Options.DEFAULT_MASTER_CHUNK_TIMEOUT_NANOS); +// +// final long sinkIdleTimeoutNanos = Long +// .parseLong(Options.DEFAULT_SINK_IDLE_TIMEOUT_NANOS); +// +// final long sinkPollTimeoutNanos = Long +// .parseLong(Options.DEFAULT_SINK_POLL_TIMEOUT_NANOS); +// +// final int sinkQueueCapacity = Integer +// .parseInt(Options.DEFAULT_SINK_QUEUE_CAPACITY); +// +// final int sinkChunkSize = Integer +// .parseInt(Options.DEFAULT_SINK_CHUNK_SIZE); +// +// final long sinkChunkTimeoutNanos = Long +// .parseLong(Options.DEFAULT_SINK_CHUNK_TIMEOUT_NANOS); +// +// asynchronousIndexWriteConfiguration = new AsynchronousIndexWriteConfiguration( +// masterQueueCapacity,// +// masterChunkSize,// +// masterChunkTimeoutNanos,// +// sinkIdleTimeoutNanos,// +// sinkPollTimeoutNanos,// +// sinkQueueCapacity,// +// sinkChunkSize,// +// sinkChunkTimeoutNanos// +// ); +// +// } else { asynchronousIndexWriteConfiguration = (AsynchronousIndexWriteConfiguration) in .readObject(); - } +// } - if (version < VERSION2) { +// if (version < VERSION2) { +// +// /* +// * Use the default configuration since not present in the serialized +// * form before VERSION2. +// */ +// +// final boolean scatterSplitEnabled = Boolean +// .parseBoolean(Options.DEFAULT_SCATTER_SPLIT_ENABLED); +// +// final double scatterSplitPercentOfSplitThreshold = Double +// .parseDouble(Options.DEFAULT_SCATTER_SPLIT_PERCENT_OF_SPLIT_THRESHOLD); +// +// final int scatterSplitDataServicesCount = Integer +// .parseInt(Options.DEFAULT_SCATTER_SPLIT_DATA_SERVICE_COUNT); +// +// final int scatterSplitIndexPartitionsCount = Integer +// .parseInt(Options.DEFAULT_SCATTER_SPLIT_INDEX_PARTITION_COUNT); +// +// this.scatterSplitConfiguration = new ScatterSplitConfiguration( +// scatterSplitEnabled, scatterSplitPercentOfSplitThreshold, +// scatterSplitDataServicesCount, +// scatterSplitIndexPartitionsCount); +// +// } else { - /* - * Use the default configuration since not present in the serialized - * form before VERSION2. 
- */ - - final boolean scatterSplitEnabled = Boolean - .parseBoolean(Options.DEFAULT_SCATTER_SPLIT_ENABLED); - - final double scatterSplitPercentOfSplitThreshold = Double - .parseDouble(Options.DEFAULT_SCATTER_SPLIT_PERCENT_OF_SPLIT_THRESHOLD); - - final int scatterSplitDataServicesCount = Integer - .parseInt(Options.DEFAULT_SCATTER_SPLIT_DATA_SERVICE_COUNT); - - final int scatterSplitIndexPartitionsCount = Integer - .parseInt(Options.DEFAULT_SCATTER_SPLIT_INDEX_PARTITION_COUNT); - - this.scatterSplitConfiguration = new ScatterSplitConfiguration( - scatterSplitEnabled, scatterSplitPercentOfSplitThreshold, - scatterSplitDataServicesCount, - scatterSplitIndexPartitionsCount); - - } else { - scatterSplitConfiguration = (ScatterSplitConfiguration) in.readObject(); - } +// } } @@ -2752,30 +2753,30 @@ out.writeObject(tupleSer); - if (version >= VERSION4) { +// if (version >= VERSION4) { out.writeObject(btreeRecordCompressorFactory); - } +// } out.writeObject(conflictResolver); - if (version >= VERSION5 && version < VERSION9 ) { +// if (version >= VERSION5 && version < VERSION9 ) { +// +//// out.writeBoolean(childLocks); +// out.writeBoolean(false/* childLocks */); +// +// } -// out.writeBoolean(childLocks); - out.writeBoolean(false/* childLocks */); - - } - out.writeBoolean(deleteMarkers); out.writeBoolean(versionTimestamps); - if (version >= VERSION6) { +// if (version >= VERSION6) { out.writeBoolean(versionTimestampFilters); - } +// } out.writeObject(bloomFilterFactory); @@ -2800,25 +2801,25 @@ out.writeBoolean(indexSegmentBufferNodes); - if (version >= VERSION4) { +// if (version >= VERSION4) { out.writeObject(btreeRecordCompressorFactory); - } +// } +// +// if (version >= VERSION1) { - if (version >= VERSION1) { - // introduced in VERSION1 out.writeObject(asynchronousIndexWriteConfiguration); - } +// } +// +// if (version >= VERSION2) { - if (version >= VERSION2) { - // introduced in VERSION2 out.writeObject(scatterSplitConfiguration); - } +// } } Deleted: trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -1,124 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Dec 26, 2006 - */ - -package com.bigdata.btree; - -import java.io.DataInput; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; - -import com.bigdata.io.DataOutputBuffer; -import com.bigdata.rawstore.IAddressManager; -import com.bigdata.rawstore.IRawStore; - -/** - * Packs the addresses using the {@link IAddressManager} for the backing - * {@link IRawStore}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * @deprecated This class is no longer used. The implementation exists solely to - * facilitate de-serialization of older {@link IndexMetadata} record - * versions. - */ -public class PackedAddressSerializer implements IAddressSerializer, Externalizable { - - /** - * - */ - private static final long serialVersionUID = 7533128830948670801L; - -// public static final IAddressSerializer INSTANCE = new PackedAddressSerializer(); - - public PackedAddressSerializer() { - - } - - public void putChildAddresses(IAddressManager addressManager, DataOutputBuffer os, - long[] childAddr, int nchildren) throws IOException { - - throw new UnsupportedOperationException(); - -// for (int i = 0; i < nchildren; i++) { -// -// final long addr = childAddr[i]; -// -// /* -// * Children MUST have assigned persistent identity. -// */ -// if (addr == 0L) { -// -// throw new RuntimeException("Child is not persistent: index=" -// + i); -// -// } -// -// addressManager.packAddr(os, addr); -// -// } - - } - - public void getChildAddresses(IAddressManager addressManager, DataInput is, - long[] childAddr, int nchildren) throws IOException { - - throw new UnsupportedOperationException(); - -// for (int i = 0; i < nchildren; i++) { -// -// final long addr = addressManager.unpackAddr(is); -// -// if (addr == 0L) { -// -// throw new RuntimeException( -// "Child does not have persistent address: index=" + i); -// -// } -// -// childAddr[i] = addr; -// -// } - - } - - - public void readExternal(ObjectInput arg0) throws IOException, ClassNotFoundException { - - // NOP (no state) - - } - - public void writeExternal(ObjectOutput arg0) throws IOException { - - // NOP (no state) - - } - -} Modified: trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -137,7 +137,7 @@ * <p> * Reads will read through the <i>writeSet</i> and then the resource(s) in * the <i>groundState</i> in the order in which they are given. A read is - * satisified by the first resource containing an index entry for the search + * satisfied by the first resource containing an index entry for the search * key. 
* <p> * Writes will first read through looking for a @todo javadoc Modified: trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -65,7 +65,6 @@ import com.bigdata.mdi.IResourceMetadata; import com.bigdata.mdi.LocalPartitionMetadata; import com.bigdata.relation.accesspath.AbstractAccessPath; -import com.bigdata.resources.DefaultSplitHandler; import com.bigdata.service.MetadataService; import com.bigdata.service.Split; @@ -86,10 +85,9 @@ * {@link ILocalBTreeView} and {@link IAutoboxBTree}. * * @todo Can I implement {@link ILinearList} here? That would make it possible - * to use keyAt() and indexOf() and might pave the way for a faster - * {@link DefaultSplitHandler} and also for a {@link MetadataService} that - * supports overflow since the index segments could be transparent at that - * point. + * to use keyAt() and indexOf() and might pave the way for a + * {@link MetadataService} that supports overflow since the index segments + * could be transparent at that point. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ Deleted: trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java 2010-07-27 21:09:15 UTC (rev 3325) +++ trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java 2010-07-27 21:15:49 UTC (rev 3326) @@ -1,922 +0,0 @@ -package com.bigdata.resources; - -import com.bigdata.bfs.BigdataFileSystem; -import com.bigdata.btree.ISimpleSplitHandler; -import com.bigdata.btree.ISplitHandler; - -/** - * A configurable default policy for deciding when and where to split an index - * partition into 2 or more index partitions. - * <p> - * Note: There is probably no single value for {@link #getEntryCountPerSplit()} - * that is going to be "right" across applications. The space requirements for - * keys is very difficult to estimate since leading key compression will often - * provide a good win. Likewise, indices are free to use compression on their - * values as well so the size of the byte[] values is not a good estimate of - * their size in the index. - * <p> - * Note: The #of index entries is a good proxy for the space requirements of - * most indices. The {@link BigdataFileSystem} is one case where the space - * requirements could be quite different since 64M blocks may be stored along - * with the index entries, however in that case you can also test for the size - * of the index segment that is part of the view and decide that it's time to - * split the view. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: DefaultSplitHandler.java 2265 2009-10-26 12:51:06Z thompsonbry - * $ - * - * @deprecated by {@link ISimpleSplitHandler}. This is only kept around to - * deserialize existing instances. - */ -public class DefaultSplitHandler implements ISplitHandler { - - /** - * - */ - private static final long serialVersionUID = 1675517991163473445L; - -// /** -// * Logger. -// */ -// protected static final Logger log = Logger -// .getLogger(DefaultSplitHandler.class); -// -// /** -// * True iff the {@link #log} level is DEBUG or less. 
-// */ -// final protected static boolean DEBUG = log.isDebugEnabled(); -// -// /** -// * True iff the {@link #log} level is INFO or less. -// */ -// final protected static boolean INFO = log.isInfoEnabled(); - - private int minimumEntryCount; - - private int entryCountPerSplit; - - private int sampleRate; - - private double overCapacityMultiplier; - - private double underCapacityMultiplier; - -// public String toString() { -// -// final StringBuilder sb = new StringBuilder(); -// -// sb.append(getClass().getName()); -// -// sb.append("{ minimumEntryCount=" + minimumEntryCount); -// -// sb.append(", entryCountPerSplit=" + entryCountPerSplit); -// -// sb.append(", sampleRate=" + sampleRate); -// -// sb.append(", overCapacityMultiplier=" + overCapacityMultiplier); -// -// sb.append(", underCapacityMultiplier=" + underCapacityMultiplier); -// -// sb.append(", targetCountPerSplit=" + getTargetEntryCountPerSplit()); -// -// sb.append("}"); -// -// return sb.toString(); -// -// } - - /** - * De-serialization ctor. - */ - public DefaultSplitHandler() { - - } - -// /** -// * Setup a split handler. -// * -// * @param minimumEntryCount -// * An index partition which has no more than this many tuples -// * should be joined with its rightSibling (if any). -// * @param entryCountPerSplit -// * The target #of tuples for an index partition. -// * @param overCapacityMultiplier -// * The index partition will be split when its actual entry count -// * is GTE to -// * <code>overCapacityMultiplier * entryCountPerSplit</code> -// * @param underCapacityMultiplier -// * When an index partition will be split, the #of new index -// * partitions will be chosen such that each index partition is -// * approximately <i>underCapacityMultiplier</i> full. -// * @param sampleRate -// * The #of samples to take per estimated split (non-negative, and -// * generally on the order of 10s of samples). The purpose of the -// * samples is to accommodate the actual distribution of the keys -// * in the index. -// * -// * @throws IllegalArgumentException -// * if any argument, or combination or arguments, is out of -// * range. -// */ -// public DefaultSplitHandler(final int minimumEntryCount, -// final int entryCountPerSplit, final double overCapacityMultiplier, -// final double underCapacityMultiplier, final int sampleRate) { -// -// /* -// * Bootstap parameter settings. -// * -// * First, verify combination of parameters is legal. -// */ -// assertSplitJoinStable(minimumEntryCount, entryCountPerSplit, -// underCapacityMultiplier); -// -// /* -// * Now that we know the combination is legal, set individual parameters -// * that have dependencies in their legal range. This will let us set the -// * individual parameters with their settor methods below. -// */ -// this.minimumEntryCount = minimumEntryCount; -// this.entryCountPerSplit = entryCountPerSplit; -// this.underCapacityMultiplier = underCapacityMultiplier; -// -// /* -// * Use individual set methods to validate each parameter by itself. -// */ -// -// setMinimumEntryCount(minimumEntryCount); -// -// setEntryCountPerSplit(entryCountPerSplit); -// -// setOverCapacityMultiplier(overCapacityMultiplier); -// -// setUnderCapacityMultiplier(underCapacityMultiplier); -// -// setSampleRate(sampleRate); -// -// } - -// /** -// * Return <code>true</code> iff the range count of the index is less than -// * the {@link #getMinimumEntryCount()}. -// * <p> -// * Note: This relies on the fast range count, which is the upper bound on -// * the #of index entries. 
For this reason an index partition which has -// * undergone a lot of deletes will not underflow until it has gone through a -// * build to purge the deleted index entries. This is true even when all -// * index entries in the index partition have been deleted! -// */ -// public boolean shouldJoin(final long rangeCount) { -// -// final boolean shouldJoin = rangeCount <= getMinimumEntryCount(); -// -// if (INFO) -// log.info("shouldJoin=" + shouldJoin + " : rangeCount=" + rangeCount -// + ", minimumEntryCount=" + getMinimumEntryCount()); -// -// return shouldJoin; -// -// } -// -// /** -// * Verify that a split will not result in index partitions whose range -// * counts are such that they would be immediately eligible for a join. -// * -// * @throws IllegalArgumentException -// * if split / join is not stable for the specified values. -// * -// * @todo it might be worth while to convert this to a warning since actions -// * such as a scatter split are designed with the expectation that the -// * splits may be undercapacity but will fill up before the next -// * overflow (or that joins will simply not be triggered for N -// * overflows after a split). -// */ -// static void assertSplitJoinStable(final int minimumEntryCount, -// final int entryCountPerSplit, final double underCapacityMultiplier) { -// -// final int targetEntryCount = (int) Math.round(underCapacityMultiplier -// * entryCountPerSplit); -// -// if (minimumEntryCount > targetEntryCount) { -// -// throw new IllegalArgumentException("minimumEntryCount(" -// + minimumEntryCount + ") exceeds underCapacityMultiplier(" -// + underCapacityMultiplier + ") * entryCountPerSplit(" -// + entryCountPerSplit + ")"); -// -// } -// -// } -// -// /** -// * The minimum #of index entries before the index partition becomes eligible -// * to be joined. -// */ -// public int getMinimumEntryCount() { -// -// return minimumEntryCount; -// -// } -// -// public void setMinimumEntryCount(final int minimumEntryCount) { -// -// if (minimumEntryCount < 0) -// throw new IllegalArgumentException("minimumEntryCount=" -// + minimumEntryCount); -// -// assertSplitJoinStable(minimumEntryCount, getEntryCountPerSplit(), -// getUnderCapacityMultiplier()); -// -// this.minimumEntryCount = minimumEntryCount; -// -// } -// -// /** -// * The target maximum #of index entries in an index partition. -// */ -// public int getEntryCountPerSplit() { -// -// return entryCountPerSplit; -// -// } -// -// public void setEntryCountPerSplit(final int entryCountPerSplit) { -// -//// if (entryCountPerSplit < Options.MIN_BRANCHING_FACTOR) { -//// -//// throw new IllegalArgumentException( -//// "entryCountPerSplit must be GTE the minimum branching factor: entryCountPerSplit=" -//// + entryCountPerSplit -//// + ", minBranchingFactor=" -//// + Options.MIN_BRANCHING_FACTOR); -//// -//// } -// if (entryCountPerSplit < 1) { -// -// throw new IllegalArgumentException( -// "entryCountPerSplit must be GTE ONE(1): entryCountPerSplit=" -// + entryCountPerSplit); -// -// } -// -// assertSplitJoinStable(getMinimumEntryCount(), entryCountPerSplit, -// getUnderCapacityMultiplier()); -// -// this.entryCountPerSplit = entryCountPerSplit; -// -// } -// -// /** -// * The #of samples per estimated #of splits. 
-// */ -// public int getSampleRate() { -// -// return sampleRate; -// -// } -// -// public void setSampleRate(final int sampleRate) { -// -// if (sampleRate <= 0) -// throw new IllegalArgumentException(); -// -// this.sampleRate = sampleRate; -// -// } -// -// /** -// * The threshold for splitting an index is the -// * {@link #getOverCapacityMultiplier()} times -// * {@link #getEntryCountPerSplit()}. If there are fewer than this many -// * entries in the index then it will not be split. -// */ -// public double getOverCapacityMultiplier() { -// -// return overCapacityMultiplier; -// -// } -// -// /** -// * -// * @param overCapacityMultiplier -// * A value in [1.0:2.0]. -// */ -// public void setOverCapacityMultiplier(final double overCapacityMultiplier) { -// -// final double min = 1.0; -// final double max = 2.0; -// -// if (overCapacityMultiplier < min || overCapacityMultiplier > max) { -// -// throw new IllegalArgumentException("Must be in [" + min + ":" + max -// + "], but was " + overCapacityMultiplier); -// -// } -// -// this.overCapacityMultiplier = overCapacityMultiplier; -// -// } -// -// /** -// * This is the target under capacity rate for a new index partition. For -// * example, if the {@link #getEntryCountPerSplit()} is 5M and this -// * property is <code>.75</code> then an attempt will be made to divide -// * the index partition into N splits such that each split is at 75% of -// * the {@link #getEntryCountPerSplit()} capacity. -// */ -// public double getUnderCapacityMultiplier() { -// -// return underCapacityMultiplier; -// -// } -// -// /** -// * -// * @param underCapacityMultiplier -// * A value in [0.5,1.0). -// */ -// public void setUnderCapacityMultiplier(final double underCapacityMultiplier) { -// -// final double min = 0.5; -// final double max = 1.0; -// -// if (underCapacityMultiplier < min || underCapacityMultiplier >= max) { -// -// throw new IllegalArgumentException("Must be in [" + min + ":" + max -// + "), but was " + underCapacityMultiplier); -// -// } -// -// assertSplitJoinStable(getMinimumEntryCount(), getEntryCountPerSplit(), -// underCapacityMultiplier); -// -// this.underCapacityMultiplier = underCapacityMultiplier; -// -// } -// -// /** -// * The target #of tuples per split, which is given by: -// * -// * <pre> -// * targetEntryCountPerSplit := underCapacityMultiplier * entryCountPerSplit -// * </pre> -// * -// */ -// public int getTargetEntryCountPerSplit() { -// -// return (int) Math.round(getUnderCapacityMultiplier() -// * getEntryCountPerSplit()); -// -// } -// -// public boolean shouldSplit(final long rangeCount) { -// -// /* -// * Recommend split if the range count equals or exceeds the overcapacity -// * multiplier. -// */ -// -// if (rangeCount >= (getOverCapacityMultiplier() * entryCountPerSplit)) { -// -// if(INFO) -// log.info("Recommending split: rangeCount(" + rangeCount -// + ") >= (entryCountPerSplit(" + entryCountPerSplit -// + ") * overCapacityMultiplier(" -// + getOverCapacityMultiplier() + "))"); -// -// return true; -// -// } -// -// return false; -// -// } -// -// public double percentOfSplit(final long rangeCount) { -// -// final double percentOfSplit = (double) rangeCount -// / (double) entryCountPerSplit; -// -// if (INFO) -// log.info("percentOfSplit=" + percentOfSplit + " = rangeCount(" -// + rangeCount + ") / entryCountPerSplit(" -// + entryCountPerSplit + ")"); -// -// return percentOfSplit; -// -// } -// -// /** -// * A sample collected from a key-range scan. 
-// * -// * @author <a href="mailto:tho...@us...">Bryan Thompson</a> -// * @version $Id$ -// */ -// static public class Sample { -// -// /** -// * A key from the index. -// */ -// final byte[] key; -// -// /** -// * The origin zero (0) offset at which that key was found -// * (interpretation is that the key was visited by the Nth -// * {@link ITuple}). -// */ -// final int offset; -// -// public Sample(byte[] key, int offset) { -// -// assert key != null; -// -// assert offset >= 0; -// -// this.key = key; -// -// this.offset = offset; -// -// } -// -// public String toString() { -// -// return super.toString() + "{offset=" + offset + ", key=" -// + Arrays.toString(key) + "}"; -// -// } -// -// } -// -// /** -// * Sample index using a range scan choosing ({@link #getSampleRate()} x N) -// * {@link Sample}s. The key range scan will filter out both duplicates and -// * deleted i... [truncated message content] |
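Note: both PackedAddressSerializer above and the deleted DefaultSplitHandler follow the same retention pattern called out in their @deprecated tags - the class is kept on the classpath solely so that previously persisted records which reference it can still be de-serialized, while every operational method is disabled. A minimal sketch of that pattern is below; the class name is hypothetical and is not part of the bigdata codebase.

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

/**
 * Kept only so that records written by older versions can still be
 * de-serialized.
 *
 * @deprecated No longer used at runtime.
 */
public class LegacySerializer implements Externalizable {

    private static final long serialVersionUID = 1L;

    /** Public no-arg ctor required for de-serialization. */
    public LegacySerializer() {
    }

    /** Operational methods are disabled once the class is deprecated. */
    public void encode(final Object value) {
        throw new UnsupportedOperationException();
    }

    public void readExternal(final ObjectInput in) throws IOException,
            ClassNotFoundException {
        // NOP - this class never had any instance state.
    }

    public void writeExternal(final ObjectOutput out) throws IOException {
        // NOP - no state to write.
    }
}

Because the serialized form is empty, old records referencing the class continue to de-serialize cleanly, while any attempt to actually use the instance fails fast.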
From: <tho...@us...> - 2010-07-29 14:55:47
|
Revision: 3343 http://bigdata.svn.sourceforge.net/bigdata/?rev=3343&view=rev Author: thompsonbry Date: 2010-07-29 14:55:40 +0000 (Thu, 29 Jul 2010) Log Message: ----------- Progress on the TermIdEncoder [https://sourceforge.net/apps/trac/bigdata/ticket/124]. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/BTree.java trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java trunk/bigdata/src/java/com/bigdata/mdi/LocalPartitionMetadata.java trunk/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java trunk/bigdata/src/java/com/bigdata/resources/CompactingMergeTask.java trunk/bigdata/src/java/com/bigdata/resources/IncrementalBuildTask.java trunk/bigdata/src/java/com/bigdata/resources/JoinIndexPartitionTask.java trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java trunk/bigdata/src/java/com/bigdata/resources/SplitIndexPartitionTask.java trunk/bigdata/src/java/com/bigdata/resources/SplitUtility.java trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java trunk/bigdata/src/java/com/bigdata/service/MetadataService.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexPartitionFencePosts.java trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask.java trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask2.java trunk/bigdata/src/test/com/bigdata/resources/TestMergeTask.java trunk/bigdata/src/test/com/bigdata/resources/TestOverflow.java trunk/bigdata/src/test/com/bigdata/resources/TestResourceManagerBootstrap.java trunk/bigdata/src/test/com/bigdata/resources/TestSegSplitter.java trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/TermIdEncoder.java Modified: trunk/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/BTree.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/btree/BTree.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -1339,8 +1339,8 @@ oldPmd.getLeftSeparatorKey(), // oldPmd.getRightSeparatorKey(),// newResources,// - oldPmd.getIndexPartitionCause(),// - "" // history is deprecated. + oldPmd.getIndexPartitionCause()// +// "" // history is deprecated. ); // update the local partition metadata on our cloned IndexMetadata. Modified: trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/btree/IndexSegmentBuilder.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -1227,9 +1227,9 @@ pmd.getLeftSeparatorKey(),// pmd.getRightSeparatorKey(),// null, // No resource metadata for indexSegment. 
- pmd.getIndexPartitionCause(), - pmd.getHistory()+ - "build("+pmd.getPartitionId()+",compactingMerge="+compactingMerge+") " + pmd.getIndexPartitionCause() +// ,pmd.getHistory()+ +// "build("+pmd.getPartitionId()+",compactingMerge="+compactingMerge+") " ) ); Modified: trunk/bigdata/src/java/com/bigdata/mdi/LocalPartitionMetadata.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/mdi/LocalPartitionMetadata.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/mdi/LocalPartitionMetadata.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -41,7 +41,6 @@ import com.bigdata.btree.IndexSegmentStore; import com.bigdata.journal.Journal; import com.bigdata.service.DataService; -import com.bigdata.service.Event; /** * An immutable object providing metadata about a local index partition, @@ -80,6 +79,9 @@ /** * * @see #getSourcePartitionId() + * + * @deprecated MoveTask manages without this field (it was required by the + * previous MOVE implementation). */ private int sourcePartitionId; @@ -111,53 +113,53 @@ */ private IndexPartitionCause cause; - /** - * A history of operations giving rise to the current partition metadata. - * E.g., register(timestamp), copyOnOverflow(timestamp), split(timestamp), - * join(partitionId,partitionId,timestamp), etc. This is truncated when - * serialized to keep it from growing without bound. - * - * @deprecated See {@link #getHistory()} - */ - private String history; +// /** +// * A history of operations giving rise to the current partition metadata. +// * E.g., register(timestamp), copyOnOverflow(timestamp), split(timestamp), +// * join(partitionId,partitionId,timestamp), etc. This is truncated when +// * serialized to keep it from growing without bound. +// * +// * @deprecated See {@link #getHistory()} +// */ +// private String history; +// +// /** +// * If the history string exceeds {@link #MAX_HISTORY_LENGTH} characters then +// * truncates it to the last {@link #MAX_HISTORY_LENGTH}-3 characters, +// * prepends "...", and returns the result. Otherwise returns the entire +// * history string. +// * +// * @deprecated See {@link #history} +// */ +// protected String getTruncatedHistory() { +// +// if (MAX_HISTORY_LENGTH == 0) +// return ""; +// +// String history = this.history; +// +// if(history.length() > MAX_HISTORY_LENGTH) { +// +// /* +// * Truncate the history. +// */ +// +// final int len = history.length(); +// +// final int fromIndex = len - (MAX_HISTORY_LENGTH - 3); +// +// assert fromIndex > 0 : "len=" + len + ", fromIndex=" + fromIndex +// + ", maxHistoryLength=" + MAX_HISTORY_LENGTH; +// +// history = "..." + history.substring(fromIndex, len); +// +// } +// +// return history; +// +// } /** - * If the history string exceeds {@link #MAX_HISTORY_LENGTH} characters then - * truncates it to the last {@link #MAX_HISTORY_LENGTH}-3 characters, - * prepends "...", and returns the result. Otherwise returns the entire - * history string. - * - * @deprecated See {@link #history} - */ - protected String getTruncatedHistory() { - - if (MAX_HISTORY_LENGTH == 0) - return ""; - - String history = this.history; - - if(history.length() > MAX_HISTORY_LENGTH) { - - /* - * Truncate the history. - */ - - final int len = history.length(); - - final int fromIndex = len - (MAX_HISTORY_LENGTH - 3); - - assert fromIndex > 0 : "len=" + len + ", fromIndex=" + fromIndex - + ", maxHistoryLength=" + MAX_HISTORY_LENGTH; - - history = "..." 
+ history.substring(fromIndex, len); - - } - - return history; - - } - - /** * De-serialization constructor. */ public LocalPartitionMetadata() { @@ -199,21 +201,21 @@ * the remote {@link DataService} will fill it in on arrival. * @param cause * The underlying cause for the creation of the index partition. - * @param history - * A human interpretable history of the index partition. The - * history is a series of whitespace delimited records each of - * more or less the form <code>foo(x,y,z)</code>. The history - * gets truncated when the {@link LocalPartitionMetadata} is - * serialized in order to prevent it from growing without bound. */ +// * @param history +// * A human interpretable history of the index partition. The +// * history is a series of whitespace delimited records each of +// * more or less the form <code>foo(x,y,z)</code>. The history +// * gets truncated when the {@link LocalPartitionMetadata} is +// * serialized in order to prevent it from growing without bound. public LocalPartitionMetadata(// final int partitionId,// final int sourcePartitionId,// final byte[] leftSeparatorKey,// final byte[] rightSeparatorKey,// final IResourceMetadata[] resources,// - final IndexPartitionCause cause, - final String history + final IndexPartitionCause cause +// final String history ) { /* @@ -232,7 +234,7 @@ this.cause = cause; - this.history = history; +// this.history = history; /* * Test arguments. @@ -440,23 +442,23 @@ } - /** - * A history of the changes to the index partition. - * - * @deprecated I've essentially disabled the history (it is always empty - * when it is persisted). I found it nearly impossible to read. - * There are much saner ways to track what is going on in the - * federation. An analysis of the {@link Event} log is much more - * useful. If nothing else, you could examine the index - * partition in the metadata index by scanning the commit points - * and reading its state in each commit and reporting all state - * changes. - */ - final public String getHistory() { - - return history; - - } +// /** +// * A history of the changes to the index partition. +// * +// * @deprecated I've essentially disabled the history (it is always empty +// * when it is persisted). I found it nearly impossible to read. +// * There are much saner ways to track what is going on in the +// * federation. An analysis of the {@link Event} log is much more +// * useful. If nothing else, you could examine the index +// * partition in the metadata index by scanning the commit points +// * and reading its state in each commit and reporting all state +// * changes. +// */ +// final public String getHistory() { +// +// return history; +// +// } final public int hashCode() { @@ -466,7 +468,7 @@ } // Note: used by assertEquals in the test cases. - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) return true; @@ -520,7 +522,7 @@ ", rightSeparator="+BytesUtil.toString(rightSeparatorKey)+ ", resourceMetadata="+Arrays.toString(resources)+ ", cause="+cause+ - ", history="+history+ +// ", history="+history+ "}" ; @@ -537,6 +539,17 @@ * but that field is only serialized for a journal. */ private static final transient short VERSION1 = 0x1; + + /** + * This version serializes the {@link #partitionId} as 32-bits clean and + * gets rid of the <code>history</code> field. + */ + private static final transient short VERSION2 = 0x2; + + /** + * The current version. 
+ */ + private static final transient short VERSION = VERSION2; public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -546,13 +559,18 @@ switch (version) { case VERSION0: case VERSION1: + case VERSION2: break; default: throw new IOException("Unknown version: " + version); } - - partitionId = (int) LongPacker.unpackLong(in); + if (version < VERSION2) { + partitionId = (int) LongPacker.unpackLong(in); + } else { + partitionId = in.readInt(); + } + sourcePartitionId = in.readInt(); // MAY be -1. final int nresources = ShortPacker.unpackShort(in); @@ -579,7 +597,9 @@ cause = (IndexPartitionCause)in.readObject(); - history = in.readUTF(); + if (version < VERSION2) { + /* history = */in.readUTF(); + } resources = nresources>0 ? new IResourceMetadata[nresources] : null; @@ -613,9 +633,13 @@ public void writeExternal(final ObjectOutput out) throws IOException { - ShortPacker.packShort(out, VERSION1); + ShortPacker.packShort(out, VERSION); - LongPacker.packLong(out, partitionId); + if (VERSION < VERSION2) { + LongPacker.packLong(out, partitionId); + } else { + out.writeInt(partitionId); + } out.writeInt(sourcePartitionId); // MAY be -1. @@ -640,7 +664,9 @@ out.writeObject(cause); - out.writeUTF(getTruncatedHistory()); + if (VERSION < VERSION2) { + out.writeUTF("");// getTruncatedHistory() + } /* * Note: we serialize using the IResourceMetadata interface so that we Modified: trunk/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -224,19 +224,36 @@ } + /** + * The original version. + */ private static final transient short VERSION0 = 0x0; + + /** + * The {@link #partitionId} is now 32-bits clean. + */ + private static final transient short VERSION1 = 0x0; + /** + * The current version. 
+ */ + private static final transient short VERSION = VERSION1; + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { final short version = ShortPacker.unpackShort(in); - if (version != VERSION0) { - + if (version != VERSION0 && version != VERSION1) { + throw new IOException("Unknown version: "+version); } - partitionId = (int)LongPacker.unpackLong(in); + if (version < VERSION1) { + partitionId = (int) LongPacker.unpackLong(in); + } else { + partitionId = in.readInt(); + } dataServiceUUID = new UUID(in.readLong()/*MSB*/,in.readLong()/*LSB*/); @@ -264,9 +281,13 @@ public void writeExternal(ObjectOutput out) throws IOException { - ShortPacker.packShort(out, VERSION0); + ShortPacker.packShort(out, VERSION); - LongPacker.packLong(out, partitionId); + if (VERSION < VERSION1) { + LongPacker.packLong(out, partitionId); + } else { + out.writeInt(partitionId); + } out.writeLong(dataServiceUUID.getMostSignificantBits()); Modified: trunk/bigdata/src/java/com/bigdata/resources/CompactingMergeTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/CompactingMergeTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/CompactingMergeTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -1178,21 +1178,22 @@ currentpmd.getLeftSeparatorKey(),// currentpmd.getRightSeparatorKey(),// newResources, // - currentpmd.getIndexPartitionCause(), - currentpmd.getHistory() - + OverflowActionEnum.Merge// - + "(lastCommitTime=" - + segmentMetadata.getCreateTime()// - + ",btreeEntryCount=" - + btree.getEntryCount()// - + ",segmentEntryCount=" - + buildResult.builder.getCheckpoint().nentries// - + ",segment=" - + segmentMetadata.getUUID()// - + ",counter=" - + btree.getCounter().get()// - + ",oldResources=" - + Arrays.toString(currentResources) + ") ")); + currentpmd.getIndexPartitionCause() +// currentpmd.getHistory() +// + OverflowActionEnum.Merge// +// + "(lastCommitTime=" +// + segmentMetadata.getCreateTime()// +// + ",btreeEntryCount=" +// + btree.getEntryCount()// +// + ",segmentEntryCount=" +// + buildResult.builder.getCheckpoint().nentries// +// + ",segment=" +// + segmentMetadata.getUUID()// +// + ",counter=" +// + btree.getCounter().get()// +// + ",oldResources=" +// + Arrays.toString(currentResources) + ") " + )); // update the metadata associated with the btree btree.setIndexMetadata(indexMetadata); Modified: trunk/bigdata/src/java/com/bigdata/resources/IncrementalBuildTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/IncrementalBuildTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/IncrementalBuildTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -552,21 +552,22 @@ currentpmd.getLeftSeparatorKey(),// currentpmd.getRightSeparatorKey(),// newResources, // - currentpmd.getIndexPartitionCause(), - currentpmd.getHistory() - + OverflowActionEnum.Build// - + "(lastCommitTime=" - + segmentMetadata.getCreateTime()// - + ",segment=" - + segmentMetadata.getUUID()// - + ",#buildSources=" - + buildResult.sourceCount// - + ",merge=" - + buildResult.compactingMerge// - + ",counter=" - + btree.getCounter().get()// - + ",oldResources=" - + Arrays.toString(currentResources) + ") ")); + currentpmd.getIndexPartitionCause() +// , currentpmd.getHistory() +// + OverflowActionEnum.Build// +// + "(lastCommitTime=" +// + segmentMetadata.getCreateTime()// +// + ",segment=" +// + 
segmentMetadata.getUUID()// +// + ",#buildSources=" +// + buildResult.sourceCount// +// + ",merge=" +// + buildResult.compactingMerge// +// + ",counter=" +// + btree.getCounter().get()// +// + ",oldResources=" +// + Arrays.toString(currentResources) + ") " + )); // update the metadata associated with the btree btree.setIndexMetadata(indexMetadata); Modified: trunk/bigdata/src/java/com/bigdata/resources/JoinIndexPartitionTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/JoinIndexPartitionTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/JoinIndexPartitionTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -289,9 +289,10 @@ // Note: the live journal. getJournal().getResourceMetadata() // },// - IndexPartitionCause.join(resourceManager), - // new history line. - summary+" ")); + IndexPartitionCause.join(resourceManager) +// // new history line. +// , summary+" " + )); /* * Set the updated index metadata on the btree (required for it Modified: trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/MoveTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -1367,9 +1367,10 @@ // Historical writes from the source DS. historySegmentMetadata// }, - IndexPartitionCause.move(resourceManager), - // history line. - oldpmd.getHistory() + summary + " ")); + IndexPartitionCause.move(resourceManager) +// // history line. +// ,oldpmd.getHistory() + summary + " " + )); /* * Create the BTree to aborb writes for the target index Modified: trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/OverflowManager.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -2486,16 +2486,18 @@ oldpmd.getLeftSeparatorKey(),// oldpmd.getRightSeparatorKey(),// newResources, // - oldpmd.getIndexPartitionCause(), oldpmd - .getHistory() - + OverflowActionEnum.Copy - + "(lastCommitTime=" - + lastCommitTime - + ",entryCount=" - + entryCount - + ",counter=" - + oldBTree.getCounter().get() - + ") ")); + oldpmd.getIndexPartitionCause()// +// , oldpmd +// .getHistory() +// + OverflowActionEnum.Copy +// + "(lastCommitTime=" +// + lastCommitTime +// + ",entryCount=" +// + entryCount +// + ",counter=" +// + oldBTree.getCounter().get() +// + ") " + )); } else { @@ -2535,15 +2537,17 @@ oldpmd.getLeftSeparatorKey(),// oldpmd.getRightSeparatorKey(),// newResources, // - oldpmd.getIndexPartitionCause(), oldpmd - .getHistory() - + "overflow(lastCommitTime=" - + lastCommitTime - + ",entryCount=" - + entryCount - + ",counter=" - + oldBTree.getCounter().get() - + ") ")); + oldpmd.getIndexPartitionCause()// +// , oldpmd +// .getHistory() +// + "overflow(lastCommitTime=" +// + lastCommitTime +// + ",entryCount=" +// + entryCount +// + ",counter=" +// + oldBTree.getCounter().get() +// + ") " + )); } Modified: trunk/bigdata/src/java/com/bigdata/resources/SplitIndexPartitionTask.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/SplitIndexPartitionTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ 
trunk/bigdata/src/java/com/bigdata/resources/SplitIndexPartitionTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -844,12 +844,12 @@ resourceManager.getLiveJournal() .getResourceMetadata(), splitResult.buildResults[i].segmentMetadata }, - IndexPartitionCause.split(resourceManager), - /* - * Note: history is record of the split. - */ - pmd.getHistory() + summary + " ")// - ); + IndexPartitionCause.split(resourceManager) +// /* +// * Note: history is record of the split. +// */ +// , pmd.getHistory() + summary + " "// + )); /* * create new btree. Modified: trunk/bigdata/src/java/com/bigdata/resources/SplitUtility.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/SplitUtility.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/SplitUtility.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -400,11 +400,12 @@ /* * Note: cause will be set by the atomic update task. */ - null,// - oldpmd.getHistory() - + "chooseTailSplitPoint(oldPartitionId=" - + oldpmd.getPartitionId() + ",nsplits=" + 2 - + ",newPartitionId=" + partitionId + ") "); + null// +// , oldpmd.getHistory() +// + "chooseTailSplitPoint(oldPartitionId=" +// + oldpmd.getPartitionId() + ",nsplits=" + 2 +// + ",newPartitionId=" + partitionId + ") " + ); final int fromIndex = 0; @@ -437,11 +438,12 @@ * Note: Cause will be set by the atomic update for the * split task. */ - null,// - oldpmd.getHistory() - + "chooseTailSplitPoint(oldPartitionId=" - + oldpmd.getPartitionId() + ",nsplits=" + 2 - + ",newPartitionId=" + partitionId + ") "); + null// +// , oldpmd.getHistory() +// + "chooseTailSplitPoint(oldPartitionId=" +// + oldpmd.getPartitionId() + ",nsplits=" + 2 +// + ",newPartitionId=" + partitionId + ") " + ); /* * Note: The index of the last tuple in the btree will be the @@ -1050,11 +1052,12 @@ /* * Note: cause will be set by the atomic update task. */ - null,// - oldpmd.getHistory() - + "chooseSplitPoint(oldPartitionId=" - + oldpmd.getPartitionId() + ",nsplits=" + N - + ",newPartitionId=" + partitionId + ") "); + null // +// , oldpmd.getHistory() +// + "chooseSplitPoint(oldPartitionId=" +// + oldpmd.getPartitionId() + ",nsplits=" + N +// + ",newPartitionId=" + partitionId + ") " + ); final Split split = new Split(newpmd, fromIndex, toIndex); Modified: trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/resources/StoreManager.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -2603,14 +2603,15 @@ getResourceMetadata() // }, // cause - IndexPartitionCause.register(resourceManager), - /* - * Note: Retains whatever history given by the - * caller. - */ - pmd.getHistory() + "register(name=" + name - + ",partitionId=" - + pmd.getPartitionId() + ") ")); + IndexPartitionCause.register(resourceManager) +// /* +// * Note: Retains whatever history given by the +// * caller. 
+// */ +// , pmd.getHistory() + "register(name=" + name +// + ",partitionId=" +// + pmd.getPartitionId() + ") " + )); } else { Modified: trunk/bigdata/src/java/com/bigdata/service/MetadataService.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/service/MetadataService.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/java/com/bigdata/service/MetadataService.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -1123,11 +1123,11 @@ * service. */ null, // [resources] Signal to the RegisterIndexTask. - null, // [cause] Signal to RegisterIndexTask - /* - * History. - */ - "createScaleOutIndex(name="+scaleOutIndexName+") " + null // [cause] Signal to RegisterIndexTask +// /* +// * History. +// */ +// ,"createScaleOutIndex(name="+scaleOutIndexName+") " )); dataServices[i].registerIndex(DataService Modified: trunk/bigdata/src/test/com/bigdata/btree/TestIndexPartitionFencePosts.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestIndexPartitionFencePosts.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/btree/TestIndexPartitionFencePosts.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -78,8 +78,8 @@ new byte[]{}, // leftSeparator null, // rightSeparator null, // no resource descriptions. - null, // no cause. - "" // history + null // no cause. +// , "" // history )); BTree ndx = BTree.create(new SimpleMemoryRawStore(),metadata); Modified: trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -638,8 +638,8 @@ new IResourceMetadata[] {// resourceManager.getLiveJournal().getResourceMetadata(), // }, // - IndexPartitionCause.register(resourceManager), - "" // history + IndexPartitionCause.register(resourceManager) +// ,"" // history )); // submit task to register the index and wait for it to complete. Modified: trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -127,8 +127,8 @@ new IResourceMetadata[] {// resourceManager.getLiveJournal().getResourceMetadata(), // }, // - IndexPartitionCause.register(resourceManager), - "" // history + IndexPartitionCause.register(resourceManager) +// "" // history )); // submit task to register the index and wait for it to complete. @@ -317,8 +317,8 @@ new IResourceMetadata[] {// resourceManager.getLiveJournal().getResourceMetadata(), // }, // - IndexPartitionCause.register(resourceManager), - "" // history + IndexPartitionCause.register(resourceManager) +// ,"" // history )); // submit task to register the index and wait for it to complete. 
Modified: trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask2.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask2.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask2.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -193,8 +193,8 @@ new IResourceMetadata[] {// resourceManager.getLiveJournal().getResourceMetadata(), // }, // - IndexPartitionCause.register(resourceManager), - "" // history + IndexPartitionCause.register(resourceManager) +// "" // history )); // submit task to register the index and wait for it to complete. Modified: trunk/bigdata/src/test/com/bigdata/resources/TestMergeTask.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestMergeTask.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestMergeTask.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -115,8 +115,8 @@ new IResourceMetadata[] {// resourceManager.getLiveJournal().getResourceMetadata(), // }, // - IndexPartitionCause.register(resourceManager), - "" // history + IndexPartitionCause.register(resourceManager) +// ,"" // history )); // submit task to register the index and wait for it to complete. Modified: trunk/bigdata/src/test/com/bigdata/resources/TestOverflow.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestOverflow.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestOverflow.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -121,8 +121,8 @@ new IResourceMetadata[]{ journal.getResourceMetadata() },// - IndexPartitionCause.register(resourceManager), - ""//history + IndexPartitionCause.register(resourceManager) +// ,""//history )); // create index and register on the journal. Modified: trunk/bigdata/src/test/com/bigdata/resources/TestResourceManagerBootstrap.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestResourceManagerBootstrap.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestResourceManagerBootstrap.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -652,8 +652,8 @@ new IndexPartitionCause( IndexPartitionCause.CauseEnum.Register, 0/*overflowCounter*/, System - .currentTimeMillis()/*lastCommitTime*/), - "bootstrap() "// history + .currentTimeMillis()/*lastCommitTime*/) +// ,"bootstrap() "// history )); /* Modified: trunk/bigdata/src/test/com/bigdata/resources/TestSegSplitter.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/TestSegSplitter.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata/src/test/com/bigdata/resources/TestSegSplitter.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -528,8 +528,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// , null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -664,8 +664,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// , null // history ); // Generates BTree w/ constrained keys and commits to store. 
@@ -774,8 +774,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -868,8 +868,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -970,8 +970,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -1140,8 +1140,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -1229,8 +1229,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -1316,8 +1316,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -1476,8 +1476,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// ,null // history ); // Generates BTree w/ constrained keys and commits to store. @@ -1622,8 +1622,8 @@ fromKey, // toKey,// new IResourceMetadata[] { store.getResourceMetadata() }, // - null, // cause - null // history + null // cause +// null // history ); // Generates BTree w/ constrained keys and commits to store. Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java =================================================================== --- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -513,7 +513,7 @@ + "\tRightSeparator"// + "\tView"// + "\tCause"// - + "\tHistory"// +// + "\tHistory"// + "\tIndexMetadata"// ; @@ -610,8 +610,8 @@ + rec.detailRec.pmd.getIndexPartitionCause() + "\""); - // history - sb.append("\t\"" + rec.detailRec.pmd.getHistory() + "\""); +// // history +// sb.append("\t\"" + rec.detailRec.pmd.getHistory() + "\""); // indexMetadata sb.append("\t\"" + rec.detailRec.indexMetadata + "\""); @@ -620,8 +620,8 @@ sb.append("\tN/A"); sb.append("\tN/A"); +// sb.append("\tN/A"); sb.append("\tN/A"); - sb.append("\tN/A"); } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/TermIdEncoder.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/TermIdEncoder.java 2010-07-29 14:49:19 UTC (rev 3342) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/TermIdEncoder.java 2010-07-29 14:55:40 UTC (rev 3343) @@ -35,13 +35,13 @@ * available for bit flags in the low two-bits of the resulting long value * (they will be ZERO(0) and may be overwritten by the caller). 
* <p> * - * The purpose of this encoding is to cause the N high bits to vary rapily + * The purpose of this encoding is to cause the N high bits to vary rapidly * as the local counter is driven up by writes on the index partition. This * has the effect of scattering writes on dependent indices (those using the * resulting long value as the sole or leading component of their key). * <p> * Given a source RDF/XML document with M "terms" distributed uniformly over - * K TERM2ID index partitions, each term has a uniform likelyhood of setting + * K TERM2ID index partitions, each term has a uniform likelihood of setting * any of the low bits of the local counter. After encoding, this means that * the N high-bits of encoded term identifier are uniformly distributed. * Assuming that the separator keys for the ID2TERM index divide the key * space into equal sized key-range shards then the updates against the * ID2TERM index partitions will be uniformly distributed as well. * <p> * The next bits in the encoded values are derived from the partition - * identifer followed by the term identifier and therefore have a strong + * identifier followed by the term identifier and therefore have a strong * bias for the index partition and the sequential assignment of local * counter values within an index partition respectively. This means that * read / write access within an index partition tends to have some |
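Note: the TermIdEncoder javadoc above describes the scattering scheme without showing the mechanics. The sketch below is one illustrative way to realize that description - it is NOT the actual TermIdEncoder implementation. It assumes a partition identifier in [0:2^31) and a local counter in [0:2^30), so that the N scattered bits, the partition identifier, and the remaining counter bits pack into the 62 bits above the two low-order flag bits (which are left ZERO for the caller, per the javadoc).

/**
 * Illustrative only: bit-reverses the low N counter bits into the high bits
 * of the encoded term identifier so that sequential counter assignments
 * scatter across the key space of dependent indices.
 */
public class ScatterEncoderSketch {

    /** #of low-order counter bits reflected into the high bits (1..30). */
    private final int n;

    public ScatterEncoderSketch(final int n) {
        if (n < 1 || n > 30)
            throw new IllegalArgumentException("n=" + n);
        this.n = n;
    }

    public long encode(final int partitionId, final int localCounter) {
        if (partitionId < 0 || localCounter < 0 || localCounter >= (1 << 30))
            throw new IllegalArgumentException();
        // The low N counter bits, bit-reversed. Their most significant bit
        // becomes the MSB of the result, so consecutive counter values flip
        // the top bit of the encoded identifier on every assignment.
        final long hi = (Integer.reverse(localCounter) >>> (32 - n))
                & ((1L << n) - 1);
        long v = hi;
        v = (v << 31) | partitionId;                // strong partition bias next
        v = (v << (31 - n)) | (localCounter >>> n); // then the remaining counter bits
        return v << 2; // low two bits are left ZERO for the caller's flags.
    }
}

With N = 8, for example, 256 consecutive counter assignments on a single TERM2ID index partition land in 256 evenly spaced regions of the dependent index's key space, which is the write-scattering behavior the javadoc describes.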
From: <tho...@us...> - 2010-07-30 15:02:12
|
Revision: 3373 http://bigdata.svn.sourceforge.net/bigdata/?rev=3373&view=rev Author: thompsonbry Date: 2010-07-30 15:02:05 +0000 (Fri, 30 Jul 2010) Log Message: ----------- Removed KeyBuilder.asSortKey() call from com.bigdata.sparse.Schema. The same logic exists as a private static method with a private static KeyBuilder instance for backward compatibility on the SparseRowStore. Moved KeyBuilder.asSortKey() into the test suite (on TestKeyBuilder). Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/BigdataMap.java trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java trunk/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java trunk/bigdata/src/java/com/bigdata/sparse/Schema.java trunk/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java trunk/bigdata/src/test/com/bigdata/btree/AbstractTupleCursorTestCase.java trunk/bigdata/src/test/com/bigdata/btree/TestBTreeLeafCursors.java trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java trunk/bigdata/src/test/com/bigdata/btree/TestChunkedIterators.java trunk/bigdata/src/test/com/bigdata/btree/TestCopyOnWrite.java trunk/bigdata/src/test/com/bigdata/btree/TestDirtyIterators.java trunk/bigdata/src/test/com/bigdata/btree/TestIncrementalWrite.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderCacheInteraction.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithCompactingMerge.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithIncrementalBuild.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java trunk/bigdata/src/test/com/bigdata/btree/TestIndexSegmentWithBloomFilter.java trunk/bigdata/src/test/com/bigdata/btree/TestInsertLookupRemoveKeysInRootLeaf.java trunk/bigdata/src/test/com/bigdata/btree/TestIterators.java trunk/bigdata/src/test/com/bigdata/btree/TestLinearListMethods.java trunk/bigdata/src/test/com/bigdata/btree/TestMutableBTreeCursors.java trunk/bigdata/src/test/com/bigdata/btree/TestReopen.java trunk/bigdata/src/test/com/bigdata/btree/TestSplitJoinRootLeaf.java trunk/bigdata/src/test/com/bigdata/btree/TestSplitJoinThreeLevels.java trunk/bigdata/src/test/com/bigdata/btree/TestSplitRootLeaf.java trunk/bigdata/src/test/com/bigdata/btree/TestTouch.java trunk/bigdata/src/test/com/bigdata/btree/TestTransientBTree.java trunk/bigdata/src/test/com/bigdata/btree/filter/TestTupleFilters.java trunk/bigdata/src/test/com/bigdata/btree/keys/AbstractUnicodeKeyBuilderTestCase.java trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java trunk/bigdata/src/test/com/bigdata/btree/keys/TestSuccessorUtil.java trunk/bigdata/src/test/com/bigdata/btree/raba/codec/AbstractRabaCoderTestCase.java trunk/bigdata/src/test/com/bigdata/btree/raba/codec/RandomURIGenerator.java trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask.java trunk/bigdata/src/test/com/bigdata/resources/TestBuildTask2.java trunk/bigdata/src/test/com/bigdata/resources/TestMergeTask.java trunk/bigdata/src/test/com/bigdata/resources/TestOverflow.java trunk/bigdata/src/test/com/bigdata/resources/TestResourceManagerBootstrap.java trunk/bigdata/src/test/com/bigdata/resources/TestSegSplitter.java trunk/bigdata/src/test/com/bigdata/service/TestMove.java trunk/bigdata/src/test/com/bigdata/service/TestRangeQuery.java trunk/bigdata/src/test/com/bigdata/service/TestRestartSafe.java trunk/bigdata/src/test/com/bigdata/service/TestScatterSplit.java trunk/bigdata/src/test/com/bigdata/service/TestSplitJoin.java 
trunk/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java trunk/bigdata-jini/src/test/com/bigdata/service/jini/TestBigdataClient.java trunk/bigdata-rdf/src/test/com/bigdata/rdf/internal/BlobOverflowHandler.java Modified: trunk/bigdata/src/java/com/bigdata/btree/BigdataMap.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/BigdataMap.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/java/com/bigdata/btree/BigdataMap.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -40,6 +40,7 @@ import com.bigdata.btree.filter.FilterConstructor; import com.bigdata.btree.filter.TupleFilter; import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.journal.ConcurrencyManager; /** @@ -71,7 +72,7 @@ * allows application keys that are instances of acceptable classes. This issue * is more critical for keys than for values since the keys define the total * index order and the default coercion rules for keys are provided by - * {@link KeyBuilder#asSortKey(Object)} which does not attenpt to partition the + * {@link TestKeyBuilder#asSortKey(Object)} which does not attenpt to partition the * key space by the application key type (keys are not safely polymorphic by * default). * <p> Modified: trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -88,7 +88,7 @@ * {@link #appendText(String, boolean, boolean)}. * </p> * - * @see KeyBuilder#asSortKey(Object) + * @see TestKeyBuilder#asSortKey(Object) * @see KeyBuilder#newInstance() * @see KeyBuilder#newUnicodeInstance() * @see KeyBuilder#newUnicodeInstance(Properties) Modified: trunk/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/java/com/bigdata/btree/keys/KeyBuilder.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -1065,78 +1065,8 @@ } - /* - * static helper methods. - */ - - /** - * Used to unbox an application key (convert it to an unsigned byte[]). - */ - static private final IKeyBuilder _keyBuilder = newUnicodeInstance(); - - /** - * Utility method converts an application key to a sort key (an unsigned - * byte[] that imposes the same sort order). - * <p> - * Note: This method is thread-safe. - * <p> - * Note: Strings are Unicode safe for the default locale. See - * {@link Locale#getDefault()}. If you require a specific local or different - * locals at different times or for different indices then you MUST - * provision and apply your own {@link KeyBuilder}. - * - * @param val - * An application key. - * - * @return The unsigned byte[] equivalent of that key. This will be - * <code>null</code> iff the <i>key</i> is <code>null</code>. If the - * <i>key</i> is a byte[], then the byte[] itself will be returned. - * - * @deprecated This method circumvents explicit configuration of the - * {@link KeyBuilder} and is used nearly exclusively by unit - * tests. 
While explicit configuration is not required for keys - * which do not include Unicode sort key components, this method - * also relies on a single global {@link KeyBuilder} instance - * protected by a lock. That lock is therefore a bottleneck. The - * correct practice is to use thread-local or per task - * {@link IKeyBuilder}s to avoid lock contention. - */ - @SuppressWarnings("unchecked") - public static final byte[] asSortKey(Object val) { + public byte[] getSortKey(final Object val) { - if (val == null) { - - return null; - - } - - if (val instanceof byte[]) { - - return (byte[]) val; - - } - - /* - * Synchronize on the keyBuilder to avoid concurrent modification of its - * state. - */ - - synchronized (_keyBuilder) { - - return _keyBuilder.getSortKey(val); - -// _keyBuilder.reset(); -// -// _keyBuilder.append( key ); -// -// return _keyBuilder.getKey(); - - } - - } - - public byte[] getSortKey(Object val) { - reset(); append( val ); Modified: trunk/bigdata/src/java/com/bigdata/sparse/Schema.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/sparse/Schema.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/java/com/bigdata/sparse/Schema.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -141,7 +141,7 @@ /* * One time encoding of the schema name as a Unicode sort key. */ - schemaBytes = KeyBuilder.asSortKey(name); + schemaBytes = asSortKey(name); } } @@ -501,5 +501,52 @@ + ",primaryKeyType=" + getPrimaryKeyType() + "}"; } + + /** + * Used for historical compatibility to unbox an application key (convert it + * to an unsigned byte[]). + */ + static private final IKeyBuilder _keyBuilder = KeyBuilder.newUnicodeInstance(); + + /** + * Utility method for historical compatibility converts an application key + * to a sort key (an unsigned byte[] that imposes the same sort order). + * <p> + * Note: This method is thread-safe. + * + * @param val + * An application key. + * + * @return The unsigned byte[] equivalent of that key. This will be + * <code>null</code> iff the <i>key</i> is <code>null</code>. If the + * <i>key</i> is a byte[], then the byte[] itself will be returned. + */ + @SuppressWarnings("unchecked") + private static final byte[] asSortKey(Object val) { + + if (val == null) { + + return null; + + } + + if (val instanceof byte[]) { + + return (byte[]) val; + + } + + /* + * Synchronize on the keyBuilder to avoid concurrent modification of its + * state. 
+ */ + + synchronized (_keyBuilder) { + + return _keyBuilder.getSortKey(val); + + } + } + } Modified: trunk/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -49,6 +49,7 @@ import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.keys.KV; import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.btree.raba.IRaba; import com.bigdata.btree.raba.codec.RandomKeysGenerator; import com.bigdata.cache.HardReferenceQueue; @@ -1097,7 +1098,7 @@ assertEquals("#entries",i,btree.nentries); - final byte[] key = KeyBuilder.asSortKey(ikey); + final byte[] key = TestKeyBuilder.asSortKey(ikey); assertNull(btree.lookup(key)); @@ -1133,7 +1134,7 @@ for( int i=0; i<keys.length; i++ ) { - byte[] key = KeyBuilder.asSortKey(keys[i]); + byte[] key = TestKeyBuilder.asSortKey(keys[i]); assertEquals(entries[i],btree.lookup(key)); assertEquals(entries[i],btree.remove(key)); @@ -1216,7 +1217,7 @@ assertEquals("#entries",i,btree.nentries); - final byte[] key = KeyBuilder.asSortKey(ikey); + final byte[] key = TestKeyBuilder.asSortKey(ikey); assertNull(btree.lookup(key)); @@ -1252,7 +1253,7 @@ for( int i=0; i<keys.length; i++ ) { - final byte[] key = KeyBuilder.asSortKey(keys[i]); + final byte[] key = TestKeyBuilder.asSortKey(keys[i]); assertEquals(entries[i],btree.lookup(key)); assertEquals(entries[i],btree.remove(key)); @@ -1497,7 +1498,7 @@ assertEquals("#entries", i, btree.nentries); - final byte[] key = KeyBuilder.asSortKey(ikey); + final byte[] key = TestKeyBuilder.asSortKey(ikey); assertNull(btree.lookup(key)); @@ -1687,7 +1688,7 @@ assertEquals("#entries",i,btree.nentries); - final byte[] key = KeyBuilder.asSortKey(ikey); + final byte[] key = TestKeyBuilder.asSortKey(ikey); assertNull(btree.lookup(key)); @@ -1774,7 +1775,7 @@ final Integer ikey = keys[index]; - final byte[] key = KeyBuilder.asSortKey(ikey); + final byte[] key = TestKeyBuilder.asSortKey(ikey); final SimpleEntry val = vals[index]; @@ -1816,7 +1817,7 @@ Map.Entry<Integer,SimpleEntry> entry = itr.next(); - final byte[] tmp = KeyBuilder.asSortKey(entry.getKey()); + final byte[] tmp = TestKeyBuilder.asSortKey(entry.getKey()); assertEquals("lookup(" + entry.getKey() + ")", entry .getValue(), btree.lookup(tmp)); @@ -1855,7 +1856,7 @@ for( int i=0; i<nkeys; i++ ) { - keys[i] = KeyBuilder.asSortKey(i+1); // Note: this produces dense keys with origin ONE(1). + keys[i] = TestKeyBuilder.asSortKey(i+1); // Note: this produces dense keys with origin ONE(1). vals[i] = new SimpleEntry(); @@ -2597,7 +2598,7 @@ for (int i = 0; i < N; i++) { // @todo param governs chance of a key collision and maximum #of distinct keys. - final byte[] key = KeyBuilder.asSortKey(r.nextInt(100000)); + final byte[] key = TestKeyBuilder.asSortKey(r.nextInt(100000)); // Note: #of bytes effects very little that we want to test so we keep it small. 
final byte[] val = new byte[4]; Modified: trunk/bigdata/src/test/com/bigdata/btree/AbstractTupleCursorTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/AbstractTupleCursorTestCase.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/AbstractTupleCursorTestCase.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -33,7 +33,7 @@ import junit.framework.TestCase2; -import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.btree.raba.ReadOnlyKeysRaba; import com.bigdata.rawstore.SimpleMemoryRawStore; @@ -287,65 +287,65 @@ // seek to a probe key that does not exist. assertEquals(null, cursor.seek(29)); assertEquals(null, cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(29),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(29),cursor.currentKey()); assertTrue(cursor.hasNext()); assertEquals(new TestTuple<String>(30, "James"), cursor.next()); assertEquals(new TestTuple<String>(30, "James"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(30),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(30),cursor.currentKey()); assertFalse(cursor.hasNext()); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.prior()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(20),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20),cursor.currentKey()); // seek to a probe key that does not exist. assertEquals(null, cursor.seek(9)); assertEquals(null, cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(9),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(9),cursor.currentKey()); assertTrue(cursor.hasNext()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.next()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); assertTrue(cursor.hasNext()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.next()); - assertEquals(KeyBuilder.asSortKey(20),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20),cursor.currentKey()); // seek to a probe key that does not exist and scan forward. assertEquals(null, cursor.seek(19)); assertEquals(null, cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(19),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(19),cursor.currentKey()); assertTrue(cursor.hasNext()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.next()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(20),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20),cursor.currentKey()); assertTrue(cursor.hasNext()); assertEquals(new TestTuple<String>(30, "James"), cursor.next()); assertEquals(new TestTuple<String>(30, "James"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(30),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(30),cursor.currentKey()); // seek to a probe key that does not exist and scan backward. 
assertEquals(null, cursor.seek(19)); assertEquals(null, cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(19),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(19),cursor.currentKey()); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.prior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.tuple()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); // seek to a probe key that does not exist (after all valid tuples). assertEquals(null, cursor.seek(31)); assertEquals(null, cursor.tuple()); assertTrue(cursor.isCursorPositionDefined()); - assertEquals(KeyBuilder.asSortKey(31),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(31),cursor.currentKey()); assertFalse(cursor.hasNext()); // seek to a probe key that does not exist (after all valid tuples). assertEquals(null, cursor.seek(31)); assertEquals(null, cursor.tuple()); assertTrue(cursor.isCursorPositionDefined()); - assertEquals(KeyBuilder.asSortKey(31),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(31),cursor.currentKey()); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(30, "James"), cursor.prior()); @@ -369,9 +369,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); - final byte[] toKey = KeyBuilder.asSortKey(20); + final byte[] toKey = TestKeyBuilder.asSortKey(20); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); @@ -412,9 +412,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(20); + final byte[] fromKey = TestKeyBuilder.asSortKey(20); - final byte[] toKey = KeyBuilder.asSortKey(30); + final byte[] toKey = TestKeyBuilder.asSortKey(30); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); @@ -458,9 +458,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); - final byte[] toKey = KeyBuilder.asSortKey(19); + final byte[] toKey = TestKeyBuilder.asSortKey(19); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); @@ -471,7 +471,7 @@ // assertEquals(KeyBuilder.asSortKey(19),cursor.currentKey()); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); } @@ -481,19 +481,19 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); - final byte[] toKey = KeyBuilder.asSortKey(29); + final byte[] toKey = TestKeyBuilder.asSortKey(29); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(20),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20),cursor.currentKey()); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); } @@ -503,16 +503,16 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); - final 
byte[] toKey = KeyBuilder.asSortKey(11); + final byte[] toKey = TestKeyBuilder.asSortKey(11); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); } @@ -574,15 +574,15 @@ assertEquals("ntuples", 5, btree.getEntryCount()); // The separator key is (30). - assertEquals(KeyBuilder.asSortKey(30), ((Node) btree.getRoot()) + assertEquals(TestKeyBuilder.asSortKey(30), ((Node) btree.getRoot()) .getKeys().get(0)); // Verify the expected keys in the 1st leaf. AbstractBTreeTestCase.assertKeys( // new ReadOnlyKeysRaba(new byte[][] {// - KeyBuilder.asSortKey(10), // - KeyBuilder.asSortKey(20), // + TestKeyBuilder.asSortKey(10), // + TestKeyBuilder.asSortKey(20), // }),// ((Node) btree.getRoot()).getChild(0/* 1st leaf */).getKeys()); @@ -590,9 +590,9 @@ AbstractBTreeTestCase.assertKeys( // new ReadOnlyKeysRaba(new byte[][] {// - KeyBuilder.asSortKey(30), // - KeyBuilder.asSortKey(40), // - KeyBuilder.asSortKey(50),// + TestKeyBuilder.asSortKey(30), // + TestKeyBuilder.asSortKey(40), // + TestKeyBuilder.asSortKey(50),// }),// ((Node) btree.getRoot()).getChild(1/* 2nd leaf */).getKeys()); @@ -627,16 +627,16 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); - final byte[] toKey = KeyBuilder.asSortKey(30); + final byte[] toKey = TestKeyBuilder.asSortKey(30); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(20),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20),cursor.currentKey()); } @@ -647,16 +647,16 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(0); + final byte[] fromKey = TestKeyBuilder.asSortKey(0); - final byte[] toKey = KeyBuilder.asSortKey(19); + final byte[] toKey = TestKeyBuilder.asSortKey(19); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(10, "Bryan"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(10),cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(10),cursor.currentKey()); assertFalse(cursor.hasPrior()); } @@ -668,9 +668,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(0); + final byte[] fromKey = TestKeyBuilder.asSortKey(0); - final byte[] toKey = KeyBuilder.asSortKey(9); + final byte[] toKey = TestKeyBuilder.asSortKey(9); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); @@ -698,7 +698,7 @@ * Verify that the separatorKey in the parent is the first tuple we * expect to find in the 2nd leaf. */ - assertEquals(KeyBuilder.asSortKey(30), ((Node) btree.getRoot()) + assertEquals(TestKeyBuilder.asSortKey(30), ((Node) btree.getRoot()) .getKeys().get(0)); /* @@ -711,29 +711,29 @@ // Remove the first tuple in the 2nd leaf. btree.remove(30); // The separator key has not been changed. - assertEquals(((Node) btree.getRoot()).getKeys().get(0), KeyBuilder + assertEquals(((Node) btree.getRoot()).getKeys().get(0), TestKeyBuilder .asSortKey(30)); // The #of leaves has not been changed. assertEquals(2, btree.getLeafCount()); // Verify the expected keys in the 2nd leaf. 
AbstractBTreeTestCase.assertKeys(// new ReadOnlyKeysRaba(new byte[][]{// - KeyBuilder.asSortKey(40),// - KeyBuilder.asSortKey(50),// + TestKeyBuilder.asSortKey(40),// + TestKeyBuilder.asSortKey(50),// }),// ((Node) btree.getRoot()).getChild(1/*2nd leaf*/).getKeys()); - final byte[] fromKey = KeyBuilder.asSortKey(10); + final byte[] fromKey = TestKeyBuilder.asSortKey(10); // search for the tuple we just deleted from the 2nd leaf. - final byte[] toKey = KeyBuilder.asSortKey(30); + final byte[] toKey = TestKeyBuilder.asSortKey(30); final ITupleCursor2<String> cursor = newCursor(btree, IRangeQuery.DEFAULT, fromKey, toKey); assertTrue(cursor.hasPrior()); assertEquals(new TestTuple<String>(20, "Mike"), cursor.prior()); - assertEquals(KeyBuilder.asSortKey(20), cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(20), cursor.currentKey()); assertTrue(cursor.hasPrior()); } @@ -862,9 +862,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(2); + final byte[] fromKey = TestKeyBuilder.asSortKey(2); - final byte[] toKey = KeyBuilder.asSortKey(7); + final byte[] toKey = TestKeyBuilder.asSortKey(7); // first() { @@ -1107,7 +1107,7 @@ assertNull(cursor.seek(1)); - assertEquals(KeyBuilder.asSortKey(1), cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.currentKey()); assertFalse(cursor.hasPrior()); @@ -1141,9 +1141,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(5); + final byte[] fromKey = TestKeyBuilder.asSortKey(5); - final byte[] toKey = KeyBuilder.asSortKey(9); + final byte[] toKey = TestKeyBuilder.asSortKey(9); // first() { @@ -1237,7 +1237,7 @@ assertNull(cursor.seek(7)); - assertEquals(KeyBuilder.asSortKey(7), cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(7), cursor.currentKey()); assertFalse(cursor.hasPrior()); @@ -1254,9 +1254,9 @@ */ { - final byte[] fromKey = KeyBuilder.asSortKey(15); + final byte[] fromKey = TestKeyBuilder.asSortKey(15); - final byte[] toKey = KeyBuilder.asSortKey(19); + final byte[] toKey = TestKeyBuilder.asSortKey(19); // first() { @@ -1338,7 +1338,7 @@ assertNull(cursor.seek(17)); - assertEquals(KeyBuilder.asSortKey(17), cursor.currentKey()); + assertEquals(TestKeyBuilder.asSortKey(17), cursor.currentKey()); assertFalse(cursor.hasPrior()); Modified: trunk/bigdata/src/test/com/bigdata/btree/TestBTreeLeafCursors.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestBTreeLeafCursors.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestBTreeLeafCursors.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -33,7 +33,7 @@ import junit.framework.TestCase2; import com.bigdata.btree.BTree.Stack; -import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.rawstore.SimpleMemoryRawStore; /** @@ -125,56 +125,56 @@ ILeafCursor<Leaf> cursor = btree.newLeafCursor(SeekEnum.First); // verify first leaf since that is where we positioned the cursor. - assertEquals(KeyBuilder.asSortKey(1), cursor.leaf().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.leaf().getKeys().get(0)); // first(). - assertEquals(KeyBuilder.asSortKey(1), cursor.first().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.first().getKeys().get(0)); // last(). 
- assertEquals(KeyBuilder.asSortKey(9), cursor.last().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9), cursor.last().getKeys().get(0)); } public void test_seek() { - ILeafCursor<Leaf> cursor = btree.newLeafCursor(KeyBuilder.asSortKey(5)); + ILeafCursor<Leaf> cursor = btree.newLeafCursor(TestKeyBuilder.asSortKey(5)); // verify initial seek. - assertEquals(KeyBuilder.asSortKey(5), cursor.leaf().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(5), cursor.leaf().getKeys().get(0)); // verify seek to each key found in the B+Tree. - assertEquals(KeyBuilder.asSortKey(1), cursor.seek( - KeyBuilder.asSortKey(1)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.seek( + TestKeyBuilder.asSortKey(1)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(1), cursor.seek( - KeyBuilder.asSortKey(2)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.seek( + TestKeyBuilder.asSortKey(2)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(3), cursor.seek( - KeyBuilder.asSortKey(3)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(3), cursor.seek( + TestKeyBuilder.asSortKey(3)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(3), cursor.seek( - KeyBuilder.asSortKey(4)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(3), cursor.seek( + TestKeyBuilder.asSortKey(4)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(5), cursor.seek( - KeyBuilder.asSortKey(5)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(5), cursor.seek( + TestKeyBuilder.asSortKey(5)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(5), cursor.seek( - KeyBuilder.asSortKey(6)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(5), cursor.seek( + TestKeyBuilder.asSortKey(6)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(7), cursor.seek( - KeyBuilder.asSortKey(7)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(7), cursor.seek( + TestKeyBuilder.asSortKey(7)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(7), cursor.seek( - KeyBuilder.asSortKey(8)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(7), cursor.seek( + TestKeyBuilder.asSortKey(8)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(9), cursor.seek( - KeyBuilder.asSortKey(9)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9), cursor.seek( + TestKeyBuilder.asSortKey(9)).getKeys().get(0)); - assertEquals(KeyBuilder.asSortKey(9), cursor.seek( - KeyBuilder.asSortKey(10)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9), cursor.seek( + TestKeyBuilder.asSortKey(10)).getKeys().get(0)); // verify seek to key that would be in the last leaf but is not actually in the B+Tree. - assertEquals(KeyBuilder.asSortKey(9),cursor.seek(KeyBuilder.asSortKey(12)).getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9),cursor.seek(TestKeyBuilder.asSortKey(12)).getKeys().get(0)); } @@ -184,19 +184,19 @@ ILeafCursor<Leaf> cursor = btree.newLeafCursor(SeekEnum.First); // verify first leaf since that is where we positioned the cursor. - assertEquals(KeyBuilder.asSortKey(1), cursor.leaf().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.leaf().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(3), cursor.next().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(3), cursor.next().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(5), cursor.next().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(5), cursor.next().getKeys().get(0)); // next(). 
- assertEquals(KeyBuilder.asSortKey(7), cursor.next().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(7), cursor.next().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(9), cursor.next().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9), cursor.next().getKeys().get(0)); } @@ -205,19 +205,19 @@ ILeafCursor<Leaf> cursor = btree.newLeafCursor(SeekEnum.Last); // verify last leaf since that is where we positioned the cursor. - assertEquals(KeyBuilder.asSortKey(9), cursor.leaf().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(9), cursor.leaf().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(7), cursor.prior().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(7), cursor.prior().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(5), cursor.prior().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(5), cursor.prior().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(3), cursor.prior().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(3), cursor.prior().getKeys().get(0)); // next(). - assertEquals(KeyBuilder.asSortKey(1), cursor.prior().getKeys().get(0)); + assertEquals(TestKeyBuilder.asSortKey(1), cursor.prior().getKeys().get(0)); } @@ -247,7 +247,7 @@ for (int i = 1; i <= 10; i++) { - btree.insert(KeyBuilder.asSortKey(i), "v"+i); + btree.insert(TestKeyBuilder.asSortKey(i), "v"+i); } Modified: trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestBigdataMap.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -39,7 +39,7 @@ import com.bigdata.btree.keys.DefaultKeyBuilderFactory; import com.bigdata.btree.keys.IKeyBuilderFactory; -import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.io.SerializerUtil; import com.bigdata.rawstore.SimpleMemoryRawStore; @@ -163,7 +163,7 @@ * Handles {@link String} keys and values and makes the keys available for * {@link BigdataMap} and {@link BigdataSet} (under the assumption that the * key and the value are the same!). The actual index order is governed by - * {@link KeyBuilder#asSortKey(Object)}. + * {@link TestKeyBuilder#asSortKey(Object)}. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ Modified: trunk/bigdata/src/test/com/bigdata/btree/TestChunkedIterators.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestChunkedIterators.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestChunkedIterators.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -36,6 +36,7 @@ import com.bigdata.btree.filter.TupleFilter; import com.bigdata.btree.keys.DefaultKeyBuilderFactory; import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; import com.bigdata.io.SerializerUtil; import com.bigdata.rawstore.IBlock; import com.bigdata.rawstore.SimpleMemoryRawStore; @@ -109,7 +110,7 @@ for(int i=0; i<nentries; i++) { - keys[i] = KeyBuilder.asSortKey(i); + keys[i] = TestKeyBuilder.asSortKey(i); vals[i] = new byte[4]; @@ -203,7 +204,7 @@ for(int i=0; i<nentries; i++) { - keys[i] = KeyBuilder.asSortKey(i); + keys[i] = TestKeyBuilder.asSortKey(i); vals[i] = new byte[4]; @@ -337,7 +338,7 @@ for(int i=0; i<nentries; i++) { - keys[i] = KeyBuilder.asSortKey(i); + keys[i] = TestKeyBuilder.asSortKey(i); vals[i] = new byte[4]; @@ -425,7 +426,7 @@ for(int i=0; i<nentries; i++) { - keys[i] = KeyBuilder.asSortKey(i); + keys[i] = TestKeyBuilder.asSortKey(i); vals[i] = new byte[4]; Modified: trunk/bigdata/src/test/com/bigdata/btree/TestCopyOnWrite.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestCopyOnWrite.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestCopyOnWrite.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -29,7 +29,7 @@ import org.apache.log4j.Level; -import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; /** * Test suite for copy-on-write semantics. Among other things the tests in this @@ -79,12 +79,12 @@ SimpleEntry v9 = new SimpleEntry(9); // fill up the root leaf. - btree.insert(KeyBuilder.asSortKey(3), v3); - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); final Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -100,8 +100,8 @@ * split another leaf so that there are now three children to visit. at * this point the root is full. */ - btree.insert(KeyBuilder.asSortKey(1), v1); - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(1), v1); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); @@ -141,7 +141,7 @@ * triggers copy-on-write for (a). (a1) is dirty as a post-condition. * (d) is deleted as a post-condition. */ - assertEquals(v1,btree.remove(KeyBuilder.asSortKey(1))); + assertEquals(v1,btree.remove(TestKeyBuilder.asSortKey(1))); assertKeys(new int[]{7},c); assertNotSame(a,c.getChild(0)); final Leaf a1 = (Leaf)c.getChild(0); @@ -160,7 +160,7 @@ * insert a key that will go into (b). since (b) is immutable this * triggers copy-on-write. 
*/ - btree.insert(KeyBuilder.asSortKey(8),v8); + btree.insert(TestKeyBuilder.asSortKey(8),v8); assertKeys(new int[]{7},c); assertEquals(a1,c.getChild(0)); assertNotSame(b,c.getChild(1)); @@ -194,7 +194,7 @@ * (b1) is clean, so it is stolen by setting its parent reference * to the new (c1). */ - assertEquals(v2,btree.remove(KeyBuilder.asSortKey(2))); + assertEquals(v2,btree.remove(TestKeyBuilder.asSortKey(2))); assertNotSame(c,btree.root); final Node c1 = (Node)btree.root; assertKeys(new int[]{7},c1); @@ -245,14 +245,14 @@ * copy-on-write. We verify that the root leaf reference is changed. */ assertEquals(a,btree.root); - btree.insert(KeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(3), v3); assertNotSame(a,btree.root); a = (Leaf)btree.root; // new reference for the root leaf. - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -289,7 +289,7 @@ assertTrue(a.isPersistent()); assertTrue(b.isPersistent()); assertTrue(c.isPersistent()); - btree.insert(KeyBuilder.asSortKey(1), v1); // triggers copy on write for (a) and (c). + btree.insert(TestKeyBuilder.asSortKey(1), v1); // triggers copy on write for (a) and (c). assertNotSame(c,btree.root); c = (Node)btree.root; assertNotSame(a,c.getChild(0)); @@ -300,7 +300,7 @@ assertTrue(b.isPersistent()); assertFalse(c.isPersistent()); // insert more until we split another leaf. - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); // the new leaf (d). @@ -323,8 +323,8 @@ * cause another leaf (d) to split, forcing the split to propagate to and * split the root and the tree to increase in height. */ - btree.insert(KeyBuilder.asSortKey(4), v4); - btree.insert(KeyBuilder.asSortKey(6), v6); + btree.insert(TestKeyBuilder.asSortKey(4), v4); + btree.insert(TestKeyBuilder.asSortKey(6), v6); // btree.dump(Level.DEBUG,System.err); assertNotSame(c,btree.root); final Node g = (Node)btree.root; @@ -365,7 +365,7 @@ * the following are cloned: d, c, g. * the following clean children are stolen: e, b (by the new root c). */ - assertEquals(v4,btree.remove(KeyBuilder.asSortKey(4))); + assertEquals(v4,btree.remove(TestKeyBuilder.asSortKey(4))); assertNotSame(g,btree.root); assertNotSame(c,btree.root); c = (Node) btree.root; @@ -393,7 +393,7 @@ * remove a key (7) from a leaf (b) forcing two leaves (b,e) to join * into (b) */ - assertEquals(v7,btree.remove(KeyBuilder.asSortKey(7))); + assertEquals(v7,btree.remove(TestKeyBuilder.asSortKey(7))); btree.dump(Level.DEBUG,System.err); assertKeys(new int[]{5},c); assertEquals(d,c.getChild(0)); @@ -421,16 +421,16 @@ assertEquals(c,btree.root); assertEquals(d,c.getChild(0)); assertEquals(b,c.getChild(1)); - assertEquals(v3, btree.remove(KeyBuilder.asSortKey(3))); // remove from (d) + assertEquals(v3, btree.remove(TestKeyBuilder.asSortKey(3))); // remove from (d) assertNotSame(c,btree.root); // c was cloned. c = (Node) btree.root; assertNotSame(d,c.getChild(0)); d = (Leaf)c.getChild(0); // d was cloned. 
assertEquals(b,c.getChild(1)); - assertEquals(v5,btree.remove(KeyBuilder.asSortKey(5))); // remove from (b) + assertEquals(v5,btree.remove(TestKeyBuilder.asSortKey(5))); // remove from (b) assertNotSame(b,c.getChild(1)); b = (Leaf)c.getChild(1); // b was cloned. - assertEquals(v6,btree.remove(KeyBuilder.asSortKey(6))); // remove from (b) + assertEquals(v6,btree.remove(TestKeyBuilder.asSortKey(6))); // remove from (b) assertKeys(new int[]{1,2,9},b); assertValues(new Object[]{v1,v2,v9}, b); assertTrue(d.isDeleted()); Modified: trunk/bigdata/src/test/com/bigdata/btree/TestDirtyIterators.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestDirtyIterators.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestDirtyIterators.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -29,7 +29,7 @@ import org.apache.log4j.Level; -import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.btree.keys.TestKeyBuilder; /** * Test suite for iterators that visit only dirty nodes or leaves. This test @@ -82,12 +82,12 @@ SimpleEntry v9 = new SimpleEntry(9); // fill up the root leaf. - btree.insert(KeyBuilder.asSortKey(3), v3); - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); final Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -109,8 +109,8 @@ * split another leaf so that there are now three children to visit. at * this point the root is full. */ - btree.insert(KeyBuilder.asSortKey(1), v1); - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(1), v1); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); @@ -135,7 +135,7 @@ * remove a key from a leaf forcing two leaves to join and verify the * visitation order. */ - assertEquals(v1,btree.remove(KeyBuilder.asSortKey(1))); + assertEquals(v1,btree.remove(TestKeyBuilder.asSortKey(1))); assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); assertEquals(b,c.getChild(1)); @@ -187,12 +187,12 @@ SimpleEntry v9 = new SimpleEntry(9); // fill up the root leaf. - btree.insert(KeyBuilder.asSortKey(3), v3); - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); final Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -212,8 +212,8 @@ * split another leaf so that there are now three children to visit. at * this point the root is full. */ - btree.insert(KeyBuilder.asSortKey(1), v1); - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(1), v1); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); @@ -258,7 +258,7 @@ * visitation order. this triggers copy-on-write for (a) and (a) is * dirty as a post-condition. 
*/ - assertEquals(v1,btree.remove(KeyBuilder.asSortKey(1))); + assertEquals(v1,btree.remove(TestKeyBuilder.asSortKey(1))); assertKeys(new int[]{7},c); assertNotSame(a,c.getChild(0)); Leaf a1 = (Leaf)c.getChild(0); @@ -279,7 +279,7 @@ * insert a key that will go into (b). since (b) is immutable this * triggers copy-on-write. */ - btree.insert(KeyBuilder.asSortKey(8),v8); + btree.insert(TestKeyBuilder.asSortKey(8),v8); assertKeys(new int[]{7},c); assertEquals(a1,c.getChild(0)); assertNotSame(b,c.getChild(1)); @@ -313,7 +313,7 @@ * remove a key from (a1). since (a1) is immutable this triggers * copy-on-write. since the root is immtuable, it is also copied. */ - assertEquals(v2,btree.remove(KeyBuilder.asSortKey(2))); + assertEquals(v2,btree.remove(TestKeyBuilder.asSortKey(2))); assertNotSame(c,btree.root); Node c1 = (Node)btree.root; assertKeys(new int[]{7},c1); @@ -368,12 +368,12 @@ .postOrderNodeIterator(true)); // fill up the root leaf. - btree.insert(KeyBuilder.asSortKey(3), v3); - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); final Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -393,8 +393,8 @@ * split another leaf so that there are now three children to visit. at * this point the root is full. */ - btree.insert(KeyBuilder.asSortKey(1), v1); - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(1), v1); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); @@ -416,8 +416,8 @@ * cause another leaf (d) to split, forcing the split to propagate to and * split the root and the tree to increase in height. */ - btree.insert(KeyBuilder.asSortKey(4), v4); - btree.insert(KeyBuilder.asSortKey(6), v6); + btree.insert(TestKeyBuilder.asSortKey(4), v4); + btree.insert(TestKeyBuilder.asSortKey(6), v6); // btree.dump(Level.DEBUG,System.err); assertNotSame(c,btree.root); final Node g = (Node)btree.root; @@ -450,7 +450,7 @@ * be deleted. this causes (c,f) to merge as well, which in turn forces * the root to be replaced by (c). */ - assertEquals(v4,btree.remove(KeyBuilder.asSortKey(4))); + assertEquals(v4,btree.remove(TestKeyBuilder.asSortKey(4))); // btree.dump(Level.DEBUG,System.err); assertKeys(new int[]{5,7},c); assertEquals(d,c.getChild(0)); @@ -474,7 +474,7 @@ * remove a key (7) from a leaf (b) forcing two leaves to join and * verify the visitation order. */ - assertEquals(v7,btree.remove(KeyBuilder.asSortKey(7))); + assertEquals(v7,btree.remove(TestKeyBuilder.asSortKey(7))); btree.dump(Level.DEBUG,System.err); assertKeys(new int[]{5},c); assertEquals(d,c.getChild(0)); @@ -495,9 +495,9 @@ * remove keys from a leaf forcing the remaining two leaves to join and * verify the visitation order. 
*/ - assertEquals(v3,btree.remove(KeyBuilder.asSortKey(3))); - assertEquals(v5,btree.remove(KeyBuilder.asSortKey(5))); - assertEquals(v6,btree.remove(KeyBuilder.asSortKey(6))); + assertEquals(v3,btree.remove(TestKeyBuilder.asSortKey(3))); + assertEquals(v5,btree.remove(TestKeyBuilder.asSortKey(5))); + assertEquals(v6,btree.remove(TestKeyBuilder.asSortKey(6))); assertKeys(new int[]{1,2,9},b); assertValues(new Object[]{v1,v2,v9}, b); assertTrue(d.isDeleted()); @@ -560,18 +560,18 @@ * and verify that both iterators now visit the root. */ assertEquals(a,btree.root); - btree.insert(KeyBuilder.asSortKey(3), v3); + btree.insert(TestKeyBuilder.asSortKey(3), v3); assertNotSame(a,btree.root); a = (Leaf)btree.root; // new reference for the root leaf. assertSameIterator(new IAbstractNode[] { btree.root }, btree.root .postOrderNodeIterator(false)); assertSameIterator(new IAbstractNode[] { btree.root }, btree.root .postOrderNodeIterator(true)); - btree.insert(KeyBuilder.asSortKey(5), v5); - btree.insert(KeyBuilder.asSortKey(7), v7); + btree.insert(TestKeyBuilder.asSortKey(5), v5); + btree.insert(TestKeyBuilder.asSortKey(7), v7); // split the root leaf. - btree.insert(KeyBuilder.asSortKey(9), v9); + btree.insert(TestKeyBuilder.asSortKey(9), v9); Node c = (Node) btree.root; assertKeys(new int[]{7},c); assertEquals(a,c.getChild(0)); @@ -618,7 +618,7 @@ assertTrue(a.isPersistent()); assertTrue(b.isPersistent()); assertTrue(c.isPersistent()); - btree.insert(KeyBuilder.asSortKey(1), v1); // triggers copy on write for (a) and (c). + btree.insert(TestKeyBuilder.asSortKey(1), v1); // triggers copy on write for (a) and (c). assertNotSame(c,btree.root); c = (Node)btree.root; assertNotSame(a,c.getChild(0)); @@ -627,7 +627,7 @@ assertFalse(a.isPersistent()); assertTrue(b.isPersistent()); assertFalse(c.isPersistent()); - btree.insert(KeyBuilder.asSortKey(2), v2); + btree.insert(TestKeyBuilder.asSortKey(2), v2); assertKeys(new int[]{3,7},c); assertEquals(a,c.getChild(0)); Leaf d = (Leaf)c.getChild(1); @@ -652,8 +652,8 @@ * cause another leaf (d) to split, forcing the split to propagate to and * split the root and the tree to increase in height. */ - btree.insert(KeyBuilder.asSortKey(4), v4); - btree.insert(KeyBuilder.asSortKey(6), v6); + btree.insert(TestKeyBuilder.asSortKey(4), v4); + btree.insert(TestKeyBuilder.asSortKey(6), v6); // btree.dump(Level.DEBUG,System.err); assertNotSame(c,btree.root); final Node g = (Node)btree.root; @@ -715,7 +715,7 @@ * * the following are cloned: d, c, g. */ - assertEquals(v4,btree.remove(KeyBuilder.asSortKey(4))); + assertEquals(v4,btree.remove(TestKeyBuilder.asSortKey(4))); assertNotSame(g,btree.root); assertNotSame(c,btree.root); c = (Node) btree.root; @@ -745,7 +745,7 @@ * remove a key (7) from a leaf (b) forcing two leaves (b,e) into (b) to * join and verify the visitation order. */ - assertEquals(v7,btree.remove(KeyBuilder.asSortKey(7))); + assertEquals(v7,btree.remove(TestKeyBuilder.asSortKey(7))); btree.dump(Level.DEBUG,System.err); assertKeys(new int[]{5},c); assertEquals(d,c.getChild(0)); @@ -779,16 +779,16 @@ assertEquals(c,btree.root); assertEquals(d,c.getChild(0)); assertEquals(b,c.getChild(1)); - assertEquals(v3, btree.remove(KeyBuilder.asSortKey(3))); // remove from (d) + assertEquals(v3, btree.remove(TestKeyBuilder.asSortKey(3))); // remove from (d) assertNotSame(c,btree.root); // c was cloned. c = (Node) btree.root; assertNotSame(d,c.getChild(0)); d = (Leaf)c.getChild(0); // d was cloned. 
assertEquals(b,c.getChild(1)); - assertEquals(v5,btree.remove(KeyBuilder.asSortKey(5))); // remove from (b) + assertEquals(v5,btree.remove(TestKeyBuilder.asSortKey(5))); // remove from (b) assertNotSame(b,c.getChild(1)); b = (Leaf)c.getChild(1); // b was cloned. - assertEquals(v6,btree.remove(KeyBuilder.asSortKey(6))); // remove from (b) + assertEquals(v6,btree.remove(TestKeyBuilder.asSortKey(6))); // remove from (b) assertKeys(new int[]{1,2,9},b); assertValues(new Object[]{v1,v2,v9}, b); assertTrue(d.isDeleted()); Modified: trunk/bigdata/src/test/com/bigdata/btree/TestIncrementalWrite.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/btree/TestIncrementalWrite.java 2010-07-30 13:51:43 UTC (rev 3372) +++ trunk/bigdata/src/test/com/bigdata/btree/TestIncrementalWrite.java 2010-07-30 15:02:05 UTC (rev 3373) @@ -29,7 ... [truncated message content] |
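The javadoc quoted at the head of this message calls the single lock-protected KeyBuilder instance a bottleneck and recommends thread-local or per-task IKeyBuilders instead. A minimal sketch of that recommended pattern follows; it is not code from the repository, it assumes only the KeyBuilder.newUnicodeInstance() factory and IKeyBuilder.getSortKey(Object) visible in these diffs, and the class name is hypothetical:

import com.bigdata.btree.keys.IKeyBuilder;
import com.bigdata.btree.keys.KeyBuilder;

public class ThreadLocalSortKeyUtil {

    // One Unicode-capable key builder per thread, so no shared lock.
    private static final ThreadLocal<IKeyBuilder> threadKeyBuilder =
            new ThreadLocal<IKeyBuilder>() {
                @Override
                protected IKeyBuilder initialValue() {
                    return KeyBuilder.newUnicodeInstance();
                }
            };

    /**
     * Convert an application key into an unsigned byte[] sort key without
     * contending on a global instance.
     */
    public static byte[] asSortKey(final Object val) {
        if (val == null)
            return null;
        if (val instanceof byte[])
            return (byte[]) val; // already a sort key.
        return threadKeyBuilder.get().getSortKey(val);
    }
}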
From: <tho...@us...> - 2010-07-30 18:49:44
Revision: 3376 http://bigdata.svn.sourceforge.net/bigdata/?rev=3376&view=rev
Author: thompsonbry
Date: 2010-07-30 18:49:37 +0000 (Fri, 30 Jul 2010)

Log Message:
-----------
Modified IKeyBuilder to fill in the generic on the ISortKeyBuilder interface as "Object" and removed the @SuppressWarnings() annotation from Schema's asSortKey() implementation and the implementation in TestKeyBuilder.

Added 'final' to a method signature in IVUtility.

Modified Paths:
--------------
    trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java
    trunk/bigdata/src/java/com/bigdata/sparse/Schema.java
    trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java
    trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java

Modified: trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java	2010-07-30 18:20:18 UTC (rev 3375)
+++ trunk/bigdata/src/java/com/bigdata/btree/keys/IKeyBuilder.java	2010-07-30 18:49:37 UTC (rev 3376)
@@ -97,7 +97,7 @@
  * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
  * @version $Id$
  */
-public interface IKeyBuilder extends ISortKeyBuilder {
+public interface IKeyBuilder extends ISortKeyBuilder<Object> {
 
     /**
      * The #of bytes of data in the key.

Modified: trunk/bigdata/src/java/com/bigdata/sparse/Schema.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/sparse/Schema.java	2010-07-30 18:20:18 UTC (rev 3375)
+++ trunk/bigdata/src/java/com/bigdata/sparse/Schema.java	2010-07-30 18:49:37 UTC (rev 3376)
@@ -521,8 +521,7 @@
      * <code>null</code> iff the <i>key</i> is <code>null</code>. If the
      * <i>key</i> is a byte[], then the byte[] itself will be returned.
      */
-    @SuppressWarnings("unchecked")
-    private static final byte[] asSortKey(Object val) {
+    private static final byte[] asSortKey(final Object val) {
 
         if (val == null) {

Modified: trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java
===================================================================
--- trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java	2010-07-30 18:20:18 UTC (rev 3375)
+++ trunk/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java	2010-07-30 18:49:37 UTC (rev 3376)
@@ -2541,8 +2541,7 @@
      * <code>null</code> iff the <i>key</i> is <code>null</code>. If the
      * <i>key</i> is a byte[], then the byte[] itself will be returned.
      */
-    @SuppressWarnings("unchecked")
-    public static final byte[] asSortKey(Object val) {
+    public static final byte[] asSortKey(final Object val) {
 
         if (val == null) {

Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
===================================================================
--- trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java	2010-07-30 18:20:18 UTC (rev 3375)
+++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java	2010-07-30 18:49:37 UTC (rev 3376)
@@ -398,7 +398,7 @@
      * @return
      *          the IV
      */
-    public static final IV fromString(String s) {
+    public static final IV fromString(final String s) {
 
         if (s.startsWith("TermId")) {
             char type = s.charAt(s.length()-2);
             long tid = Long.valueOf(s.substring(7, s.length()-2));
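For readers tracking the generics cleanup in r3376: binding the ISortKeyBuilder type parameter to Object is what makes the @SuppressWarnings("unchecked") annotations unnecessary. The sketch below uses stripped-down, hypothetical stand-ins (the real bigdata interfaces carry many more methods) to show the before and after:

// Hypothetical minimal stand-ins, not the real bigdata interfaces.
interface ISortKeyBuilderSketch<T> {
    byte[] getSortKey(T val);
}

// Before r3376 the supertype was used raw ("extends ISortKeyBuilder"),
// so implementations triggered unchecked warnings. Binding the type
// parameter to Object resolves getSortKey(Object) at compile time:
interface IKeyBuilderSketch extends ISortKeyBuilderSketch<Object> {
    // key-assembly methods elided.
}

class KeyBuilderSketch implements IKeyBuilderSketch {
    public byte[] getSortKey(final Object val) {
        // A real implementation would append val to an internal buffer
        // and return the assembled unsigned byte[] key.
        return new byte[0]; // placeholder only.
    }
}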
From: <tho...@us...> - 2010-08-02 00:49:46
Revision: 3383 http://bigdata.svn.sourceforge.net/bigdata/?rev=3383&view=rev
Author: thompsonbry
Date: 2010-08-02 00:49:40 +0000 (Mon, 02 Aug 2010)

Log Message:
-----------
Added logic to NanoSparqlServer to optionally include the namespaces of all triple stores when the URL query parameter "showNamespaces" is specified.

Added logic to NanoSparqlServer to show all properties for the specified namespace.

Conditional logging in TPS.

Modified Paths:
--------------
    trunk/bigdata/src/java/com/bigdata/sparse/TPS.java
    trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java

Modified: trunk/bigdata/src/java/com/bigdata/sparse/TPS.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/sparse/TPS.java	2010-08-01 18:53:21 UTC (rev 3382)
+++ trunk/bigdata/src/java/com/bigdata/sparse/TPS.java	2010-08-02 00:49:40 UTC (rev 3383)
@@ -53,9 +53,9 @@
 
     protected static final transient Logger log = Logger.getLogger(TPS.class);
 
-    protected static final transient boolean INFO = log.isInfoEnabled();
-
-    protected static final transient boolean DEBUG = log.isDebugEnabled();
+//    protected static final transient boolean INFO = log.isInfoEnabled();
+//
+//    protected static final transient boolean DEBUG = log.isDebugEnabled();
 
     /**
      *
@@ -220,7 +220,7 @@
 
         if (tpv != null) {
 
-            if (INFO)
+            if (log.isInfoEnabled())
                 log.info("Exact timestamp match: name=" + name
                         + (timestamp == Long.MAX_VALUE ? ", current value"
@@ -271,14 +271,14 @@
          * encountered by the iterator.
          */
 
-        if (INFO)
+        if (log.isInfoEnabled())
             log.info("No match: name=" + name);
 
         return new TPV(schema, name, 0L, null);
 
     }
 
-    if (INFO)
+    if (log.isInfoEnabled())
         log.info("Most recent match: name=" + name + ", value="
                 + last.value + ", timestamp=" + last.timestamp);
 
@@ -335,7 +335,7 @@
      */
     public TPS currentRow(final INameFilter filter) {
 
-        if(DEBUG) {
+        if(log.isDebugEnabled()) {
 
             log.debug("filter=" + filter + ", preFilter=" + this);
 
@@ -352,7 +352,7 @@
 
             if (filter != null && !filter.accept(tpv.name)) {
 
-                if(DEBUG) {
+                if(log.isDebugEnabled()) {
 
                     log.debug("rejecting on filter: "+tpv);
 
@@ -367,7 +367,7 @@
                 // remove binding.
                 final TPV old = m.remove(tpv.name);
 
-                if (DEBUG && old != null) {
+                if (log.isDebugEnabled() && old != null) {
 
                     log.debug("removed binding: " + old);
 
@@ -378,7 +378,7 @@
                 // (over)write binding.
                 final TPV old = m.put(tpv.name, tpv);
 
-                if (DEBUG && old != null) {
+                if (log.isDebugEnabled() && old != null) {
 
                     log.debug("overwrote: \nold=" + old + "\nnew=" + tpv);
 
@@ -408,7 +408,7 @@
 
         }
 
-        if(DEBUG) {
+        if(log.isDebugEnabled()) {
 
            log.debug("postFilter: "+tps);
 
@@ -453,7 +453,7 @@
         if (toTime <= fromTime)
             throw new IllegalArgumentException();
 
-        if(DEBUG) {
+        if(log.isDebugEnabled()) {
 
            log.debug("fromTime=" + fromTime + ", toTime=" + toTime
                    + ", filter=" + filter + ", preFilter=" + this);
 
@@ -479,7 +479,7 @@
 
                 // Outside of the half-open range.
 
-                if (DEBUG) {
+                if (log.isDebugEnabled()) {
 
                     log.debug("rejecting on timestamp: " + tp);
 
@@ -491,7 +491,7 @@
 
             if (filter != null && !filter.accept(tp.name)) {
 
-                if (DEBUG) {
+                if (log.isDebugEnabled()) {
 
                     log.debug("rejecting on filter: " + tp);
 
@@ -506,7 +506,7 @@
 
         }
 
-        if(DEBUG) {
+        if(log.isDebugEnabled()) {
 
            log.debug("postFilter: "+tps);
 
@@ -736,7 +736,7 @@
         // #of tuples.
         final int n = in.readInt();
 
-        if (INFO)
+        if (log.isDebugEnabled())
             log.info("n=" + n + ", schema=" + schema);
 
         for (int i = 0; i < n; i++) {
@@ -761,7 +761,7 @@
 
             tuples.put(new TP(name, timestamp), tpv);
 
-            if (INFO)
+            if (log.isDebugEnabled())
                 log.info("tuple: name=" + name + ", timestamp=" + timestamp
                         + ", value=" + value);
 
@@ -899,7 +899,7 @@
 
     public String toString() {
 
-        return "TPS{name="+name+",timestamp="+timestamp+",value="+value+"}";
+        return "TPV{name="+name+",timestamp="+timestamp+",value="+value+"}";
 
     }
 
Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java
===================================================================
--- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java	2010-08-01 18:53:21 UTC (rev 3382)
+++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java	2010-08-02 00:49:40 UTC (rev 3383)
@@ -43,6 +43,9 @@
 import java.util.Date;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.TreeMap;
 import java.util.Vector;
@@ -84,8 +87,10 @@
 import com.bigdata.rdf.sail.bench.NanoSparqlClient.QueryType;
 import com.bigdata.rdf.store.AbstractTripleStore;
 import com.bigdata.relation.AbstractResource;
+import com.bigdata.relation.RelationSchema;
 import com.bigdata.service.IBigdataFederation;
 import com.bigdata.service.jini.JiniClient;
+import com.bigdata.sparse.ITPS;
 import com.bigdata.util.concurrent.DaemonThreadFactory;
 import com.bigdata.util.httpd.AbstractHTTPD;
 import com.bigdata.util.httpd.NanoHTTPD;
@@ -226,6 +231,52 @@
 
     }
 
+    /**
+     * Return a list of the registered {@link AbstractTripleStore}s.
+     */
+    protected List<String> getNamespaces() {
+
+        // the triple store namespaces.
+        final List<String> namespaces = new LinkedList<String>();
+
+        // scan the relation schema in the global row store.
+        final Iterator<ITPS> itr = (Iterator<ITPS>) indexManager
+                .getGlobalRowStore().rangeIterator(RelationSchema.INSTANCE);
+
+        while (itr.hasNext()) {
+
+            // A timestamped property value set is a logical row with
+            // timestamped property values.
+            final ITPS tps = itr.next();
+
+            // If you want to see what is in the TPS, uncomment this.
+//            System.err.println(tps.toString());
+
+            // The namespace is the primary key of the logical row for the
+            // relation schema.
+            final String namespace = (String) tps.getPrimaryKey();
+
+            // Get the name of the implementation class
+            // (AbstractTripleStore, SPORelation, LexiconRelation, etc.)
+            final String className = (String) tps.get(RelationSchema.CLASS)
+                    .getValue();
+
+            try {
+                final Class cls = Class.forName(className);
+                if (AbstractTripleStore.class.isAssignableFrom(cls)) {
+                    // this is a triple store (vs something else).
+                    namespaces.add(namespace);
+                }
+            } catch (ClassNotFoundException e) {
+                log.error(e,e);
+            }
+
+        }
+
+        return namespaces;
+
+    }
+
     /**
      * Return various interesting metadata about the KB state.
      *
@@ -296,6 +347,18 @@
                 sb.append(AbstractResource.Options.MAX_PARALLEL_SUBQUERIES + "="
                         + tripleStore.getMaxParallelSubqueries() + "\n");
 
+                sb.append("-- All properties.--\n");
+
+                // get the triple store's properties from the global row store.
+                final Map<String, Object> properties = indexManager
+                        .getGlobalRowStore().read(RelationSchema.INSTANCE,
+                                namespace);
+
+                // write them out,
+                for (String key : properties.keySet()) {
+                    sb.append(key + "=" + properties.get(key)+"\n");
+                }
+
 //                sb.append(tripleStore.predicateUsage());
 
             } catch (Throwable t) {
@@ -553,12 +616,30 @@
 
         final boolean showKBInfo = params.get("showKBInfo") != null;
 
+        final boolean showNamespaces = params.get("showNamespaces") != null;
+
         final StringBuilder sb = new StringBuilder();
 
         sb.append("Accepted query count=" + queryIdFactory.get() + "\n");
 
-        sb.append("Running query count=" + queries.size() + "\n");
+        sb.append("Running query count=" + queries.size() + "\n");
 
+        if (showNamespaces) {
+
+            final List<String> namespaces = getNamespaces();
+
+            sb.append("Namespaces: ");
+
+            for (String s : namespaces) {
+
+                sb.append(s);
+
+            }
+
+            sb.append("\n");
+
+        }
+
         if (showKBInfo) {
 
             // General information on the connected kb.
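The TPS portion of r3383 (above) trades log levels cached in static finals for per-call checks. The difference matters for long-lived services: a cached flag is sampled once at class-load time and never sees a later log4j reconfiguration, while the guard idiom still avoids building the message string when the level is disabled. A self-contained sketch of the pattern using stock log4j calls (the class name is hypothetical):

import org.apache.log4j.Logger;

public class ConditionalLoggingSketch {

    private static final Logger log = Logger.getLogger(ConditionalLoggingSketch.class);

    // Anti-pattern removed in r3383: the level is frozen at class-load
    // time, so a runtime change to the log4j configuration is ignored.
    // private static final boolean INFO = log.isInfoEnabled();

    public void work(final String name) {
        // Pattern adopted instead: ask the logger on every call; the
        // guard still skips the string concatenation when INFO is off.
        if (log.isInfoEnabled())
            log.info("processing: name=" + name);
    }
}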
From: <tho...@us...> - 2010-08-04 20:05:13
Revision: 3409 http://bigdata.svn.sourceforge.net/bigdata/?rev=3409&view=rev
Author: thompsonbry
Date: 2010-08-04 20:05:06 +0000 (Wed, 04 Aug 2010)

Log Message:
-----------
Changed the log level from INFO to WARN when forcing a compacting merge of the data services in the federation.

Modified DumpFederation to use CSV rather than tab-delimited output.

Modified Paths:
--------------
    trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java
    trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java

Modified: trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java	2010-08-04 18:53:35 UTC (rev 3408)
+++ trunk/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java	2010-08-04 20:05:06 UTC (rev 3409)
@@ -517,8 +517,7 @@
 
         final int ndataServices = dataServiceUUIDs.length;
 
-        if(log.isInfoEnabled())
-            log.info("#dataServices=" + ndataServices + ", now=" + new Date());
+        log.warn("Forcing overflow: #dataServices=" + ndataServices + ", now=" + new Date());
 
         final List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(ndataServices);
 
@@ -570,8 +569,7 @@
 
         }
 
-        if(log.isInfoEnabled())
-            log.info("Did overflow: #ok=" + nok + ", #dataServices="
+        log.warn("Did overflow: #ok=" + nok + ", #dataServices="
                 + ndataServices + ", now=" + new Date());
 
         if (nok != tasks.size()) {

Modified: trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java
===================================================================
--- trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java	2010-08-04 18:53:35 UTC (rev 3408)
+++ trunk/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java	2010-08-04 20:05:06 UTC (rev 3409)
@@ -191,7 +191,8 @@
 
         try {
 
-            final FormatRecord formatter = new FormatTabTable(System.out);
+            final FormatRecord formatter = new FormatCSVTable(System.out);
+//            final FormatRecord formatter = new FormatTabTable(System.out);
 
             final DumpFederation dumper = new DumpFederation(fed, tx, formatter);
 
@@ -416,7 +417,7 @@
     private final FormatRecord formatter;
 
     /**
-     * Interface reponsible for formatting the output.
+     * Interface responsible for formatting the output.
      *
      * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
      * @version $Id$
@@ -632,7 +633,199 @@
 
     }
 
     /**
+     * Comma separated value delimited tabular output.
      *
+     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+     * @version $Id$
+     */
+    public static class FormatCSVTable implements FormatRecord {
+
+        private final PrintWriter out;
+
+        public FormatCSVTable(final PrintWriter w) {
+
+            if (w == null)
+                throw new IllegalArgumentException();
+
+            this.out = w;
+
+        }
+
+        public FormatCSVTable(final Writer w) {
+
+            this( new PrintWriter( w ));
+
+        }
+
+        public FormatCSVTable(final PrintStream w) {
+
+            this(new PrintWriter(w));
+
+        }
+
+        /**
+         * @todo document the columns.
+         */
+        public void writeHeaders() {
+
+            final String s = "Timestamp"//
+                    + ",IndexName" //
+                    + ",IndexPartitionName"//
+                    + ",PartitionId" //
+                    + ",ServiceUUID" //
+                    + ",ServiceName" //
+                    + ",Hostname" //
+                    + ",ServiceCode"//
+                    /*
+                     * Basic metadata about the index partition.
+                     */
+                    + ",SourceCount"//
+                    + ",SourceJournalCount"
+                    + ",SourceSegmentCount"
+                    + ",SumEntryCounts" //
+                    + ",SumNodeCounts" //
+                    + ",SumLeafCounts" //
+                    + ",SumSegmentBytes"//
+                    + ",SumSegmentNodeBytes" //
+                    + ",SumSegmentLeafBytes"//
+                    /*
+                     * Note: These values are aggregates for the data
+                     * service on which the index partition resides.
+                     */
+                    + ",DataDirFreeSpace"//
+                    + ",BytesUnderManagement"//
+                    + ",JournalBytesUnderManagement"//
+                    + ",IndexSegmentBytesUnderManagement"//
+                    + ",ManagedJournalCount"//
+                    + ",ManagedSegmentCount"//
+                    + ",AsynchronousOverflowCount"//
+                    /*
+                     * Extended metadata about the index partition.
+                     */
+                    + ",LeftSeparator"//
+                    + ",RightSeparator"//
+                    + ",View"//
+                    + ",Cause"//
+//                    + "\tHistory"//
+                    + ",IndexMetadata"//
+            ;
+
+            out.println(s);
+
+        }
+
+        /** format row for table. */
+        public void writeRecord(final IndexPartitionRecord rec) {
+
+            final StringBuilder sb = new StringBuilder();
+            sb.append(rec.ts);//new Date(ts));
+            sb.append("," + rec.indexName);
+            sb.append("," + DataService.getIndexPartitionName(rec.indexName,rec.locator.getPartitionId()));
+            sb.append("," + rec.locator.getPartitionId());
+            sb.append("," + rec.locator.getDataServiceUUID());
+            sb.append("," + rec.smd.getName());
+            sb.append("," + rec.smd.getHostname());
+            sb.append("," + "DS" + rec.smd.getCode());
+
+            if (rec.detailRec != null) {
+
+                // aggregate across all sources in the view.
+                final SourceDetailRecord sourceDetailRec = new SourceDetailRecord(
+                        rec.detailRec.sources);
+
+                // core view stats.
+                sb.append("," + rec.detailRec.sourceCount);
+                sb.append("," + rec.detailRec.journalSourceCount);
+                sb.append("," + rec.detailRec.segmentSourceCount);
+
+                // per source stats (aggregated across sources in the view).
+                sb.append("," + sourceDetailRec.entryCount);
+                sb.append("," + sourceDetailRec.nodeCount);
+                sb.append("," + sourceDetailRec.leafCount);
+                sb.append("," + sourceDetailRec.segmentByteCount);
+                sb.append("," + sourceDetailRec.segmentNodeByteCount);
+                sb.append("," + sourceDetailRec.segmentLeafByteCount);
+
+                // stats for the entire data service
+                sb.append("," + rec.detailRec.dataDirFreeSpace);
+                sb.append("," + rec.detailRec.bytesUnderManagement);
+                sb.append("," + rec.detailRec.journalBytesUnderManagement);
+                sb.append("," + rec.detailRec.segmentBytesUnderManagement);
+                sb.append("," + rec.detailRec.managedJournalCount);
+                sb.append("," + rec.detailRec.managedSegmentCount);
+                sb.append("," + rec.detailRec.asynchronousOverflowCount);
+
+            } else {
+
+                /*
+                 * Error obtaining the data of interest.
+                 */
+
+                // core view stats.
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+
+                // aggregated per-source in view stats.
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+
+                // data service stats.
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+                sb.append(",N/A");
+
+            }
+
+            // extended view stats.
+            sb.append(","
+                    + BytesUtil.toString(rec.locator.getLeftSeparatorKey()).replace(',', ' '));
+            sb.append(","
+                    + BytesUtil.toString(rec.locator.getRightSeparatorKey()).replace(',', ' '));
+
+            if (rec.detailRec != null && rec.detailRec.pmd != null) {
+
+                // current view definition.
+                sb.append(",\""
+                        + Arrays.toString(rec.detailRec.pmd.getResources()).replace(',', ';')
+                        + "\"");
+
+                // cause (reason why the index partition was created).
+                sb.append(",\""
+                        + rec.detailRec.pmd.getIndexPartitionCause().toString().replace(',', ';')
+                        + "\"");
+
+//                // history
+//                sb.append(",\"" + rec.detailRec.pmd.getHistory() + "\"");
+
+                // indexMetadata
+                sb.append(",\"" + rec.detailRec.indexMetadata.toString().replace(',', ';') + "\"");
+
+            } else {
+
+                sb.append(",N/A");
+                sb.append(",N/A");
+//                sb.append(",N/A");
+                sb.append(",N/A");
+
+            }
+
+            out.println(sb.toString());
+
+        }
+
+    }
+
+    /**
+     *
      * @param fed
      *            The federation whose indices will be dump.
      * @param tx
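FormatCSVTable (above) does not implement full RFC 4180 quoting. Instead, commas embedded in key dumps are replaced with spaces so the column count stays stable, and free-text fields (view, cause, index metadata) are wrapped in quotes with embedded commas rewritten as semicolons. A self-contained sketch of that convention, with hypothetical helper and value names:

public class CsvEscapeSketch {

    // Key-like fields: drop embedded commas outright.
    static String keyField(final String s) {
        return s.replace(',', ' ');
    }

    // Free-text fields: quote, and substitute ';' for ',' inside the value.
    static String textField(final String s) {
        return "\"" + s.replace(',', ';') + "\"";
    }

    public static void main(final String[] args) {
        final StringBuilder sb = new StringBuilder();
        sb.append("2010-08-04");
        sb.append("," + keyField("key[1, 2, 3]"));
        sb.append("," + textField("cause={reason=split, pid=12}"));
        // prints: 2010-08-04,key[1  2  3],"cause={reason=split; pid=12}"
        System.out.println(sb.toString());
    }
}

The trade-off is that the original field values are not recoverable from the CSV; the choice favors easy loading into a spreadsheet over lossless round-tripping.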
From: <tho...@us...> - 2010-11-23 15:22:37
Revision: 3979 http://bigdata.svn.sourceforge.net/bigdata/?rev=3979&view=rev Author: thompsonbry Date: 2010-11-23 15:22:27 +0000 (Tue, 23 Nov 2010) Log Message: ----------- Merge CHANGE_SET_BRANCH to trunk [r3608:HEAD]. Note: There is a problem in TestChangeSets when run with TestBigdataSailWithQuads. The test needs to be modified in order to not run the TM test variants when in quads mode. https://sourceforge.net/apps/trac/bigdata/ticket/166 has been amended to note this issue which will be fixed in the trunk. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataStatementImpl.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/StatementBuffer.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/ISPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPO.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexRemover.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexWriter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/StatementWriter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSailRepositoryConnection.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/ProxyBigdataSailTestCase.java Added Paths: ----------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOIndexMutation.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestChangeSets.java Removed Paths: ------------- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java Modified: trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata/src/java/com/bigdata/btree/proc/AbstractKeyArrayIndexProcedure.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -39,6 +39,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.OutputStream; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -795,18 +796,34 @@ 
IResultHandler<ResultBitBuffer, ResultBitBuffer> { private final boolean[] results; + + /** + * I added this so I could encode information about tuple modification + * that takes more than one boolean to encode. For example, SPOs can + * be: INSERTED, REMOVED, UPDATED, NO_OP (2 bits). + */ + private final int multiplier; + private final AtomicInteger onCount = new AtomicInteger(); public ResultBitBufferHandler(final int nkeys) { + + this(nkeys, 1); + + } + + public ResultBitBufferHandler(final int nkeys, final int multiplier) { - results = new boolean[nkeys]; + results = new boolean[nkeys*multiplier]; + this.multiplier = multiplier; } public void aggregate(final ResultBitBuffer result, final Split split) { - System.arraycopy(result.getResult(), 0, results, split.fromIndex, - split.ntuples); + System.arraycopy(result.getResult(), 0, results, + split.fromIndex*multiplier, + split.ntuples*multiplier); onCount.addAndGet(result.getOnCount()); Modified: trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -144,21 +144,21 @@ final private UUID dataServiceUUID = UUID.randomUUID(); - @Override +// @Override public IBigdataFederation getFederation() { return fed; } - @Override +// @Override public DataService getDataService() { throw new UnsupportedOperationException(); } - @Override +// @Override public UUID getDataServiceUUID() { return dataServiceUUID; Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,98 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Comparator; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPOComparator; - -public class ChangeRecord implements IChangeRecord { - - private final ISPO stmt; - - private final ChangeAction action; - -// private final StatementEnum oldType; - - public ChangeRecord(final ISPO stmt, final ChangeAction action) { - -// this(stmt, action, null); -// -// } -// -// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, -// final StatementEnum oldType) { -// - this.stmt = stmt; - this.action = action; -// this.oldType = oldType; - - } - - public ChangeAction getChangeAction() { - - return action; - - } - -// public StatementEnum getOldStatementType() { -// -// return oldType; -// -// } - - public ISPO getStatement() { - - return stmt; - - } - - @Override - public boolean equals(Object o) { - - if (o == this) - return true; - - if (o == null || o instanceof IChangeRecord == false) - return false; - - final IChangeRecord rec = (IChangeRecord) o; - - final ISPO stmt2 = rec.getStatement(); - - // statements are equal - if (stmt == stmt2 || - (stmt != null && stmt2 != null && stmt.equals(stmt2))) { - - // actions are equal - return action == rec.getChangeAction(); - - } - - return false; - - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(action).append(": ").append(stmt); - - return 
sb.toString(); - - } - - public static final Comparator<IChangeRecord> COMPARATOR = - new Comparator<IChangeRecord>() { - - public int compare(final IChangeRecord r1, final IChangeRecord r2) { - - final ISPO spo1 = r1.getStatement(); - final ISPO spo2 = r2.getStatement(); - - return SPOComparator.INSTANCE.compare(spo1, spo2); - - } - - }; - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,98 @@ +package com.bigdata.rdf.changesets; + +import java.util.Comparator; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPOComparator; + +public class ChangeRecord implements IChangeRecord { + + private final ISPO stmt; + + private final ChangeAction action; + +// private final StatementEnum oldType; + + public ChangeRecord(final ISPO stmt, final ChangeAction action) { + +// this(stmt, action, null); +// +// } +// +// public ChangeRecord(final BigdataStatement stmt, final ChangeAction action, +// final StatementEnum oldType) { +// + this.stmt = stmt; + this.action = action; +// this.oldType = oldType; + + } + + public ChangeAction getChangeAction() { + + return action; + + } + +// public StatementEnum getOldStatementType() { +// +// return oldType; +// +// } + + public ISPO getStatement() { + + return stmt; + + } + + @Override + public boolean equals(Object o) { + + if (o == this) + return true; + + if (o == null || o instanceof IChangeRecord == false) + return false; + + final IChangeRecord rec = (IChangeRecord) o; + + final ISPO stmt2 = rec.getStatement(); + + // statements are equal + if (stmt == stmt2 || + (stmt != null && stmt2 != null && stmt.equals(stmt2))) { + + // actions are equal + return action == rec.getChangeAction(); + + } + + return false; + + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(action).append(": ").append(stmt); + + return sb.toString(); + + } + + public static final Comparator<IChangeRecord> COMPARATOR = + new Comparator<IChangeRecord>() { + + public int compare(final IChangeRecord r1, final IChangeRecord r2) { + + final ISPO spo1 = r1.getStatement(); + final ISPO spo2 = r2.getStatement(); + + return SPOComparator.INSTANCE.compare(spo1, spo2); + + } + + }; + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,38 +0,0 @@ -package com.bigdata.rdf.changesets; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. Change records - * will be sent to an instance of this class via the - * {@link #changeEvent(IChangeRecord)} method. 
These events will - * occur on an ongoing basis as statements are added to or removed from the - * indices. It is the change log's responsibility to collect change records. - * When the transaction is actually committed (or aborted), the change log will - * receive notification via {@link #transactionCommited()} or - * {@link #transactionAborted()}. - */ -public interface IChangeLog { - - /** - * Occurs when a statement add or remove is flushed to the indices (but - * not yet committed). - * - * @param record - * the {@link IChangeRecord} - */ - void changeEvent(final IChangeRecord record); - - /** - * Occurs when the current SAIL transaction is committed. - */ - void transactionCommited(); - - /** - * Occurs if the current SAIL transaction is aborted. - */ - void transactionAborted(); - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,38 @@ +package com.bigdata.rdf.changesets; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. Change records + * will be sent to an instance of this class via the + * {@link #changeEvent(IChangeRecord)} method. These events will + * occur on an ongoing basis as statements are added to or removed from the + * indices. It is the change log's responsibility to collect change records. + * When the transaction is actually committed (or aborted), the change log will + * receive notification via {@link #transactionCommited()} or + * {@link #transactionAborted()}. + */ +public interface IChangeLog { + + /** + * Occurs when a statement add or remove is flushed to the indices (but + * not yet committed). + * + * @param record + * the {@link IChangeRecord} + */ + void changeEvent(final IChangeRecord record); + + /** + * Occurs when the current SAIL transaction is committed. + */ + void transactionCommited(); + + /** + * Occurs if the current SAIL transaction is aborted. + */ + void transactionAborted(); + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,120 +0,0 @@ -package com.bigdata.rdf.changesets; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.spo.ISPO; - -/** - * Provides detailed information on changes made to statements in the database. - * Change records are generated for any statements that are used in - * addStatement() or removeStatements() operations on the SAIL connection, as - * well as any inferred statements that are added or removed as a result of - * truth maintenance when the database has inference enabled. 
- * <p> - * See {@link IChangeLog}. - */ -public interface IChangeRecord { - - /** - * Attempting to add or remove statements can have a number of different - * effects. This enum captures the different actions that can take place as - * a result of trying to add or remove a statement from the database. - */ - public enum ChangeAction { - - /** - * The focus statement was not in the database before and will be - * in the database after the commit. This can be the result of either - * explicit addStatement() operations on the SAIL connection, or from - * new inferences being generated via truth maintenance when the - * database has inference enabled. If the focus statement has a - * statement type of explicit then it was added via an addStatement() - * operation. If the focus statement has a statement type of inferred - * then it was added via truth maintenance. - */ - INSERTED, - - /** - * The focus statement was in the database before and will not - * be in the database after the commit. When the database has inference - * and truth maintenance enabled, the statement that is the focus of - * this change record was either an explicit statement that was the - * subject of a removeStatements() operation on the connection, or it - * was an inferred statement that was removed as a result of truth - * maintenance. Either way, the statement is no longer provable as an - * inference using other statements still in the database after the - * commit. If it were still provable, the explicit statement would have - * had its type changed to inferred, and the inferred statement would - * have remained untouched by truth maintenance. If an inferred - * statement was the subject of a removeStatement() operation on the - * connection it would have resulted in a no-op, since inferences can - * only be removed via truth maintenance. - */ - REMOVED, - - /** - * This change action can only occur when inference and truth - * maintenance are enabled on the database. Sometimes an attempt at - * statement addition or removal via an addStatement() or - * removeStatements() operation on the connection will result in a type - * change rather than an actual assertion or deletion. When in - * inference mode, statements can have one of three statement types: - * explicit, inferred, or axiom (see {@link StatementEnum}). There are - * several reasons why a statement will change type rather than be - * asserted or deleted: - * <p> - * <ul> - * <li> A statement is asserted, but already exists in the database as - * an inference or an axiom. The existing statement will have its type - * changed from inference or axiom to explicit. </li> - * <li> An explicit statement is retracted, but is still provable by - * other means. It will have its type changed from explicit to - * inference. </li> - * <li> An explicit statement is retracted, but is one of the axioms - * needed for inference. It will have its type changed from explicit to - * axiom. </li> - * </ul> - */ - UPDATED, - -// /** -// * This change action can occur for one of two reasons: -// * <p> -// * <ul> -// * <li> A statement is asserted, but already exists in the database as -// * an explicit statement. </li> -// * <li> An inferred statement or an axiom is retracted. Only explicit -// * statements can be retracted via removeStatements() operations. </li> -// * </ul> -// */ -// NO_OP - - } - - /** - * Return the ISPO that is the focus of this change record. 
- * - * @return - * the {@link ISPO} - */ - ISPO getStatement(); - - /** - * Return the change action for this change record. - * - * @return - * the {@link ChangeAction} - */ - ChangeAction getChangeAction(); - -// /** -// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method -// * will return the old statement type of the focus statement. The -// * new statement type is available on the focus statement itself. -// * -// * @return -// * the old statement type of the focus statement -// */ -// StatementEnum getOldStatementType(); - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/IChangeRecord.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,120 @@ +package com.bigdata.rdf.changesets; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.StatementEnum; +import com.bigdata.rdf.spo.ISPO; + +/** + * Provides detailed information on changes made to statements in the database. + * Change records are generated for any statements that are used in + * addStatement() or removeStatements() operations on the SAIL connection, as + * well as any inferred statements that are added or removed as a result of + * truth maintenance when the database has inference enabled. + * <p> + * See {@link IChangeLog}. + */ +public interface IChangeRecord { + + /** + * Attempting to add or remove statements can have a number of different + * effects. This enum captures the different actions that can take place as + * a result of trying to add or remove a statement from the database. + */ + public enum ChangeAction { + + /** + * The focus statement was not in the database before and will be + * in the database after the commit. This can be the result of either + * explicit addStatement() operations on the SAIL connection, or from + * new inferences being generated via truth maintenance when the + * database has inference enabled. If the focus statement has a + * statement type of explicit then it was added via an addStatement() + * operation. If the focus statement has a statement type of inferred + * then it was added via truth maintenance. + */ + INSERTED, + + /** + * The focus statement was in the database before and will not + * be in the database after the commit. When the database has inference + * and truth maintenance enabled, the statement that is the focus of + * this change record was either an explicit statement that was the + * subject of a removeStatements() operation on the connection, or it + * was an inferred statement that was removed as a result of truth + * maintenance. Either way, the statement is no longer provable as an + * inference using other statements still in the database after the + * commit. If it were still provable, the explicit statement would have + * had its type changed to inferred, and the inferred statement would + * have remained untouched by truth maintenance. If an inferred + * statement was the subject of a removeStatement() operation on the + * connection it would have resulted in a no-op, since inferences can + * only be removed via truth maintenance. + */ + REMOVED, + + /** + * This change action can only occur when inference and truth + * maintenance are enabled on the database. 
Sometimes an attempt at + * statement addition or removal via an addStatement() or + * removeStatements() operation on the connection will result in a type + * change rather than an actual assertion or deletion. When in + * inference mode, statements can have one of three statement types: + * explicit, inferred, or axiom (see {@link StatementEnum}). There are + * several reasons why a statement will change type rather than be + * asserted or deleted: + * <p> + * <ul> + * <li> A statement is asserted, but already exists in the database as + * an inference or an axiom. The existing statement will have its type + * changed from inference or axiom to explicit. </li> + * <li> An explicit statement is retracted, but is still provable by + * other means. It will have its type changed from explicit to + * inference. </li> + * <li> An explicit statement is retracted, but is one of the axioms + * needed for inference. It will have its type changed from explicit to + * axiom. </li> + * </ul> + */ + UPDATED, + +// /** +// * This change action can occur for one of two reasons: +// * <p> +// * <ul> +// * <li> A statement is asserted, but already exists in the database as +// * an explicit statement. </li> +// * <li> An inferred statement or an axiom is retracted. Only explicit +// * statements can be retracted via removeStatements() operations. </li> +// * </ul> +// */ +// NO_OP + + } + + /** + * Return the ISPO that is the focus of this change record. + * + * @return + * the {@link ISPO} + */ + ISPO getStatement(); + + /** + * Return the change action for this change record. + * + * @return + * the {@link ChangeAction} + */ + ChangeAction getChangeAction(); + +// /** +// * If the change action is {@link ChangeAction#TYPE_CHANGE}, this method +// * will return the old statement type of the focus statement. The +// * new statement type is available on the focus statement itself. +// * +// * @return +// * the old statement type of the focus statement +// */ +// StatementEnum getOldStatementType(); + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,163 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.rdf.store.BigdataStatementIterator; -import com.bigdata.striterator.ChunkedArrayIterator; - -/** - * This is a very simple implementation of a change log. NOTE: This is not - * a particularly great implementation. First of all it ends up storing - * two copies of the change set. Secondly it needs to be smarter about - * concurrency, or maybe we can be smart about it when we do the - * implementation on the other side (the SAIL connection can just write - * change events to a buffer and then the buffer can be drained by - * another thread that doesn't block the actual read/write operations, - * although then we need to be careful not to issue the committed() - * notification before the buffer is drained). 
- * - * @author mike - * - */ -public class InMemChangeLog implements IChangeLog { - - protected static final Logger log = Logger.getLogger(InMemChangeLog.class); - - /** - * Running tally of new changes since the last commit notification. - */ - private final Map<ISPO,IChangeRecord> changeSet = - new HashMap<ISPO, IChangeRecord>(); - - /** - * Keep a record of the change set as of the last commit. - */ - private final Map<ISPO,IChangeRecord> committed = - new HashMap<ISPO, IChangeRecord>(); - - /** - * See {@link IChangeLog#changeEvent(IChangeRecord)}. - */ - public synchronized void changeEvent(final IChangeRecord record) { - - if (log.isInfoEnabled()) - log.info(record); - - changeSet.put(record.getStatement(), record); - - } - - /** - * See {@link IChangeLog#transactionCommited()}. - */ - public synchronized void transactionCommited() { - - if (log.isInfoEnabled()) - log.info("transaction committed"); - - committed.clear(); - - committed.putAll(changeSet); - - changeSet.clear(); - - } - - /** - * See {@link IChangeLog#transactionAborted()}. - */ - public synchronized void transactionAborted() { - - if (log.isInfoEnabled()) - log.info("transaction aborted"); - - changeSet.clear(); - - } - - /** - * Return the change set as of the last commmit point. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit() { - - return committed.values(); - - } - - /** - * Return the change set as of the last commmit point, using the supplied - * database to resolve ISPOs to BigdataStatements. - * - * @return - * a collection of {@link IChangeRecord}s as of the last commit - * point - */ - public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { - - return resolve(db, committed.values()); - - } - - /** - * Use the supplied database to turn a set of ISPO change records into - * BigdataStatement change records. BigdataStatements also implement - * ISPO, the difference being that BigdataStatements also contain - * materialized RDF terms for the 3 (or 4) positions, in addition to just - * the internal identifiers (IVs) for those terms. 
- * - * @param db - * the database containing the lexicon needed to materialize - * the BigdataStatement objects - * @param unresolved - * the ISPO change records that came from IChangeLog notification - * events - * @return - * the fully resolves BigdataStatement change records - */ - private Collection<IChangeRecord> resolve(final AbstractTripleStore db, - final Collection<IChangeRecord> unresolved) { - - final Collection<IChangeRecord> resolved = - new LinkedList<IChangeRecord>(); - - // collect up the ISPOs out of the unresolved change records - final ISPO[] spos = new ISPO[unresolved.size()]; - int i = 0; - for (IChangeRecord rec : unresolved) { - spos[i++] = rec.getStatement(); - } - - // use the database to resolve them into BigdataStatements - final BigdataStatementIterator it = - db.asStatementIterator( - new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); - - /* - * the BigdataStatementIterator will produce BigdataStatement objects - * in the same order as the original ISPO array - */ - for (IChangeRecord rec : unresolved) { - - final BigdataStatement stmt = it.next(); - - resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); - - } - - return resolved; - - } - - - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/InMemChangeLog.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,163 @@ +package com.bigdata.rdf.changesets; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.BigdataStatementIterator; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * This is a very simple implementation of a change log. NOTE: This is not + * a particularly great implementation. First of all it ends up storing + * two copies of the change set. Secondly it needs to be smarter about + * concurrency, or maybe we can be smart about it when we do the + * implementation on the other side (the SAIL connection can just write + * change events to a buffer and then the buffer can be drained by + * another thread that doesn't block the actual read/write operations, + * although then we need to be careful not to issue the committed() + * notification before the buffer is drained). + * + * @author mike + * + */ +public class InMemChangeLog implements IChangeLog { + + protected static final Logger log = Logger.getLogger(InMemChangeLog.class); + + /** + * Running tally of new changes since the last commit notification. + */ + private final Map<ISPO,IChangeRecord> changeSet = + new HashMap<ISPO, IChangeRecord>(); + + /** + * Keep a record of the change set as of the last commit. + */ + private final Map<ISPO,IChangeRecord> committed = + new HashMap<ISPO, IChangeRecord>(); + + /** + * See {@link IChangeLog#changeEvent(IChangeRecord)}. + */ + public synchronized void changeEvent(final IChangeRecord record) { + + if (log.isInfoEnabled()) + log.info(record); + + changeSet.put(record.getStatement(), record); + + } + + /** + * See {@link IChangeLog#transactionCommited()}. 
+ */ + public synchronized void transactionCommited() { + + if (log.isInfoEnabled()) + log.info("transaction committed"); + + committed.clear(); + + committed.putAll(changeSet); + + changeSet.clear(); + + } + + /** + * See {@link IChangeLog#transactionAborted()}. + */ + public synchronized void transactionAborted() { + + if (log.isInfoEnabled()) + log.info("transaction aborted"); + + changeSet.clear(); + + } + + /** + * Return the change set as of the last commmit point. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit() { + + return committed.values(); + + } + + /** + * Return the change set as of the last commmit point, using the supplied + * database to resolve ISPOs to BigdataStatements. + * + * @return + * a collection of {@link IChangeRecord}s as of the last commit + * point + */ + public Collection<IChangeRecord> getLastCommit(final AbstractTripleStore db) { + + return resolve(db, committed.values()); + + } + + /** + * Use the supplied database to turn a set of ISPO change records into + * BigdataStatement change records. BigdataStatements also implement + * ISPO, the difference being that BigdataStatements also contain + * materialized RDF terms for the 3 (or 4) positions, in addition to just + * the internal identifiers (IVs) for those terms. + * + * @param db + * the database containing the lexicon needed to materialize + * the BigdataStatement objects + * @param unresolved + * the ISPO change records that came from IChangeLog notification + * events + * @return + * the fully resolves BigdataStatement change records + */ + private Collection<IChangeRecord> resolve(final AbstractTripleStore db, + final Collection<IChangeRecord> unresolved) { + + final Collection<IChangeRecord> resolved = + new LinkedList<IChangeRecord>(); + + // collect up the ISPOs out of the unresolved change records + final ISPO[] spos = new ISPO[unresolved.size()]; + int i = 0; + for (IChangeRecord rec : unresolved) { + spos[i++] = rec.getStatement(); + } + + // use the database to resolve them into BigdataStatements + final BigdataStatementIterator it = + db.asStatementIterator( + new ChunkedArrayIterator<ISPO>(i, spos, null/* keyOrder */)); + + /* + * the BigdataStatementIterator will produce BigdataStatement objects + * in the same order as the original ISPO array + */ + for (IChangeRecord rec : unresolved) { + + final BigdataStatement stmt = it.next(); + + resolved.add(new ChangeRecord(stmt, rec.getChangeAction())); + + } + + return resolved; + + } + + + +} Deleted: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java =================================================================== --- branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -1,208 +0,0 @@ -package com.bigdata.rdf.changesets; - -import java.util.Iterator; -import java.util.Map; -import org.apache.log4j.Logger; -import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.model.BigdataBNode; -import com.bigdata.rdf.spo.ISPO; -import com.bigdata.rdf.spo.SPO; -import com.bigdata.rdf.spo.ISPO.ModifiedEnum; -import com.bigdata.rdf.store.AbstractTripleStore; -import com.bigdata.relation.accesspath.IElementFilter; -import com.bigdata.striterator.ChunkedArrayIterator; -import 
com.bigdata.striterator.IChunkedOrderedIterator; - -public class StatementWriter { - - protected static final Logger log = Logger.getLogger(StatementWriter.class); - - public static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final IChunkedOrderedIterator<ISPO> itr, - final IChangeLog changeLog) { - - long n = 0; - - if (itr.hasNext()) { - -// final BigdataStatementIteratorImpl itr2 = -// new BigdataStatementIteratorImpl(database, bnodes, itr) -// .start(database.getExecutorService()); -// -// final BigdataStatement[] stmts = -// new BigdataStatement[database.getChunkCapacity()]; - final SPO[] stmts = new SPO[database.getChunkCapacity()]; - - int i = 0; - while ((i = nextChunk(itr, stmts)) > 0) { - n += addStatements(database, statementStore, copyOnly, filter, - stmts, i, changeLog); - } - - } - - return n; - - } - - private static long addStatements(final AbstractTripleStore database, - final AbstractTripleStore statementStore, - final boolean copyOnly, - final IElementFilter<ISPO> filter, - final ISPO[] stmts, - final int numStmts, - final IChangeLog changeLog) { - -// final SPO[] tmp = allocateSPOs(stmts, numStmts); - - final long n = database.addStatements(statementStore, copyOnly, - new ChunkedArrayIterator<ISPO>(numStmts, stmts, - null/* keyOrder */), filter); - - // Copy the state of the isModified() flag and notify changeLog - for (int i = 0; i < numStmts; i++) { - - if (stmts[i].isModified()) { - -// stmts[i].setModified(true); - - if (changeLog != null) { - - switch(stmts[i].getModified()) { - case INSERTED: - changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.INSERTED)); - break; - case UPDATED: - changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.UPDATED)); - break; - case REMOVED: - throw new AssertionError(); - default: - break; - } - - } - - } - - } - - return n; - - } - - public static long removeStatements(final AbstractTripleStore database, - final IChunkedOrderedIterator<ISPO> itr, - final boolean computeClosureForStatementIdentifiers, - final IChangeLog changeLog) { - - long n = 0; - - if (itr.hasNext()) { - -// final BigdataStatementIteratorImpl itr2 = -// new BigdataStatementIteratorImpl(database, bnodes, itr) -// .start(database.getExecutorService()); -// -// final BigdataStatement[] stmts = -// new BigdataStatement[database.getChunkCapacity()]; - final SPO[] stmts = new SPO[database.getChunkCapacity()]; - - int i = 0; - while ((i = nextChunk(itr, stmts)) > 0) { - n += removeStatements(database, stmts, i, - computeClosureForStatementIdentifiers, changeLog); - } - - } - - return n; - - } - - private static long removeStatements(final AbstractTripleStore database, - final ISPO[] stmts, - final int numStmts, - final boolean computeClosureForStatementIdentifiers, - final IChangeLog changeLog) { - - final long n = database.removeStatements( - new ChunkedArrayIterator<ISPO>(numStmts, stmts, - null/* keyOrder */), - computeClosureForStatementIdentifiers); - - // Copy the state of the isModified() flag and notify changeLog - for (int i = 0; i < numStmts; i++) { - - if (stmts[i].isModified()) { - - // just to be safe - stmts[i].setModified(ModifiedEnum.REMOVED); - - changeLog.changeEvent( - new ChangeRecord(stmts[i], ChangeAction.REMOVED)); - - } - - } - - return n; - - } - - private static int nextChunk(final Iterator<ISPO> itr, - final ISPO[] stmts) { - - assert stmts != null && stmts.length > 0; - - int i = 0; - while (itr.hasNext()) 
{ - stmts[i++] = itr.next(); - if (i == stmts.length) { - // stmts[] is full - return i; - } - } - - /* - * stmts[] is empty (i = 0) or partially - * full (i > 0 && i < stmts.length) - */ - return i; - - } - -// private static SPO[] allocateSPOs(final BigdataStatement[] stmts, -// final int numStmts) { -// -// final SPO[] tmp = new SPO[numStmts]; -// -// for (int i = 0; i < tmp.length; i++) { -// -// final BigdataStatement stmt = stmts[i]; -// -// final SPO spo = new SPO(stmt); -// -// if (log.isDebugEnabled()) -// log.debug("writing: " + stmt.toString() + " (" + spo + ")"); -// -// if(!spo.isFullyBound()) { -// -// throw new AssertionError("Not fully bound? : " + spo); -// -// } -// -// tmp[i] = spo; -// -// } -// -// return tmp; -// -// -// } - -} Copied: trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java (from rev 3978, branches/CHANGE_SET_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java) =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java (rev 0) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/changesets/StatementWriter.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -0,0 +1,208 @@ +package com.bigdata.rdf.changesets; + +import java.util.Iterator; +import java.util.Map; +import org.apache.log4j.Logger; +import com.bigdata.rdf.changesets.IChangeRecord.ChangeAction; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; +import com.bigdata.rdf.spo.ISPO; +import com.bigdata.rdf.spo.SPO; +import com.bigdata.rdf.spo.ISPO.ModifiedEnum; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.striterator.ChunkedArrayIterator; +import com.bigdata.striterator.IChunkedOrderedIterator; + +public class StatementWriter { + + protected static final Logger log = Logger.getLogger(StatementWriter.class); + + public static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final IChunkedOrderedIterator<ISPO> itr, + final IChangeLog changeLog) { + + long n = 0; + + if (itr.hasNext()) { + +// final BigdataStatementIteratorImpl itr2 = +// new BigdataStatementIteratorImpl(database, bnodes, itr) +// .start(database.getExecutorService()); +// +// final BigdataStatement[] stmts = +// new BigdataStatement[database.getChunkCapacity()]; + final SPO[] stmts = new SPO[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr, stmts)) > 0) { + n += addStatements(database, statementStore, copyOnly, filter, + stmts, i, changeLog); + } + + } + + return n; + + } + + private static long addStatements(final AbstractTripleStore database, + final AbstractTripleStore statementStore, + final boolean copyOnly, + final IElementFilter<ISPO> filter, + final ISPO[] stmts, + final int numStmts, + final IChangeLog changeLog) { + +// final SPO[] tmp = allocateSPOs(stmts, numStmts); + + final long n = database.addStatements(statementStore, copyOnly, + new ChunkedArrayIterator<ISPO>(numStmts, stmts, + null/* keyOrder */), filter); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (stmts[i].isModified()) { + +// stmts[i].setModified(true); + + if (changeLog != null) { + + switch(stmts[i].getModified()) { + case INSERTED: + changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.INSERTED)); + break; + case 
UPDATED: + changeLog.changeEvent(new ChangeRecord(stmts[i], ChangeAction.UPDATED)); + break; + case REMOVED: + throw new AssertionError(); + default: + break; + } + + } + + } + + } + + return n; + + } + + public static long removeStatements(final AbstractTripleStore database, + final IChunkedOrderedIterator<ISPO> itr, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + + long n = 0; + + if (itr.hasNext()) { + +// final BigdataStatementIteratorImpl itr2 = +// new BigdataStatementIteratorImpl(database, bnodes, itr) +// .start(database.getExecutorService()); +// +// final BigdataStatement[] stmts = +// new BigdataStatement[database.getChunkCapacity()]; + final SPO[] stmts = new SPO[database.getChunkCapacity()]; + + int i = 0; + while ((i = nextChunk(itr, stmts)) > 0) { + n += removeStatements(database, stmts, i, + computeClosureForStatementIdentifiers, changeLog); + } + + } + + return n; + + } + + private static long removeStatements(final AbstractTripleStore database, + final ISPO[] stmts, + final int numStmts, + final boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + + final long n = database.removeStatements( + new ChunkedArrayIterator<ISPO>(numStmts, stmts, + null/* keyOrder */), + computeClosureForStatementIdentifiers); + + // Copy the state of the isModified() flag and notify changeLog + for (int i = 0; i < numStmts; i++) { + + if (stmts[i].isModified()) { + + // just to be safe + stmts[i].setModified(ModifiedEnum.REMOVED); + + changeLog.changeEvent( + new ChangeRecord(stmts[i], ChangeAction.REMOVED)); + + } + + } + + return n; + + } + + private static int nextChunk(final Iterator<ISPO> itr, + final ISPO[] stmts) { + + assert stmts != null && stmts.length > 0; + + int i = 0; + while (itr.hasNext()) { + stmts[i++] = itr.next(); + if (i == stmts.length) { + // stmts[] is full + return i; + } + } + + /* + * stmts[] is empty (i = 0) or partially + * full (i > 0 && i < stmts.length) + */ + return i; + + } + +// private static SPO[] allocateSPOs(final BigdataStatement[] stmts, +// final int numStmts) { +// +// final SPO[] tmp = new SPO[numStmts]; +// +// for (int i = 0; i < tmp.length; i++) { +// +// final BigdataStatement stmt = stmts[i]; +// +// final SPO spo = new SPO(stmt); +// +// if (log.isDebugEnabled()) +// log.debug("writing: " + stmt.toString() + " (" + spo + ")"); +// +// if(!spo.isFullyBound()) { +// +// throw new AssertionError("Not fully bound? : " + spo); +// +// } +// +// tmp[i] = spo; +// +// } +// +// return tmp; +// +// +// } + +} Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPOAssertionBuffer.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -29,11 +29,15 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.rdf.changesets.IChangeLog; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.ISPOAssertionBuffer; import com.bigdata.rdf.spo.JustificationWriter; @@ -101,8 +105,13 @@ * {@link Justification}s for entailments. 
*/ protected final boolean justify; - + /** + * Used for change set notification (optional). + */ + protected final IChangeLog changeLog; + + /** * Create a buffer. * * @param focusStore @@ -126,6 +135,38 @@ AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, boolean justified) { + this(focusStore, db, filter, capacity, justified, + null/* changeLog */); + + } + + /** + * Create a buffer. + * + * @param focusStore + * The focusStore on which the entailments computed by closure + * will be written (required). This is either the database or a + * temporary focusStore used during incremental TM. + * @param db + * The database in which the terms are defined (required). + * @param filter + * Option filter. When present statements matched by the filter + * are NOT retained by the {@link SPOAssertionBuffer} and will + * NOT be added to the <i>focusStore</i>. + * @param capacity + * The maximum {@link SPO}s that the buffer can hold before it + * is {@link #flush()}ed. + * @param justified + * true iff the Truth Maintenance strategy requires that we + * focusStore {@link Justification}s for entailments. + * @param changeLog + * optional change log for change notification + */ + public SPOAssertionBuffer(AbstractTripleStore focusStore, + AbstractTripleStore db, IElementFilter<ISPO> filter, int capacity, + boolean justified, final IChangeLog changeLog + ) { + super(db, filter, capacity); if (focusStore == null) @@ -142,6 +183,8 @@ justifications = justified ? new Justification[capacity] : null; + this.changeLog = changeLog; + } /** @@ -180,12 +223,26 @@ if (numJustifications == 0) { - // batch insert statements into the focusStore. - n = db.addStatements( + if (changeLog == null) { + + // batch insert statements into the focusStore. + n = db.addStatements( focusStore, true/* copyOnly */, new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), null/*filter*/); + + } else { + + n = com.bigdata.rdf.changesets.StatementWriter.addStatements( + db, + focusStore, + true/* copyOnly */, + null/* filter */, + new ChunkedArrayIterator<ISPO>(numStmts, stmts, null/*keyOrder*/), + changeLog); + + } } else { @@ -209,7 +266,8 @@ // task will write SPOs on the statement indices. tasks.add(new StatementWriter(getTermDatabase(), focusStore, false/* copyOnly */, new ChunkedArrayIterator<ISPO>( - numStmts, stmts, null/*keyOrder*/), nwritten)); + numStmts, stmts, null/*keyOrder*/), nwritten, + changeLog)); // task will write justifications on the justifications index. final AtomicLong nwrittenj = new AtomicLong(); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-11-23 14:27:09 UTC (rev 3978) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/inf/SPORetractionBuffer.java 2010-11-23 15:22:27 UTC (rev 3979) @@ -27,6 +27,11 @@ package com.bigdata.rdf.inf; +import java.util.Map; +import com.bigdata.rdf.changesets.IChangeLog; +import com.bigdata.rdf.changesets.StatementWriter; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.store.AbstractTripleStore; @@ -49,6 +54,11 @@ private final AbstractTripleStore store; private final boolean computeClosureForStatementIdentifiers; + + /** + * Optional change log for change notification. 
+ */ + protected final IChangeLog changeLog; /** * @param store @@ -63,6 +73,27 @@ public SPORetractionBuffer(AbstractTripleStore store, int capacity, boolean computeClosureForStatementIdentifiers) { + this(store, capacity, computeClosureForStatementIdentifiers, + null/* changeLog */); + + } + + /** + * @param store + * The database from which the statement will be removed when the + * buffer is {@link #flush()}ed. + * @param capacity + * The capacity of the retraction buffer. + * @param computeClosureForStatementIdentifiers + * See + * {@link AbstractTripleStore#removeStatements(com.bigdata.rdf.spo.ISPOIterator, boolean)} + * @param changeLog + * optional change log for change notification + */ + public SPORetractionBuffer(AbstractTripleStore store, int capacity, + boolean computeClosureForStatementIdentifiers, + final IChangeLog changeLog) { + super(store, null/*filter*/, capacity); if (store == null) @@ -72,14 +103,31 @@ this.computeClosureForStatementIdentifiers = computeClosureForStatementIdentifiers; + this.changeLog = changeLog; + } public int flush() { if (isEmpty()) return 0; - long n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, + final long n; + + if (changeLog == null) { + + n = store.removeStatements(new ChunkedArrayIterator<ISPO>(numStmts,stmts, null/*keyOrder*/), computeClosureForStatementIdentifiers); + + } else { + + n = StatementWriter.removeStatements( + store, + new ChunkedArrayIterator<ISPO>( + numStmts,stmts,null/*keyOrder*/), + computeClosureForStatementIdentifiers, + changeLog); + ... [truncated message content] |
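A note on the ResultBitBufferHandler change above: with a multiplier of 2, each tuple owns two adjacent slots in the results boolean[] and can therefore encode one of four per-tuple outcomes (INSERTED, REMOVED, UPDATED, NO_OP) instead of a single yes/no. The sketch below shows that packing; the enum name and the low-bit/high-bit layout are illustrative assumptions, not the actual bigdata encoding.

import java.util.Arrays;

public class BitPackingSketch {

    enum TupleState { NO_OP, INSERTED, REMOVED, UPDATED }

    // Two booleans ("bits") per tuple, matching multiplier = 2 above.
    static final int MULTIPLIER = 2;

    static void encode(final boolean[] results, final int tupleIndex,
            final TupleState state) {
        final int ord = state.ordinal();
        results[tupleIndex * MULTIPLIER] = (ord & 1) != 0;     // low bit
        results[tupleIndex * MULTIPLIER + 1] = (ord & 2) != 0; // high bit
    }

    static TupleState decode(final boolean[] results, final int tupleIndex) {
        final int ord = (results[tupleIndex * MULTIPLIER] ? 1 : 0)
                | (results[tupleIndex * MULTIPLIER + 1] ? 2 : 0);
        return TupleState.values()[ord];
    }

    public static void main(final String[] args) {
        final int nkeys = 3;
        final boolean[] results = new boolean[nkeys * MULTIPLIER];
        encode(results, 0, TupleState.INSERTED);
        encode(results, 1, TupleState.UPDATED);
        encode(results, 2, TupleState.NO_OP);
        System.out.println(Arrays.toString(results));
        for (int i = 0; i < nkeys; i++)
            System.out.println("tuple " + i + " => " + decode(results, i));
    }

}

This is also why aggregate() scales both split.fromIndex and split.ntuples by the multiplier: each split's booleans must land in the region of the combined array owned by its tuples.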
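For readers wiring up the new change-set API, the IChangeLog contract shown above reduces to: buffer change events as statements are flushed to the indices, then act on transactionCommited() or discard on transactionAborted(). Below is a minimal sketch of an implementation that just tallies events. It assumes only the three interface methods from the diff (transactionCommited() is the spelling declared by the interface) and would be registered via BigdataSail#setChangeLog(); anything beyond that is illustrative.

import java.util.concurrent.atomic.AtomicLong;

import com.bigdata.rdf.changesets.IChangeLog;
import com.bigdata.rdf.changesets.IChangeRecord;

public class CountingChangeLog implements IChangeLog {

    // Events seen since the last commit/abort notification.
    private final AtomicLong pending = new AtomicLong();

    // Events that made it into a commit.
    private final AtomicLong committed = new AtomicLong();

    public void changeEvent(final IChangeRecord record) {
        // Fired when a statement add/remove is flushed (not yet committed).
        pending.incrementAndGet();
    }

    public void transactionCommited() {
        committed.addAndGet(pending.getAndSet(0L));
    }

    public void transactionAborted() {
        pending.set(0L);
    }

    public long getCommittedCount() {
        return committed.get();
    }

}

Unlike InMemChangeLog above, this sketch keeps no per-statement state, so it sidesteps the two-copies-of-the-change-set overhead that the InMemChangeLog javadoc itself flags.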
From: <tho...@us...> - 2011-05-05 19:08:17
Revision: 4454 http://bigdata.svn.sourceforge.net/bigdata/?rev=4454&view=rev Author: thompsonbry Date: 2011-05-05 19:08:09 +0000 (Thu, 05 May 2011) Log Message: ----------- This commit ports changes already present in the quads branch back to the trunk. Those changes include: 1. synchronized keyword for BigdataSail#setChangeLog() for visibility guarantee on the field. 2. Semaphore on Journal to protect the unisolated connection used by the Sail. 3. Modified BigdataSail#getUnisolatedConnection() to use the Semaphore declared by the Journal. 4. Modified BTree#handleCommit() to take the lock used by the UnisolatedReadWriteIndex in order to protect against concurrent modification during the commit protocol for a mutable BTree. 5. Renamed TestBigdataSailWithQuads to TestBigdataSailWithQuadsAndNestedSubquery. Nested subquery is no longer supported in the dev branch. The test suite flavors for quads with nested subquery and without inlining have both been deprecated as those store variants are no longer supported in the dev branch. 6. Copied the version of TestRollbacks from the quads branch. 7. Copied the version of TestBootstrapBigdataSail from the quads branch. The only obvious problem as of this commit is that TestRollbacks fails in the trunk while it passes in the quads branch. I suspect that this has to do with the manner in which we use a full transaction for read-only connections in the quads branch while the trunk only uses a read-historical read. This last point needs to be investigated further and the problem demonstrated by TestRollbacks resolved in one way or another. There were several changes made to the quads branch when we changed to use a read-only transaction for BigdataSail#getReadOnlyConnection(). If that behavior is to be ported back to the trunk, then a more extensive survey will have to be performed. Modified Paths: -------------- trunk/bigdata/src/java/com/bigdata/btree/BTree.java trunk/bigdata/src/java/com/bigdata/journal/Journal.java trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBootstrapBigdataSail.java trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java Added Paths: ----------- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndNestedSubquery.java Removed Paths: ------------- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java Modified: trunk/bigdata/src/java/com/bigdata/btree/BTree.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/btree/BTree.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata/src/java/com/bigdata/btree/BTree.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -29,6 +29,7 @@ import java.lang.ref.WeakReference; import java.lang.reflect.Constructor; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; import com.bigdata.BigdataStatics; import com.bigdata.btree.AbstractBTreeTupleCursor.MutableBTreeTupleCursor; @@ -1126,30 +1127,47 @@ assertNotTransient(); assertNotReadOnly(); - if (/*autoCommit &&*/ needsCheckpoint()) { + /* + * Note: Acquiring this lock provides for atomicity of the checkpoint of + * the BTree during the commit protocol. 
Without this lock, users of the + * UnisolatedReadWriteIndex could be concurrently modifying the BTree + * while we are attempting to snapshot it for the commit. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/288 + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/278 + */ + final Lock lock = new UnisolatedReadWriteIndex(this).writeLock(); + try { + + if (/* autoCommit && */needsCheckpoint()) { - /* - * Flush the btree, write a checkpoint record, and return the - * address of that checkpoint record. The [checkpoint] reference is - * also updated. - */ + /* + * Flush the btree, write a checkpoint record, and return the + * address of that checkpoint record. The [checkpoint] reference + * is also updated. + */ - return writeCheckpoint(); + return writeCheckpoint(); - } + } - /* - * There have not been any writes on this btree or auto-commit is - * disabled. - * - * Note: if the application has explicitly invoked writeCheckpoint() - * then the returned address will be the address of that checkpoint - * record and the BTree will have a new checkpoint address made restart - * safe on the backing store. - */ + /* + * There have not been any writes on this btree or auto-commit is + * disabled. + * + * Note: if the application has explicitly invoked writeCheckpoint() + * then the returned address will be the address of that checkpoint + * record and the BTree will have a new checkpoint address made + * restart safe on the backing store. + */ - return checkpoint.addrCheckpoint; - + return checkpoint.addrCheckpoint; + + } finally { + lock.unlock(); + } + } /** Modified: trunk/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- trunk/bigdata/src/java/com/bigdata/journal/Journal.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata/src/java/com/bigdata/journal/Journal.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -33,6 +33,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -1203,4 +1204,76 @@ } private final LatchedExecutor readService; + /** + * A Journal level semaphore used to restrict applications to a single + * unisolated connection. The "unisolated" connection is an application + * level construct which supports highly scalable ACID operations but only a + * single such "connection" can exist at a time for a Journal. This + * constraint arises from the need for the application to coordinate + * operations on the low level indices and commit/abort processing while it + * holds the permit. + * <p> + * Note: If by some chance the permit has become "lost" it can be rebalanced + * by {@link Semaphore#release()}. However, uses of this {@link Semaphore} + * should ensure that it is release along all code paths, including a + * finalizer if necessary. + */ + private final Semaphore unisolatedSemaphore = new Semaphore(1/* permits */, + false/* fair */); + + /** + * Acquire a permit for the UNISOLATED connection. 
+ * + * @throws InterruptedException + */ + public void acquireUnisolatedConnection() throws InterruptedException { + + unisolatedSemaphore.acquire(); + + if (log.isDebugEnabled()) + log.debug("acquired semaphore: availablePermits=" + + unisolatedSemaphore.availablePermits()); + + if (unisolatedSemaphore.availablePermits() != 0) { + /* + * Note: This test can not be made atomic with the Semaphore API. It + * is possible unbalanced calls to release() could drive the #of + * permits in the Semaphore above ONE (1) since the Semaphore + * constructor does not place an upper bound on the #of permits, but + * rather sets the initial #of permits available. An attempt to + * acquire a permit which has a post-condition with additional + * permits available will therefore "eat" a permit. + */ + throw new IllegalStateException(); + } + + } + + /** + * Release the permit for the UNISOLATED connection. + * + * @throws IllegalStateException + * unless the #of permits available is zero. + */ + public void releaseUnisolatedConnection() { + + if (log.isDebugEnabled()) + log.debug("releasing semaphore: availablePermits=" + + unisolatedSemaphore.availablePermits()); + + if (unisolatedSemaphore.availablePermits() != 0) { + /* + * Note: This test can not be made atomic with the Semaphore API. It + * is possible that a concurrent call could drive the #of permits in + * the Semaphore above ONE (1) since the Semaphore constructor does + * not place an upper bound on the #of permits, but rather sets the + * initial #of permits available. + */ + throw new IllegalStateException(); + } + + unisolatedSemaphore.release(); + + } + } Modified: trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -1156,12 +1156,18 @@ public BigdataSailConnection getUnisolatedConnection() throws InterruptedException { - Lock writeLock = lock.writeLock(); + if (getDatabase().getIndexManager() instanceof Journal) { + // acquire permit from Journal. + ((Journal) getDatabase().getIndexManager()) + .acquireUnisolatedConnection(); + } + + final Lock writeLock = lock.writeLock(); writeLock.lock(); // new writable connection. - final BigdataSailConnection conn = - new BigdataSailConnection(database, writeLock); + final BigdataSailConnection conn = new BigdataSailConnection(database, + writeLock, true/* unisolated */); return conn; @@ -1197,7 +1203,7 @@ database.getNamespace(), TimestampUtility.asHistoricalRead(timestamp)); - return new BigdataSailConnection(view, null); + return new BigdataSailConnection(view, null, false/*unisolated*/); } @@ -1225,7 +1231,7 @@ final Lock readLock = lock.readLock(); readLock.lock(); - return new BigdataSailConnection(readLock) { + return new BigdataSailConnection(readLock,false/*unisolated*/) { /** * The transaction id. @@ -1428,8 +1434,21 @@ * Used to coordinate between read/write transactions and the unisolated * view. */ - private Lock lock; + final private Lock lock; + /** + * <code>true</code> iff this is the UNISOLATED connection (only one of + * those at a time). + */ + private final boolean unisolated; + + public String toString() { + + return getClass().getName() + "{timestamp=" + + TimestampUtility.toString(database.getTimestamp()) + "}"; + + } + /** * Return the assertion buffer. 
* <p> @@ -1539,10 +1558,11 @@ } - protected BigdataSailConnection(final Lock lock) { + protected BigdataSailConnection(final Lock lock,final boolean unisolated) { this.lock = lock; - + this.unisolated = unisolated; + } /** @@ -1553,11 +1573,12 @@ * {@link SailConnection} will not support update. */ protected BigdataSailConnection(final AbstractTripleStore database, - final Lock lock) { + final Lock lock, final boolean unisolated) { attach(database); this.lock = lock; + this.unisolated = unisolated; } @@ -2663,6 +2684,11 @@ if (lock != null) { lock.unlock(); } + if (unisolated && getDatabase().getIndexManager() instanceof Journal) { + // release the permit. + ((Journal) getDatabase().getIndexManager()) + .releaseUnisolatedConnection(); + } open = false; } @@ -3471,7 +3497,7 @@ * @param log * the change log */ - public void setChangeLog(final IChangeLog changeLog) { + synchronized public void setChangeLog(final IChangeLog changeLog) { this.changeLog = changeLog; Deleted: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -1,176 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Sep 4, 2008 - */ - -package com.bigdata.rdf.sail; - -import java.util.Properties; - -import junit.extensions.proxy.ProxyTestSuite; -import junit.framework.Test; -import junit.framework.TestSuite; - -import com.bigdata.rdf.axioms.NoAxioms; -import com.bigdata.rdf.sail.BigdataSail.Options; -import com.bigdata.rdf.sail.tck.BigdataConnectionTest; -import com.bigdata.rdf.sail.tck.BigdataSparqlTest; -import com.bigdata.rdf.sail.tck.BigdataStoreTest; -import com.bigdata.relation.AbstractResource; - -/** - * Test suite for the {@link BigdataSail} with quads enabled. The provenance - * mode is disabled. Inference is disabled. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class TestBigdataSailWithQuads extends AbstractBigdataSailTestCase { - - /** - * - */ - public TestBigdataSailWithQuads() { - } - - public TestBigdataSailWithQuads(String name) { - super(name); - } - - public static Test suite() { - - final TestBigdataSailWithQuads delegate = new TestBigdataSailWithQuads(); // !!!! THIS CLASS !!!! - - /* - * Use a proxy test suite and specify the delegate. - */ - - final ProxyTestSuite suite = new ProxyTestSuite(delegate, "SAIL with Quads (nested subquery joins)"); - - // test pruning of variables not required for downstream processing. 
- suite.addTestSuite(TestPruneBindingSets.class); - - // misc named graph API stuff. - suite.addTestSuite(TestQuadsAPI.class); - - // SPARQL named graphs tests. - suite.addTestSuite(TestNamedGraphs.class); - - // test suite for optionals handling (left joins). - suite.addTestSuite(TestOptionals.class); - - // test of the search magic predicate - suite.addTestSuite(TestSearchQuery.class); - - // high-level query tests. - suite.addTestSuite(TestQuery.class); - - // test of high-level query on a graph with statements about statements. - suite.addTestSuite(TestProvenanceQuery.class); - - // unit tests for custom evaluation of high-level query - suite.addTestSuite(TestBigdataSailEvaluationStrategyImpl.class); - - suite.addTestSuite(TestUnions.class); - - suite.addTestSuite(TestDescribe.class); - - // The Sesame TCK, including the SPARQL test suite. - { - - final TestSuite tckSuite = new TestSuite("Sesame 2.x TCK"); - - tckSuite.addTestSuite(BigdataStoreTest.LTSWithNestedSubquery.class); - - tckSuite.addTestSuite(BigdataConnectionTest.LTSWithNestedSubquery.class); - - try { - - tckSuite.addTest(BigdataSparqlTest.suiteLTSWithNestedSubquery()); - - } catch (Exception ex) { - - throw new RuntimeException(ex); - - } - - suite.addTest(tckSuite); - - } - - return suite; - - } - - @Override - protected BigdataSail getSail(final Properties properties) { - - return new BigdataSail(properties); - - } - - public Properties getProperties() { - - final Properties properties = new Properties(super.getProperties()); -/* - properties.setProperty(Options.STATEMENT_IDENTIFIERS, "false"); - - properties.setProperty(Options.QUADS, "true"); - - properties.setProperty(Options.AXIOMS_CLASS, NoAxioms.class.getName()); -*/ - properties.setProperty(Options.QUADS_MODE, "true"); - - properties.setProperty(Options.TRUTH_MAINTENANCE, "false"); - - properties.setProperty(AbstractResource.Options.NESTED_SUBQUERY, "true"); - - return properties; - - } - - @Override - protected BigdataSail reopenSail(final BigdataSail sail) { - - final Properties properties = sail.database.getProperties(); - - if (sail.isOpen()) { - - try { - - sail.shutDown(); - - } catch (Exception ex) { - - throw new RuntimeException(ex); - - } - - } - - return getSail(properties); - - } - -} Copied: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndNestedSubquery.java (from rev 4431, trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java) =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndNestedSubquery.java (rev 0) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndNestedSubquery.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -0,0 +1,180 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 4, 2008 + */ + +package com.bigdata.rdf.sail; + +import java.util.Properties; + +import junit.extensions.proxy.ProxyTestSuite; +import junit.framework.Test; +import junit.framework.TestSuite; + +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.sail.BigdataSail.Options; +import com.bigdata.rdf.sail.tck.BigdataConnectionTest; +import com.bigdata.rdf.sail.tck.BigdataSparqlTest; +import com.bigdata.rdf.sail.tck.BigdataStoreTest; +import com.bigdata.relation.AbstractResource; + +/** + * Test suite for the {@link BigdataSail} with quads enabled. The provenance + * mode is disabled. Inference is disabled. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + * + * @deprecated Support for nested subquery has been dropped in the development branch. + */ +public class TestBigdataSailWithQuadsAndNestedSubquery extends AbstractBigdataSailTestCase { + + /** + * + */ + public TestBigdataSailWithQuadsAndNestedSubquery() { + } + + public TestBigdataSailWithQuadsAndNestedSubquery(String name) { + super(name); + } + + public static Test suite() { + + final TestBigdataSailWithQuadsAndNestedSubquery delegate = new TestBigdataSailWithQuadsAndNestedSubquery(); // !!!! THIS CLASS !!!! + + /* + * Use a proxy test suite and specify the delegate. + */ + + final ProxyTestSuite suite = new ProxyTestSuite(delegate, "SAIL with Quads (nested subquery joins)"); + + // test pruning of variables not required for downstream processing. + suite.addTestSuite(TestPruneBindingSets.class); + + // misc named graph API stuff. + suite.addTestSuite(TestQuadsAPI.class); + + // SPARQL named graphs tests. + suite.addTestSuite(TestNamedGraphs.class); + + // test suite for optionals handling (left joins). + suite.addTestSuite(TestOptionals.class); + + // test of the search magic predicate + suite.addTestSuite(TestSearchQuery.class); + + // high-level query tests. + suite.addTestSuite(TestQuery.class); + + // test of high-level query on a graph with statements about statements. + suite.addTestSuite(TestProvenanceQuery.class); + + // unit tests for custom evaluation of high-level query + suite.addTestSuite(TestBigdataSailEvaluationStrategyImpl.class); + + suite.addTestSuite(TestUnions.class); + + suite.addTestSuite(TestDescribe.class); + + suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacks.class); + + // The Sesame TCK, including the SPARQL test suite. 
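+        // Note: the TCK suite is assembled dynamically below; a failure
+        // during suite construction is rethrown as a RuntimeException so
+        // that a broken TCK setup fails loudly rather than silently
+        // dropping tests.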
+ { + + final TestSuite tckSuite = new TestSuite("Sesame 2.x TCK"); + + tckSuite.addTestSuite(BigdataStoreTest.LTSWithNestedSubquery.class); + + tckSuite.addTestSuite(BigdataConnectionTest.LTSWithNestedSubquery.class); + + try { + + tckSuite.addTest(BigdataSparqlTest.suiteLTSWithNestedSubquery()); + + } catch (Exception ex) { + + throw new RuntimeException(ex); + + } + + suite.addTest(tckSuite); + + } + + return suite; + + } + + @Override + protected BigdataSail getSail(final Properties properties) { + + return new BigdataSail(properties); + + } + + public Properties getProperties() { + + final Properties properties = new Properties(super.getProperties()); +/* + properties.setProperty(Options.STATEMENT_IDENTIFIERS, "false"); + + properties.setProperty(Options.QUADS, "true"); + + properties.setProperty(Options.AXIOMS_CLASS, NoAxioms.class.getName()); +*/ + properties.setProperty(Options.QUADS_MODE, "true"); + + properties.setProperty(Options.TRUTH_MAINTENANCE, "false"); + + properties.setProperty(AbstractResource.Options.NESTED_SUBQUERY, "true"); + + return properties; + + } + + @Override + protected BigdataSail reopenSail(final BigdataSail sail) { + + final Properties properties = sail.database.getProperties(); + + if (sail.isOpen()) { + + try { + + sail.shutDown(); + + } catch (Exception ex) { + + throw new RuntimeException(ex); + + } + + } + + return getSail(properties); + + } + +} Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -32,7 +32,6 @@ import junit.framework.Test; import junit.framework.TestSuite; -import com.bigdata.rdf.axioms.NoAxioms; import com.bigdata.rdf.sail.BigdataSail.Options; import com.bigdata.rdf.sail.tck.BigdataConnectionTest; import com.bigdata.rdf.sail.tck.BigdataSparqlTest; @@ -97,7 +96,9 @@ suite.addTestSuite(TestDescribe.class); - // The Sesame TCK, including the SPARQL test suite. + suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacks.class); + + // The Sesame TCK, including the SPARQL test suite. { final TestSuite tckSuite = new TestSuite("Sesame 2.x TCK"); Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -46,6 +46,8 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * @deprecated Support for inlining is non-optional in the development branch. */ public class TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining extends AbstractBigdataSailTestCase { @@ -97,6 +99,8 @@ suite.addTestSuite(TestDescribe.class); + suite.addTestSuite(com.bigdata.rdf.sail.contrib.TestRollbacks.class); + // The Sesame TCK, including the SPARQL test suite. 
{ Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBootstrapBigdataSail.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBootstrapBigdataSail.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBootstrapBigdataSail.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -30,8 +30,15 @@ import java.io.File; import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; -import junit.framework.TestCase; +import junit.framework.TestCase2; import org.openrdf.model.Statement; import org.openrdf.model.URI; @@ -41,8 +48,13 @@ import org.openrdf.sail.SailConnection; import org.openrdf.sail.SailException; +import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.sail.BigdataSail.BigdataSailConnection; import com.bigdata.rdf.sail.BigdataSail.Options; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.store.LocalTripleStore; /** * Bootstrap test case for bringing up the {@link BigdataSail}. @@ -50,7 +62,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class TestBootstrapBigdataSail extends TestCase { +public class TestBootstrapBigdataSail extends TestCase2 { /** * @@ -71,33 +83,19 @@ * @throws SailException */ public void test_ctor_1() throws SailException { - + final BigdataSail sail = new BigdataSail(); - sail.initialize(); - try { + sail.initialize(); + sail.shutDown(); + + } finally { - } + sail.getDatabase().getIndexManager().destroy(); - finally { - - String filename = sail.properties.getProperty(Options.FILE); - - if (filename != null) { - - File file = new File(filename); - - if(file.exists() && ! 
file.delete()) { - - fail("Could not delete file after test: "+filename); - - } - - } - } } @@ -121,22 +119,20 @@ } - Properties properties = new Properties(); + final Properties properties = new Properties(); properties.setProperty(Options.FILE, file.toString()); - BigdataSail sail = new BigdataSail(properties); + final BigdataSail sail = new BigdataSail(properties); - sail.initialize(); - try { + sail.initialize(); + sail.shutDown(); - } + } finally { - finally { - if (!file.exists()) { fail("Could not locate store: " + file); @@ -161,94 +157,403 @@ */ public void test_getConnection() throws SailException { - final File file = new File(getName() + Options.JNL); - - if(file.exists()) { - - if(!file.delete()) { - - fail("Could not delete file before test: " + file); + final Properties properties = new Properties(); - } - - } + properties.setProperty(Options.CREATE_TEMP_FILE, "true"); - final Properties properties = new Properties(); + final BigdataSail sail = new BigdataSail(properties); - properties.setProperty(Options.FILE, file.toString()); + try { - final BigdataSail sail = new BigdataSail(properties); + sail.initialize(); - sail.initialize(); - - try { + final SailConnection conn = sail.getConnection(); - final SailConnection conn = sail.getConnection(); - - conn.close(); - - sail.shutDown(); + conn.close(); - } + sail.shutDown(); - finally { + } finally { - if (!file.exists()) { + sail.getDatabase().getIndexManager().destroy(); - fail("Could not locate store: " + file); + } - if (!file.delete()) { + } - fail("Could not delete file after test: " + file); + /** + * Unit test verifies that a thread may not obtain more than one instance of + * the unisolated connection at a time from the {@link BigdataSail}. + * + * @throws SailException + * @throws InterruptedException + * + * FIXME Re-propagate test changes to the trunk along with the + * changes to Journal (the semaphore) and to BigdataSail (using + * the semaphore). + * + * @throws ExecutionException + */ + public void test_getConnectionAllowedExactlyOnce1() throws SailException, + InterruptedException, ExecutionException { - } + final Properties properties = new Properties(); - } + properties.setProperty(Options.CREATE_TEMP_FILE, "true"); - } + ExecutorService service = null; + final BigdataSail sail = new BigdataSail(properties); - } + try { - /** - * Test creates a database, obtains a writable connection, writes some data - * on the store, verifies that the data can be read back from within the - * connection but that it is not visible in a read-committed view, commits - * the write set, and verifies that the data is now visible in a - * read-committed view. - * - * @todo variant that writes, aborts the write, and verifies that the data - * was not made restart safe. 
- * - * @throws SailException - */ - public void test_isolation() throws SailException { + sail.initialize(); + service = Executors.newSingleThreadExecutor(); - final File file = new File(getName() + Options.JNL); - - if(file.exists()) { - - if(!file.delete()) { - - fail("Could not delete file before test: " + file); + Future<Void> f = null; + + try { - } - - } + final Callable<Void> task = new Callable<Void>() { - final Properties properties = new Properties(); + public Void call() throws Exception { - properties.setProperty(Options.FILE, file.toString()); + SailConnection conn1 = null; + SailConnection conn2 = null; - final BigdataSail sail = new BigdataSail(properties); + try { - sail.initialize(); - - final SailConnection conn = sail.getConnection(); - - final SailConnection readConn = sail.getReadOnlyConnection(); - - try { + log.info("Requesting 1st unisolated connection."); + conn1 = sail.getUnisolatedConnection(); + + log.info("Requesting 2nd unisolated connection."); + + conn2 = sail.getUnisolatedConnection(); + + fail("Not expecting a 2nd unisolated connection"); + + return (Void) null; + + } finally { + + if (conn1 != null) + conn1.close(); + + if (conn2 != null) + conn2.close(); + + } + } + + }; + + // run task. it should block when attempting to get the 2nd + // connection. + f = service.submit(task); + + // wait up to a timeout to verify that the task blocked rather + // than acquiring the 2nd connection. + f.get(250, TimeUnit.MILLISECONDS); + + } catch (TimeoutException e) { + + /* + * This is the expected outcome. + */ + log.info("timeout"); + + } finally { + + if (f != null) { + // Cancel task. + f.cancel(true/* mayInterruptIfRunning */); + } + + sail.shutDown(); + + } + + } finally { + + if (service != null) { + service.shutdownNow(); + } + + sail.getDatabase().getIndexManager().destroy(); + + } + + } + + /** + * Unit test verifies exactly one unisolated connection for two different + * {@link BigdataSail} instances for the same {@link AbstractTripleStore} on + * the same {@link Journal}. + * + * @throws SailException + * @throws InterruptedException + */ + public void test_getConnectionAllowedExactlyOnce2() throws SailException, + InterruptedException, ExecutionException { + + final Properties properties = new Properties(); + + properties.setProperty(Options.CREATE_TEMP_FILE, "true"); + + ExecutorService service = null; + final BigdataSail sail = new BigdataSail(properties); + + try { + + sail.initialize(); + service = Executors.newSingleThreadExecutor(); + + // wrap a 2nd sail around the same tripleStore. + final BigdataSail sail2 = new BigdataSail(sail.getDatabase()); + sail2.initialize(); + + Future<Void> f = null; + + try { + + final Callable<Void> task = new Callable<Void>() { + + public Void call() throws Exception { + + SailConnection conn1 = null; + SailConnection conn2 = null; + + try { + + log.info("Requesting 1st unisolated connection."); + + conn1 = sail.getUnisolatedConnection(); + + log.info("Requesting 2nd unisolated connection."); + + conn2 = sail2.getUnisolatedConnection(); + + fail("Not expecting a 2nd unisolated connection"); + + return (Void) null; + + } finally { + + if (conn1 != null) + conn1.close(); + + if (conn2 != null) + conn2.close(); + + } + } + + }; + + // run task. it should block when attempting to get the 2nd + // connection. + f = service.submit(task); + + // wait up to a timeout to verify that the task blocked rather + // than acquiring the 2nd connection. 
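+                // (The 2nd request parks inside Semaphore.acquire(), so the
+                // timed get() is the probe: a TimeoutException is the
+                // expected success path, while completion within the timeout
+                // means the 2nd connection was granted and the task's fail()
+                // surfaces here as an ExecutionException.)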
+ f.get(250, TimeUnit.MILLISECONDS); + + } catch (TimeoutException e) { + + /* + * This is the expected outcome. + */ + log.info("timeout"); + + } finally { + + if (f != null) { + // Cancel task. + f.cancel(true/* mayInterruptIfRunning */); + } + + if (sail2 != null) + sail2.shutDown(); + + sail.shutDown(); + + } + + } finally { + + if (service != null) { + service.shutdownNow(); + } + + sail.getDatabase().getIndexManager().destroy(); + + } + + } + + /** + * Unit test verifying that exactly one unisolated connection is allowed at + * a time for two sails wrapping different {@link AbstractTripleStore} + * instances. (This guarantee is needed to preserve ACID semantics for the + * unisolated connection when there is more than one + * {@link AbstractTripleStore} on the same {@link Journal}. However, + * scale-out should not enforce this constraint since it is shard-wise ACID + * for unisolated operations.) + * + * @throws SailException + * @throws InterruptedException + */ + public void test_getConnectionAllowedExactlyOnce3() throws SailException, + InterruptedException, ExecutionException { + + final Properties properties = new Properties(); + + properties.setProperty(Options.CREATE_TEMP_FILE, "true"); + + ExecutorService service = null; + final BigdataSail sail = new BigdataSail(properties); + + try { + + sail.initialize(); + service = Executors.newSingleThreadExecutor(); + + // wrap a 2nd sail around a different tripleStore. + final BigdataSail sail2; + { + + // tunnel through to the Journal. + final Journal jnl = (Journal) sail.getDatabase() + .getIndexManager(); + + // describe another tripleStore with a distinct namespace. + final AbstractTripleStore tripleStore = new LocalTripleStore( + jnl, "foo", ITx.UNISOLATED, properties); + + // create that triple store. + tripleStore.create(); + + // wrap a 2nd sail around the 2nd tripleStore. + sail2 = new BigdataSail(tripleStore); + sail2.initialize(); + + } + + Future<Void> f = null; + + try { + + final Callable<Void> task = new Callable<Void>() { + + public Void call() throws Exception { + + SailConnection conn1 = null; + SailConnection conn2 = null; + + try { + + log.info("Requesting 1st unisolated connection."); + + conn1 = sail.getUnisolatedConnection(); + + log.info("Requesting 2nd unisolated connection."); + + conn2 = sail2.getUnisolatedConnection(); + + fail("Not expecting a 2nd unisolated connection"); + + return (Void) null; + + } finally { + + if (conn1 != null) + conn1.close(); + + if (conn2 != null) + conn2.close(); + + } + } + + }; + + // run task. it should block when attempting to get the 2nd + // connection. + f = service.submit(task); + + // wait up to a timeout to verify that the task blocked rather + // than acquiring the 2nd connection. + f.get(250, TimeUnit.MILLISECONDS); + + } catch (TimeoutException e) { + + /* + * This is the expected outcome. + */ + log.info("timeout"); + + } finally { + + if (f != null) { + // Cancel task. + f.cancel(true/* mayInterruptIfRunning */); + } + + if (sail2 != null) + sail2.shutDown(); + + sail.shutDown(); + + } + + } finally { + + if (service != null) { + service.shutdownNow(); + } + + sail.getDatabase().getIndexManager().destroy(); + + } + + } + + /** + * Test creates a database, obtains a writable connection, writes some data + * on the store, verifies that the data can be read back from within the + * connection but that it is not visible in a read-committed view, commits + * the write set, and verifies that the data is now visible in a + * read-committed view. 
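+     * <p>
+     * In outline (a sketch, not the verbatim test body; s, p and o stand
+     * in for the test's URIs):
+     * <pre>
+     * conn = sail.getUnisolatedConnection();   // the single writer
+     * readConn = sail.getReadOnlyConnection();
+     * conn.addStatement(s, p, o);  // visible within conn only
+     * conn.commit();               // now visible to a read-committed view
+     * </pre>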
+ * + * TODO variant that writes, aborts the write, and verifies that the data + * was not made restart safe. + * + * @throws SailException + * @throws InterruptedException + */ + public void test_isolationOfUnisolatedConnection() throws SailException, + InterruptedException { + + final Properties properties = new Properties(); + + properties.setProperty(Options.CREATE_TEMP_FILE, "true"); + + BigdataSailConnection conn = null; + + BigdataSailConnection readConn = null; + + final BigdataSail sail = new BigdataSail(properties); + + try { + + sail.initialize(); + + // the unisolated connection + conn = sail.getUnisolatedConnection(); + + // a read-only transaction. + readConn = sail.getReadOnlyConnection(); + final URI s = new URIImpl("http://www.bigdata.com/s"); final URI p = new URIImpl("http://www.bigdata.com/p"); @@ -348,32 +653,26 @@ } - } + } finally { - finally { - if (conn != null) conn.close(); if (readConn != null) readConn.close(); - sail.shutDown(); + sail.getDatabase().getIndexManager().destroy(); - if (!file.exists()) { - - fail("Could not locate store: " + file); - - if (!file.delete()) { - - fail("Could not delete file after test: " + file); - - } - - } - } } + /** + * Unit test verifies that we can mix read/write transactions and the use + * of the unisolated connection. + */ + public void test_readWriteTxAndUnisolatedConnection() { + fail("write this test"); + } + } Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java =================================================================== --- trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java 2011-05-05 18:12:38 UTC (rev 4453) +++ trunk/bigdata-sails/src/test/com/bigdata/rdf/sail/contrib/TestRollbacks.java 2011-05-05 19:08:09 UTC (rev 4454) @@ -1,296 +1,358 @@ -/** -Copyright (C) SYSTAP, LLC 2011. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -package com.bigdata.rdf.sail.contrib; - -import java.util.LinkedList; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicReference; - -import org.openrdf.OpenRDFException; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.query.MalformedQueryException; -import org.openrdf.query.QueryEvaluationException; -import org.openrdf.query.QueryLanguage; -import org.openrdf.query.TupleQuery; -import org.openrdf.query.TupleQueryResult; -import org.openrdf.repository.RepositoryConnection; -import org.openrdf.repository.RepositoryException; -import org.openrdf.repository.sail.SailRepository; - -import com.bigdata.journal.IIndexManager; -import com.bigdata.rdf.axioms.NoAxioms; -import com.bigdata.rdf.sail.BigdataSail; -import com.bigdata.rdf.sail.QuadsTestCase; -import com.bigdata.rdf.vocab.NoVocabulary; - -/** - * Unit test template for use in submission of bugs. - * <p> - * This test case will delegate to an underlying backing store. You can specify - * this store via a JVM property as follows: - * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> - * <p> - * There are three possible configurations for the testClass: - * <ul> - * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> - * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> - * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> - * </ul> - * <p> - * The default for triples and SIDs mode is for inference with truth maintenance - * to be on. If you would like to turn off inference, make sure to do so in - * {@link #getProperties()}. - * - * @author <a href="mailto:mrp...@us...">Mike Personick</a> - * @version $Id$ - */ -public class TestRollbacks extends QuadsTestCase { - public TestRollbacks() { - } - - public TestRollbacks(String arg0) { - super(arg0); - } - - /** - * Please set your database properties here, except for your journal file, - * please DO NOT SPECIFY A JOURNAL FILE. - */ - @Override - public Properties getProperties() { - Properties props = super.getProperties(); - - /* - * For example, here is a set of five properties that turns off - * inference, truth maintenance, and the free text index. - */ - props.setProperty(BigdataSail.Options.AXIOMS_CLASS, - NoAxioms.class.getName()); - props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, - NoVocabulary.class.getName()); - props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); - props.setProperty(BigdataSail.Options.JUSTIFY, "false"); - props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true"); -// props.setProperty(BigdataSail.Options.EXACT_SIZE, "true"); - - return props; - } - - - /** The thrown exception which is the first cause of failure. */ - private AtomicReference<Throwable> firstCause; - - /** - * Service used to run the individual tasks. This makes it possible to - * interrupt them as soon as one of the tasks fails. 
- */ - private ExecutorService executorService = null; - - @Override - protected void setUp() throws Exception { - super.setUp(); - firstCause = new AtomicReference<Throwable>(null); - executorService = Executors.newFixedThreadPool(3/*nthreads*/); - } - - @Override - protected void tearDown() throws Exception { - if (executorService != null) { - // interrupt any running tasks. - executorService.shutdownNow(); - } - // clear references so that junit does not hold onto them. - executorService = null; - firstCause = null; - super.tearDown(); - } - - public void testBug() throws Exception { - BigdataSail sail = getSail(); - try { - SailRepository repo = new SailRepository(sail); - repo.initialize(); - runConcurrentStuff(repo); - } finally { - final IIndexManager db = sail.getDatabase().getIndexManager(); - if (sail.isOpen()) - sail.shutDown(); - db.destroy(); - } - } - - private void runConcurrentStuff(final SailRepository repo) - throws Exception, - InterruptedException { - try { - final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>(); - tasks.add(new DoStuff(repo, true)); - tasks.add(new DoStuff(repo, false)); - tasks.add(new DoStuff(repo, false)); - final List<Future<Void>> futures = executorService.invokeAll(tasks); - // Look for the first cause. - final Throwable t = firstCause.get(); - if (t != null) { - // Found it. - throw new RuntimeException(t); - } - // test each future. - for (Future<Void> f : futures) { - f.get(); - } - } finally { - repo.shutDown(); - } - } - - private class DoStuff implements Callable<Void> { - private SailRepository repo; - private boolean writer; - int counter = 0; - - private DoStuff(SailRepository repo, boolean writer) - throws OpenRDFException { - this.repo = repo; - this.writer = writer; - } - - public Void call() throws Exception { -// if (writer) { -// // Initial sleep on the writer. -// Thread.sleep(500); -// } - RepositoryConnection conn = null; - try { - while (firstCause.get() == null) { - /* - * Note: If connection obtained/closed within the loop then - * the query is more likely to have some data to visit - * within its tx view. 
- */ - conn = repo.getConnection(); - conn.setAutoCommit(false); - if (writer) - writer(conn); - else - reader(conn); - conn.close(); - } - return (Void) null; - } catch (Throwable t) { - firstCause.compareAndSet(null/* expect */, t); - throw new RuntimeException(t); - } finally { - if (conn != null && conn.isOpen()) - conn.close(); - } - } - - private void reader(final RepositoryConnection conn) - throws RepositoryException, MalformedQueryException, - QueryEvaluationException, InterruptedException { - query(conn); -// Thread.sleep(100); - query(conn); - ++counter; - - if (counter % 3 == 0) - conn.commit(); - else - conn.rollback(); - - // if (counter % 7 == 0) { - // conn.close(); - // conn = repo.getConnection(); - // conn.setAutoCommit(false); - // } - } - - private void writer(final RepositoryConnection conn) throws RepositoryException, - MalformedQueryException, QueryEvaluationException, - InterruptedException { - - final URI subj = conn.getValueFactory().createURI( - "u:s" + (counter++)); - final Value value = conn.getValueFactory().createLiteral( - "literal" + counter); - query(conn); -// Thread.sleep(200); - conn.add(subj, conn.getValueFactory().createURI("u:p"), subj); - conn.add(subj, conn.getValueFactory().createURI("u:p"), value); - conn.commit(); - - if(log.isInfoEnabled()) - log.info("Added statements: size="+conn.size()); - - // if (counter % 12 == 0) { - // conn.close(); - // conn = repo.getConnection(); - // conn.setAutoCommit(false); - // } - } - - private void query(final RepositoryConnection conn) throws RepositoryException, - MalformedQueryException, QueryEvaluationException { - final long begin = System.currentTimeMillis(); - /* - * Note: This query will do an access path scan rather than a join. - * There are different code paths involved with a join, so there - * might be problems on those code paths as well. - */ - final boolean useJoin = counter % 2 == 0; - final String query = !useJoin// - // access path scan - ? "SELECT ?b { ?a ?b ?c } LIMIT 20"// - // join - : "SELECT ?b { ?a ?b ?c . ?d ?b ?e} LIMIT 20"// - ; - final TupleQuery q = conn.prepareTupleQuery(QueryLanguage.SPARQL, query); - q.setBinding("b", conn.getValueFactory().createURI("u:p")); - if (useJoin) - q.setBinding("d", conn.getValueFactory().createLiteral( - "literal1")); - final TupleQueryResult tqr = q.evaluate(); - int n = 0; - try { - while (tqr.hasNext()) { - tqr.next(); - n++; - } - } finally { - tqr.close(); - } - if (log.isInfoEnabled()) - log.info("Query: writer=" + writer + ", counter=" + counter - + ", nresults=" + n + ", elapsed=" - + (System.currentTimeMillis() - begin)); - } - } - -} +/** +Copyright (C) SYSTAP, LLC 2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +package com.bigdata.rdf.sail.contrib; + +import java.util.LinkedList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.log4j.Logger; +import org.openrdf.OpenRDFException; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.query.MalformedQueryException; +import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.repository.RepositoryConnection; +import org.openrdf.repository.RepositoryException; +import org.openrdf.repository.sail.SailRepository; + +import com.bigdata.journal.IIndexManager; +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSailRepository; +import com.bigdata.rdf.sail.QuadsTestCase; +import com.bigdata.rdf.vocab.NoVocabulary; + +/** + * This is a stress test for abort/rollback semantics. + * <p> + * This test case will delegate to an underlying backing store. You can specify + * this store via a JVM property as follows: + * <code>-DtestClass=com.bigdata.rdf.sail.TestBigdataSailWithQuads</code> + * <p> + * There are three possible configurations for the testClass: + * <ul> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithQuads (quads mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithoutSids (triples mode)</li> + * <li>com.bigdata.rdf.sail.TestBigdataSailWithSids (SIDs mode)</li> + * </ul> + * <p> + * The default for triples and SIDs mode is for inference with truth maintenance + * to be on. If you would like to turn off inference, make sure to do so in + * {@link #getProperties()}. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/278 + * + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @author <a href="mailto:ge...@us...">Gerjon</a> + * @version $Id$ + */ +public class TestRollbacks extends QuadsTestCase { + + private static final Logger log = Logger.getLogger(TestRollbacks.class); + + public TestRollbacks() { + } + + public TestRollbacks(String arg0) { + super(arg0); + } + + @Override + public Properties getProperties() { + + final Properties props = super.getProperties(); + + /* + * For example, here is a set of five properties that turns off + * inference, truth maintenance, and the free text index. + */ + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.ISOLATABLE_INDICES, "true"); + +// props.setProperty(BigdataSail.Options.CREATE_TEMP_FILE, "true"); +// props.setProperty(BigdataSail.Options.BUFFER_MODE, BufferMode.DiskRW +// .toString()); + +// props.setProperty(BigdataSail.Options.EXACT_SIZE, "true"); + + return props; + } + + + /** The thrown exception which is the first cause of failure. 
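+     * <p>
+     * Tasks publish errors using a race-free idiom so that only the first
+     * error is retained (a sketch of the pattern used in DoStuff.call()):
+     * <pre>
+     * } catch (Throwable t) {
+     *     firstCause.compareAndSet(null, t); // keep only the 1st cause.
+     *     throw new RuntimeException(t);
+     * }
+     * </pre>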
*/ + private AtomicReference<Throwable> firstCause; + + /** + * Service used to run the individual tasks. This makes it possible to + * interrupt them as soon as one of the tasks fails. + */ + private ExecutorService executorService = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + firstCause = new AtomicReference<Throwable>(null); + executorService = Executors.newFixedThreadPool(3/*nthreads*/); + } + + @Override + protected void tearDown() throws Exception { + if (executorService != null) { + // interrupt any running tasks. + executorService.shutdownNow(); + } + // clear references so that junit does not hold onto them. + executorService = null; + firstCause = null; + super.tearDown(); + } + + /** + * Stress test for abort/rollback semantics consisting of many short + * runs of the basic test. + * + * @throws Exception + */ + public void testManyShortRuns() throws Exception { + + for (int i = 0; i < 20; i++) { + + doTest(10); + + } + + } + + /** + * Stress test for abort/rollback semantics consisting of one moderate + * duration run of the basic test. + * + * @throws Exception + */ + public void testModerateDuration() throws Exception { + + doTest(100); + + } + + private void doTest(final int maxCounter) throws InterruptedException, Exception { + final BigdataSail sail = getSail(); + try { + // Note: Modified to use the BigdataSailRepository rather than the base SailRepository class. + final BigdataSailRepository repo = new BigdataSailRepository(sail); + repo.initialize(); + runConcurrentStuff(repo,maxCounter); + } finally { + final IIndexManager db = sail.getDatabase().getIndexManager(); + if (sail.isOpen()) + sail.shutDown(); + db.destroy(); + } + } + + private void runConcurrentStuff(final SailRepository repo,final int maxCounter) + throws Exception, + InterruptedException { + try { + final List<Callable<Void>> tasks = new LinkedList<Callable<Void>>(); + tasks.add(new DoStuff(repo, true/*writer*/, maxCounter)); + tasks.add(new DoStuff(repo, false/*reader*/, maxCounter)); + tasks.add(new DoStuff(repo, false/*reader*/, maxCounter)); + final List<Future<Void>> futures = executorService.invokeAll(tasks); + // Look for the first cause. + final Throwable t = firstCause.get(); + if (t != null) { + // Found it. + throw new RuntimeException(t); + } + // test each future. + for (Future<Void> f : futures) { + f.get(); + } + } finally { + repo.shutDown(); + } + } + + private class DoStuff implements Callable<Void> { + + private SailRepository repo; + private boolean writer; + private final int maxCounter; + int counter = 0; + + /** + * @param repo + * The repository. + * @param writer + * <code>true</code> iff this is a writer. + * @param maxCounter + * Sets a limit on the length of the stress test. A value of + * 1000 results in a 26 second run. A value of 100-200 is + * more reasonable and is sufficient to readily identify any + * ... [truncated message content] |