|
From: <tho...@us...> - 2010-07-27 21:15:56
|
Revision: 3326
http://bigdata.svn.sourceforge.net/bigdata/?rev=3326&view=rev
Author: thompsonbry
Date: 2010-07-27 21:15:49 +0000 (Tue, 27 Jul 2010)
Log Message:
-----------
Cleaned up IndexMetadata to remove support for IAddressSerializer and ISplitHandler. Those interfaces and their implementations are now gone.
Modified Paths:
--------------
trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java
trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java
trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java
trunk/bigdata/src/java/com/bigdata/resources/ViewMetadata.java
trunk/bigdata/src/java/com/bigdata/search/ReadIndexTask.java
trunk/bigdata-jini/src/java/com/bigdata/service/jini/benchmark/ThroughputMaster.java
Removed Paths:
-------------
trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java
trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java
trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java
trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java
trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java
Deleted: trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/AddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -1,121 +0,0 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-/*
- * Created on Dec 26, 2006
- */
-
-package com.bigdata.btree;
-
-import java.io.DataInput;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-import com.bigdata.io.DataOutputBuffer;
-import com.bigdata.rawstore.IAddressManager;
-
-/**
- * Serializes each address as a long integer and does not attempt to pack or
- * compress the addresses.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
- *
- * @deprecated This class is no longer used. The implementation exists solely to
- * facilitate de-serialization of older {@link IndexMetadata} record
- * versions.
- */
-public class AddressSerializer implements IAddressSerializer, Externalizable {
-
- private static final long serialVersionUID = -1434032311796654357L;
-
-// public static final IAddressSerializer INSTANCE = new AddressSerializer();
-
- /**
- * De-serialization ctor.
- */
- public AddressSerializer() {
- }
-
- public void putChildAddresses(IAddressManager addressManager, DataOutputBuffer os, long[] childAddr,
- int nchildren) throws IOException {
-
- throw new UnsupportedOperationException();
-
-// for (int i = 0; i < nchildren; i++) {
-//
-// final long addr = childAddr[i];
-//
-// /*
-// * Children MUST have assigned persistent identity.
-// */
-// if (addr == 0L) {
-//
-// throw new RuntimeException("Child is not persistent: index="
-// + i);
-//
-// }
-//
-// os.writeLong(addr);
-//
-// }
-
- }
-
- public void getChildAddresses(IAddressManager addressManager,DataInput is, long[] childAddr,
- int nchildren) throws IOException {
-
- throw new UnsupportedOperationException();
-
-// for (int i = 0; i < nchildren; i++) {
-//
-// final long addr = is.readLong();
-//
-// if (addr == 0L) {
-//
-// throw new RuntimeException(
-// "Child does not have persistent address: index=" + i);
-//
-// }
-//
-// childAddr[i] = addr;
-//
-// }
-
- }
-
- public void readExternal(ObjectInput arg0) throws IOException, ClassNotFoundException {
-
- // NOP (no state)
-
- }
-
- public void writeExternal(ObjectOutput arg0) throws IOException {
-
- // NOP (no state)
-
- }
-
-}
Modified: trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/BloomFilterFactory.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -31,7 +31,6 @@
import java.io.Serializable;
import com.bigdata.relation.accesspath.IAccessPath;
-import com.bigdata.resources.DefaultSplitHandler;
/**
* An interface that is used to generate a bloom filter for an
@@ -145,16 +144,12 @@
* scale-out indices DO NOT share the same limitation. Each time a scale-out
* index is partitioned, it is broken into a mutable {@link BTree} for
* absorbing writes for an index partition and zero or more
- * {@link IndexSegment}s. The default configuration of the
- * {@link DefaultSplitHandler} for a scale-out index caps the #of entries in
- * an index partition at ~ 1.5M. However, many of those index entries are
- * going to migrate to the {@link IndexSegment}s, so the #of index entries
- * in the {@link BTree} is never that large. Finally, #of index entries in
- * an {@link IndexSegment} is always known when the {@link IndexSegment} is
+ * {@link IndexSegment}s. Each time an overflow occurs, index entries are
+ * migrated to the {@link IndexSegment}s, so the #of index entries in the
+ * {@link BTree} is never that large. Finally, #of index entries in an
+ * {@link IndexSegment} is always known when the {@link IndexSegment} is
* built, so the {@link BloomFilter} for an {@link IndexSegment} is always a
* perfect fit.
- *
- * @see DefaultSplitHandler
*/
public static final transient BloomFilterFactory DEFAULT = new BloomFilterFactory(
DEFAULT_N, DEFAULT_ERROR_RATE, DEFAULT_MAX_ERROR_RATE);
@@ -234,15 +229,15 @@
* Create and return a new (empty) bloom filter for a {@link BTree} or
* {@link IndexSegment}.
* <p>
- * The bloom filter can be provisioned with reference to
- * {@link src/architecture/bloomfilter.xls}. Let <code>p</code> be the
- * probability of a false positive (aka the error rate) and <code>n</code>
- * be the #of index entries. The values p=.02 and n=1M result in a space
- * requirement of 8656171 bits or approximately 1mb and uses ~ 8.6 bits per
- * element. In order to achieve the same error rate with n=10M, the size
- * requirements of the bloom filter will be approximately 10mb since the
- * filter will still use ~ 8.6 bits per element for that error rate, or
- * roughly one byte per index entry.
+ * The bloom filter can be provisioned with reference to {@link src
+ * /architecture/bloomfilter.xls}. Let <code>p</code> be the probability of
+ * a false positive (aka the error rate) and <code>n</code> be the #of index
+ * entries. The values p=.02 and n=1M result in a space requirement of
+ * 8656171 bits or approximately 1mb and uses ~ 8.6 bits per element. In
+ * order to achieve the same error rate with n=10M, the size requirements of
+ * the bloom filter will be approximately 10mb since the filter will still
+ * use ~ 8.6 bits per element for that error rate, or roughly one byte per
+ * index entry.
* <p>
* The maximum record length for the backing store can easily be exceeded by
* a large bloom filter, large bloom filters will require significant time
@@ -252,12 +247,12 @@
* While the scale-out architecture uses group commits and hence can be
* expected to perform more commits during a bulk data load, it also uses
* one bloom filter per {@link AbstractBTree} so the #of index entries is
- * bounded by the configured {@link ISplitHandler}. On the other hand, the
- * bloom filter performance will degrade as a scale-up index grows in size
- * since the bloom filter can not be made very large for a scale-up store
- * (the maximum record size is reduced in order to permit more records) and
- * large indices will therefore experience increasing false positive rates
- * as they grow.
+ * bounded by the configured {@link ISimpleSplitHandler} in an application
+ * dependent manner. On the other hand, the bloom filter performance will
+ * degrade as a scale-up index grows in size since the bloom filter can not
+ * be made very large for a scale-up store (the maximum record size is
+ * reduced in order to permit more records) and large indices will therefore
+ * experience increasing false positive rates as they grow.
* <p>
* Whether or not a bloom filter is useful depends on the application. The
* bloom filter will ONLY be used for point tests such as contains(),
Deleted: trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/IAddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -1,79 +0,0 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-/*
- * Created on Dec 26, 2006
- */
-
-package com.bigdata.btree;
-
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.Serializable;
-
-import com.bigdata.io.DataOutputBuffer;
-import com.bigdata.rawstore.IAddressManager;
-
-/**
- * Interface for (de-)serialization of addresses of child nodes and leaves as
- * recorded on a {@link Node}.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
- *
- * @deprecated This interface is no longer used. It exists solely to facilitate
- * de-serialization of older {@link IndexMetadata} record versions.
- */
-public interface IAddressSerializer extends Serializable {
-
- /**
- * De-serialize the child addresses for a node.
- *
- * @param is
- * The input stream.
- * @param childAddr
- * The array into which the addresses must be written.
- * @param nchildren
- * The #of valid values in the array. The values in indices
- * [0:n-1] are defined and must be read from the buffer and
- * written on the array.
- */
- public void getChildAddresses(IAddressManager addressManager, DataInput is,
- long[] childAddr, int nchildren) throws IOException;
-
- /**
- * Serialize the child addresses for a node.
- *
- * @param os
- * The output stream.
- * @param childAddr
- * The array of child addresses to be written.
- * @param nchildren
- * The #of valid values in the array. The values in indices
- * [0:n-1] are defined and must be written.
- */
- public void putChildAddresses(IAddressManager addressManager,
- DataOutputBuffer os, long[] childAddr, int nchildren)
- throws IOException;
-
-}
Deleted: trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/ISplitHandler.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -1,119 +0,0 @@
-/*
-
-Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-*/
-/*
- * Created on Feb 12, 2008
- */
-
-package com.bigdata.btree;
-
-import java.io.Serializable;
-
-import com.bigdata.sparse.SparseRowStore;
-
-/**
- * An interface used to decide when an index partition is overcapacity and
- * should be split, including the split point(s), and when an index partition is
- * undercapacity and should be joined with its right sibling.
- * <p>
- * Note: applications frequently must constrain the allowable separator keys when
- * splitting an index partition into two or more index partitions. For example,
- * the {@link SparseRowStore} must maintain a guarantee of atomic operations
- * for a logical row, which is in turn defined as the ordered set of index
- * entries sharing the same primary key. You can use this interface to impose
- * application specific constraints such that the index partition boundaries
- * only fall on acceptable separator keys.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
- *
- * @deprecated by {@link ISimpleSplitHandler}. This is only kept around to
- * deserialize existing instances.
- */
-public interface ISplitHandler extends Serializable {
-
-// /**
-// * Return <code>true</code> if a cursory examination of an index partition
-// * suggests that it SHOULD be split into 2 or more index partitions.
-// *
-// * @param rangeCount
-// * A fast range count (may overestimate).
-// *
-// * @return <code>true</code> if the index partition should be split.
-// */
-// public boolean shouldSplit(long rangeCount);
-//
-// /**
-// * Return the percentage of a single nominal split that would be satisfied
-// * by an index partition based on the specified range count. If the index
-// * partition has exactly the desired number of tuples, then return ONE
-// * (1.0). If the index partition has 50% of the desired #of tuples, then
-// * return <code>.5</code>. If the index partition could be used to build
-// * two splits, then return TWO (2.0), etc.
-// *
-// * @param rangeCount
-// * A fast range count (may overestimate).
-// *
-// * @return The percentage of a split per above.
-// */
-// public double percentOfSplit(long rangeCount);
-//
-// /**
-// * Return <code>true</code> if a cursory examination of an index partition
-// * suggests that it SHOULD be joined with either its left or right sibling.
-// * The basic determination is that the index partition is "undercapacity".
-// * Normally this is decided in terms of the range count of the index
-// * partition.
-// *
-// * @param rangeCount
-// * A fast range count (may overestimate).
-// *
-// * @return <code>true</code> if the index partition should be joined.
-// */
-// public boolean shouldJoin(long rangeCount);
-//
-// /**
-// * Choose a set of splits that completely span the key range of the index
-// * view. The first split MUST use the leftSeparator of the index view as its
-// * leftSeparator. The last split MUST use the rightSeparator of the index
-// * view as its rightSeparator. The #of splits SHOULD be chosen such that the
-// * resulting index partitions are each at least 50% full.
-// *
-// * @param partitionIdFactory
-// *
-// * @param ndx
-// * The source index partition.
-// *
-// * @return A {@link Split}[] array contains everything that we need to
-// * define the new index partitions -or- <code>null</code> if a more
-// * detailed examination reveals that the index SHOULD NOT be split
-// * at this time.
-// */
-//// * @param btreeCounters
-//// * Performance counters for the index partition view collected
-//// * since the last overflow.
-// public Split[] getSplits(IPartitionIdFactory partitionIdFactory,
-// ILocalBTreeView ndx);//, BTreeCounters btreeCounters);
-
-}
Modified: trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/IndexMetadata.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -2377,86 +2377,87 @@
*/
private static transient final int VERSION0 = 0x0;
- /**
- * This version introduced the {@link #asynchronousIndexWriteConfiguration}.
- * Reads of an earlier version create a instance of that field based on a
- * default configuration.
- */
- private static transient final int VERSION1 = 0x1;
-
- /**
- * This version introduced the {@link #scatterSplitConfiguration}. Reads of
- * an earlier version create a instance of that field based on a default
- * configuration.
- */
- private static transient final int VERSION2 = 0x2;
+// /**
+// * This version introduced the {@link #asynchronousIndexWriteConfiguration}.
+// * Reads of an earlier version create a instance of that field based on a
+// * default configuration.
+// */
+// private static transient final int VERSION1 = 0x1;
+//
+// /**
+// * This version introduced the {@link #scatterSplitConfiguration}. Reads of
+// * an earlier version create a instance of that field based on a default
+// * configuration.
+// */
+// private static transient final int VERSION2 = 0x2;
+//
+// /**
+// * This version introduced {@link #indexSegmentLeafCacheTimeout}. Reads of
+// * an earlier version use the
+// * {@link Options#DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT} for this field.
+// */
+// private static transient final int VERSION3 = 0x3;
+//
+// /**
+// * This version introduced {@link #btreeRecordCompressorFactory} and
+// * {@link #indexSegmentRecordCompressorFactory}. Both of these fields are
+// * optional, which implies no compression provider. Reads of prior versions
+// * set these fields to <code>null</code>.
+// *
+// * @see Options#BTREE_RECORD_COMPRESSOR_FACTORY
+// * @see Options#INDEX_SEGMENT_RECORD_COMPRESSOR_FACTORY
+// */
+// private static transient final int VERSION4 = 0x04;
+//
+// /**
+// * This version introduced {@link #childLocks}. Reads of prior versions set
+// * this field to <code>true</code>.
+// *
+// * @see Options#CHILD_LOCKS
+// */
+// private static transient final int VERSION5 = 0x05;
+//
+// /**
+// * This version introduced {@link #versionTimestampFilters}. Reads of prior
+// * versions set this field to <code>false</code>.
+// */
+// private static transient final int VERSION6 = 0x06;
+//
+// /**
+// * This version gets rid of the read-retention queue capacity and nscan
+// * properties and the index segment leaf cache capacity and timeout
+// * properties.
+// */
+// private static transient final int VERSION7 = 0x07;
+//
+// /**
+// * This version gets rid of the IAddressSerializer interface used by the
+// * older {@link NodeSerializer} class to (de-)serialize the child addresses
+// * for a {@link Node}.
+// */
+// private static transient final int VERSION8 = 0x08;
+//
+// /**
+// * The childLocks feature was dropped in this version.
+// */
+// private static transient final int VERSION9 = 0x09;
+//
+// /**
+// * The split handler was changed from an implementation based on the #of
+// * tuples to one based on the size on disk of an index segment after a
+// * compacting merge. The old split handlers are replaced by a
+// * <code>null</code> reference when they are de-serialized.
+// *
+// * @see ISplitHandler
+// * @see ISimpleSplitHandler
+// */
+// private static transient final int VERSION10 = 0x10;
/**
- * This version introduced {@link #indexSegmentLeafCacheTimeout}. Reads of
- * an earlier version use the
- * {@link Options#DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT} for this field.
- */
- private static transient final int VERSION3 = 0x3;
-
- /**
- * This version introduced {@link #btreeRecordCompressorFactory} and
- * {@link #indexSegmentRecordCompressorFactory}. Both of these fields are
- * optional, which implies no compression provider. Reads of prior versions
- * set these fields to <code>null</code>.
- *
- * @see Options#BTREE_RECORD_COMPRESSOR_FACTORY
- * @see Options#INDEX_SEGMENT_RECORD_COMPRESSOR_FACTORY
- */
- private static transient final int VERSION4 = 0x04;
-
- /**
- * This version introduced {@link #childLocks}. Reads of prior versions set
- * this field to <code>true</code>.
- *
- * @see Options#CHILD_LOCKS
- */
- private static transient final int VERSION5 = 0x05;
-
- /**
- * This version introduced {@link #versionTimestampFilters}. Reads of prior
- * versions set this field to <code>false</code>.
- */
- private static transient final int VERSION6 = 0x06;
-
- /**
- * This version gets rid of the read-retention queue capacity and nscan
- * properties and the index segment leaf cache capacity and timeout
- * properties.
- */
- private static transient final int VERSION7 = 0x07;
-
- /**
- * This version gets rid of the IAddressSerializer interface used by the
- * older {@link NodeSerializer} class to (de-)serialize the child addresses
- * for a {@link Node}.
- */
- private static transient final int VERSION8 = 0x08;
-
- /**
- * The childLocks feature was dropped in this version.
- */
- private static transient final int VERSION9 = 0x09;
-
- /**
- * The split handler was changed from an implementation based on the #of
- * tuples to one based on the size on disk of an index segment after a
- * compacting merge. The old split handlers are replaced by a
- * <code>null</code> reference when they are de-serialized.
- *
- * @see ISplitHandler
- * @see ISimpleSplitHandler
- */
- private static transient final int VERSION10 = 0x10;
-
- /**
* The version that will be serialized by this class.
*/
- private static transient final int CURRENT_VERSION = VERSION10;
+ private static transient final int CURRENT_VERSION = VERSION0;
+// private static transient final int CURRENT_VERSION = VERSION10;
/**
* @todo review generated record for compactness.
@@ -2468,16 +2469,16 @@
switch (version) {
case VERSION0:
- case VERSION1:
- case VERSION2:
- case VERSION3:
- case VERSION4:
- case VERSION5:
- case VERSION6:
- case VERSION7:
- case VERSION8:
- case VERSION9:
- case VERSION10:
+// case VERSION1:
+// case VERSION2:
+// case VERSION3:
+// case VERSION4:
+// case VERSION5:
+// case VERSION6:
+// case VERSION7:
+// case VERSION8:
+// case VERSION9:
+// case VERSION10:
break;
default:
throw new IOException("Unknown version: version=" + version);
@@ -2499,94 +2500,94 @@
writeRetentionQueueScan = (int)LongPacker.unpackLong(in);
- if (version < VERSION7) {
-
- /* btreeReadRetentionQueueCapacity = (int) */LongPacker
- .unpackLong(in);
+// if (version < VERSION7) {
+//
+// /* btreeReadRetentionQueueCapacity = (int) */LongPacker
+// .unpackLong(in);
+//
+// /* btreeReadRetentionQueueScan = (int) */LongPacker.unpackLong(in);
+//
+// }
- /* btreeReadRetentionQueueScan = (int) */LongPacker.unpackLong(in);
-
- }
-
pmd = (LocalPartitionMetadata)in.readObject();
btreeClassName = in.readUTF();
checkpointClassName = in.readUTF();
- if (version < VERSION8) {
+// if (version < VERSION8) {
+//
+// // Read and discard the IAddressSerializer object.
+// in.readObject();
+//
+// }
- // Read and discard the IAddressSerializer object.
- in.readObject();
-
- }
-
nodeKeysCoder = (IRabaCoder) in.readObject();
tupleSer = (ITupleSerializer) in.readObject();
- if (version < VERSION4) {
+// if (version < VERSION4) {
+//
+// btreeRecordCompressorFactory = null;
+//
+// } else {
- btreeRecordCompressorFactory = null;
-
- } else {
-
btreeRecordCompressorFactory = (IRecordCompressorFactory) in
.readObject();
- }
+// }
conflictResolver = (IConflictResolver)in.readObject();
- if (version < VERSION5 || version >= VERSION9) {
-
-// childLocks = true;
-
- } else {
-
-// childLocks =
- in.readBoolean();
-
- }
+// if (version < VERSION5 || version >= VERSION9) {
+//
+//// childLocks = true;
+//
+// } else {
+//
+//// childLocks =
+// in.readBoolean();
+//
+// }
deleteMarkers = in.readBoolean();
versionTimestamps = in.readBoolean();
- if (version < VERSION6) {
+// if (version < VERSION6) {
+//
+// versionTimestampFilters = false;
+//
+// } else {
- versionTimestampFilters = false;
-
- } else {
-
versionTimestampFilters = in.readBoolean();
-
- }
+//
+// }
bloomFilterFactory = (BloomFilterFactory) in.readObject();
overflowHandler = (IOverflowHandler)in.readObject();
- if (version < VERSION10) {
+// if (version < VERSION10) {
+//
+// /*
+// * The old style of split handler is discarded. The default behavior
+// * for the new style of split handler covers all known uses of the
+// * old style split handler. While some indices (the sparse row store
+// * for example) will have to register a new split handler for
+// * safety, those indices were not safe for splits historically.
+// */
+//
+// // read and discard the old split handler.
+// in.readObject();
+//
+// splitHandler2 = null;
+//
+// } else {
- /*
- * The old style of split handler is discarded. The default behavior
- * for the new style of split handler covers all known uses of the
- * old style split handler. While some indices (the sparse row store
- * for example) will have to register a new split handler for
- * safety, those indices were not safe for splits historically.
- */
-
- // read and discard the old split handler.
- in.readObject();
-
- splitHandler2 = null;
-
- } else {
-
splitHandler2 = (ISimpleSplitHandler) in.readObject();
- }
+// }
/*
* IndexSegment.
@@ -2594,118 +2595,118 @@
indexSegmentBranchingFactor = (int) LongPacker.unpackLong(in);
- if (version < VERSION7) {
+// if (version < VERSION7) {
+//
+// /* indexSegmentLeafCacheCapacity = (int) */LongPacker
+// .unpackLong(in);
+//
+// if (version < VERSION3) {
+//
+// /*
+// * indexSegmentLeafCacheTimeout = Long
+// * .parseLong(Options.DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT);
+// */
+//
+// } else {
+//
+// /* indexSegmentLeafCacheTimeout = (long) */LongPacker
+// .unpackLong(in);
+//
+// }
+//
+// }
- /* indexSegmentLeafCacheCapacity = (int) */LongPacker
- .unpackLong(in);
-
- if (version < VERSION3) {
-
- /*
- * indexSegmentLeafCacheTimeout = Long
- * .parseLong(Options.DEFAULT_INDEX_SEGMENT_LEAF_CACHE_TIMEOUT);
- */
-
- } else {
-
- /* indexSegmentLeafCacheTimeout = (long) */LongPacker
- .unpackLong(in);
-
- }
-
- }
-
indexSegmentBufferNodes = in.readBoolean();
- if (version < VERSION4) {
+// if (version < VERSION4) {
+//
+// indexSegmentRecordCompressorFactory = null;
+//
+// } else {
- indexSegmentRecordCompressorFactory = null;
-
- } else {
-
indexSegmentRecordCompressorFactory = (IRecordCompressorFactory) in
.readObject();
- }
+// }
- if (version < VERSION1) {
-
- /*
- * Use the default configuration since not present in the serialized
- * form before VERSION1.
- */
-
- final int masterQueueCapacity = Integer
- .parseInt(Options.DEFAULT_MASTER_QUEUE_CAPACITY);
-
- final int masterChunkSize = Integer
- .parseInt(Options.DEFAULT_MASTER_CHUNK_SIZE);
-
- final long masterChunkTimeoutNanos = Long
- .parseLong(Options.DEFAULT_MASTER_CHUNK_TIMEOUT_NANOS);
-
- final long sinkIdleTimeoutNanos = Long
- .parseLong(Options.DEFAULT_SINK_IDLE_TIMEOUT_NANOS);
-
- final long sinkPollTimeoutNanos = Long
- .parseLong(Options.DEFAULT_SINK_POLL_TIMEOUT_NANOS);
-
- final int sinkQueueCapacity = Integer
- .parseInt(Options.DEFAULT_SINK_QUEUE_CAPACITY);
-
- final int sinkChunkSize = Integer
- .parseInt(Options.DEFAULT_SINK_CHUNK_SIZE);
-
- final long sinkChunkTimeoutNanos = Long
- .parseLong(Options.DEFAULT_SINK_CHUNK_TIMEOUT_NANOS);
-
- asynchronousIndexWriteConfiguration = new AsynchronousIndexWriteConfiguration(
- masterQueueCapacity,//
- masterChunkSize,//
- masterChunkTimeoutNanos,//
- sinkIdleTimeoutNanos,//
- sinkPollTimeoutNanos,//
- sinkQueueCapacity,//
- sinkChunkSize,//
- sinkChunkTimeoutNanos//
- );
-
- } else {
+// if (version < VERSION1) {
+//
+// /*
+// * Use the default configuration since not present in the serialized
+// * form before VERSION1.
+// */
+//
+// final int masterQueueCapacity = Integer
+// .parseInt(Options.DEFAULT_MASTER_QUEUE_CAPACITY);
+//
+// final int masterChunkSize = Integer
+// .parseInt(Options.DEFAULT_MASTER_CHUNK_SIZE);
+//
+// final long masterChunkTimeoutNanos = Long
+// .parseLong(Options.DEFAULT_MASTER_CHUNK_TIMEOUT_NANOS);
+//
+// final long sinkIdleTimeoutNanos = Long
+// .parseLong(Options.DEFAULT_SINK_IDLE_TIMEOUT_NANOS);
+//
+// final long sinkPollTimeoutNanos = Long
+// .parseLong(Options.DEFAULT_SINK_POLL_TIMEOUT_NANOS);
+//
+// final int sinkQueueCapacity = Integer
+// .parseInt(Options.DEFAULT_SINK_QUEUE_CAPACITY);
+//
+// final int sinkChunkSize = Integer
+// .parseInt(Options.DEFAULT_SINK_CHUNK_SIZE);
+//
+// final long sinkChunkTimeoutNanos = Long
+// .parseLong(Options.DEFAULT_SINK_CHUNK_TIMEOUT_NANOS);
+//
+// asynchronousIndexWriteConfiguration = new AsynchronousIndexWriteConfiguration(
+// masterQueueCapacity,//
+// masterChunkSize,//
+// masterChunkTimeoutNanos,//
+// sinkIdleTimeoutNanos,//
+// sinkPollTimeoutNanos,//
+// sinkQueueCapacity,//
+// sinkChunkSize,//
+// sinkChunkTimeoutNanos//
+// );
+//
+// } else {
asynchronousIndexWriteConfiguration = (AsynchronousIndexWriteConfiguration) in
.readObject();
- }
+// }
- if (version < VERSION2) {
+// if (version < VERSION2) {
+//
+// /*
+// * Use the default configuration since not present in the serialized
+// * form before VERSION2.
+// */
+//
+// final boolean scatterSplitEnabled = Boolean
+// .parseBoolean(Options.DEFAULT_SCATTER_SPLIT_ENABLED);
+//
+// final double scatterSplitPercentOfSplitThreshold = Double
+// .parseDouble(Options.DEFAULT_SCATTER_SPLIT_PERCENT_OF_SPLIT_THRESHOLD);
+//
+// final int scatterSplitDataServicesCount = Integer
+// .parseInt(Options.DEFAULT_SCATTER_SPLIT_DATA_SERVICE_COUNT);
+//
+// final int scatterSplitIndexPartitionsCount = Integer
+// .parseInt(Options.DEFAULT_SCATTER_SPLIT_INDEX_PARTITION_COUNT);
+//
+// this.scatterSplitConfiguration = new ScatterSplitConfiguration(
+// scatterSplitEnabled, scatterSplitPercentOfSplitThreshold,
+// scatterSplitDataServicesCount,
+// scatterSplitIndexPartitionsCount);
+//
+// } else {
- /*
- * Use the default configuration since not present in the serialized
- * form before VERSION2.
- */
-
- final boolean scatterSplitEnabled = Boolean
- .parseBoolean(Options.DEFAULT_SCATTER_SPLIT_ENABLED);
-
- final double scatterSplitPercentOfSplitThreshold = Double
- .parseDouble(Options.DEFAULT_SCATTER_SPLIT_PERCENT_OF_SPLIT_THRESHOLD);
-
- final int scatterSplitDataServicesCount = Integer
- .parseInt(Options.DEFAULT_SCATTER_SPLIT_DATA_SERVICE_COUNT);
-
- final int scatterSplitIndexPartitionsCount = Integer
- .parseInt(Options.DEFAULT_SCATTER_SPLIT_INDEX_PARTITION_COUNT);
-
- this.scatterSplitConfiguration = new ScatterSplitConfiguration(
- scatterSplitEnabled, scatterSplitPercentOfSplitThreshold,
- scatterSplitDataServicesCount,
- scatterSplitIndexPartitionsCount);
-
- } else {
-
scatterSplitConfiguration = (ScatterSplitConfiguration) in.readObject();
- }
+// }
}
@@ -2752,30 +2753,30 @@
out.writeObject(tupleSer);
- if (version >= VERSION4) {
+// if (version >= VERSION4) {
out.writeObject(btreeRecordCompressorFactory);
- }
+// }
out.writeObject(conflictResolver);
- if (version >= VERSION5 && version < VERSION9 ) {
+// if (version >= VERSION5 && version < VERSION9 ) {
+//
+//// out.writeBoolean(childLocks);
+// out.writeBoolean(false/* childLocks */);
+//
+// }
-// out.writeBoolean(childLocks);
- out.writeBoolean(false/* childLocks */);
-
- }
-
out.writeBoolean(deleteMarkers);
out.writeBoolean(versionTimestamps);
- if (version >= VERSION6) {
+// if (version >= VERSION6) {
out.writeBoolean(versionTimestampFilters);
- }
+// }
out.writeObject(bloomFilterFactory);
@@ -2800,25 +2801,25 @@
out.writeBoolean(indexSegmentBufferNodes);
- if (version >= VERSION4) {
+// if (version >= VERSION4) {
out.writeObject(btreeRecordCompressorFactory);
- }
+// }
+//
+// if (version >= VERSION1) {
- if (version >= VERSION1) {
-
// introduced in VERSION1
out.writeObject(asynchronousIndexWriteConfiguration);
- }
+// }
+//
+// if (version >= VERSION2) {
- if (version >= VERSION2) {
-
// introduced in VERSION2
out.writeObject(scatterSplitConfiguration);
- }
+// }
}
Deleted: trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/PackedAddressSerializer.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -1,124 +0,0 @@
-/**
-
-Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved.
-
-Contact:
- SYSTAP, LLC
- 4501 Tower Road
- Greensboro, NC 27410
- lic...@bi...
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-/*
- * Created on Dec 26, 2006
- */
-
-package com.bigdata.btree;
-
-import java.io.DataInput;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-import com.bigdata.io.DataOutputBuffer;
-import com.bigdata.rawstore.IAddressManager;
-import com.bigdata.rawstore.IRawStore;
-
-/**
- * Packs the addresses using the {@link IAddressManager} for the backing
- * {@link IRawStore}.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id$
- *
- * @deprecated This class is no longer used. The implementation exists solely to
- * facilitate de-serialization of older {@link IndexMetadata} record
- * versions.
- */
-public class PackedAddressSerializer implements IAddressSerializer, Externalizable {
-
- /**
- *
- */
- private static final long serialVersionUID = 7533128830948670801L;
-
-// public static final IAddressSerializer INSTANCE = new PackedAddressSerializer();
-
- public PackedAddressSerializer() {
-
- }
-
- public void putChildAddresses(IAddressManager addressManager, DataOutputBuffer os,
- long[] childAddr, int nchildren) throws IOException {
-
- throw new UnsupportedOperationException();
-
-// for (int i = 0; i < nchildren; i++) {
-//
-// final long addr = childAddr[i];
-//
-// /*
-// * Children MUST have assigned persistent identity.
-// */
-// if (addr == 0L) {
-//
-// throw new RuntimeException("Child is not persistent: index="
-// + i);
-//
-// }
-//
-// addressManager.packAddr(os, addr);
-//
-// }
-
- }
-
- public void getChildAddresses(IAddressManager addressManager, DataInput is,
- long[] childAddr, int nchildren) throws IOException {
-
- throw new UnsupportedOperationException();
-
-// for (int i = 0; i < nchildren; i++) {
-//
-// final long addr = addressManager.unpackAddr(is);
-//
-// if (addr == 0L) {
-//
-// throw new RuntimeException(
-// "Child does not have persistent address: index=" + i);
-//
-// }
-//
-// childAddr[i] = addr;
-//
-// }
-
- }
-
-
- public void readExternal(ObjectInput arg0) throws IOException, ClassNotFoundException {
-
- // NOP (no state)
-
- }
-
- public void writeExternal(ObjectOutput arg0) throws IOException {
-
- // NOP (no state)
-
- }
-
-}
Modified: trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/isolation/IsolatedFusedView.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -137,7 +137,7 @@
* <p>
* Reads will read through the <i>writeSet</i> and then the resource(s) in
* the <i>groundState</i> in the order in which they are given. A read is
- * satisified by the first resource containing an index entry for the search
+ * satisfied by the first resource containing an index entry for the search
* key.
* <p>
* Writes will first read through looking for a @todo javadoc
Modified: trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/btree/view/FusedView.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -65,7 +65,6 @@
import com.bigdata.mdi.IResourceMetadata;
import com.bigdata.mdi.LocalPartitionMetadata;
import com.bigdata.relation.accesspath.AbstractAccessPath;
-import com.bigdata.resources.DefaultSplitHandler;
import com.bigdata.service.MetadataService;
import com.bigdata.service.Split;
@@ -86,10 +85,9 @@
* {@link ILocalBTreeView} and {@link IAutoboxBTree}.
*
* @todo Can I implement {@link ILinearList} here? That would make it possible
- * to use keyAt() and indexOf() and might pave the way for a faster
- * {@link DefaultSplitHandler} and also for a {@link MetadataService} that
- * supports overflow since the index segments could be transparent at that
- * point.
+ * to use keyAt() and indexOf() and might pave the way for a
+ * {@link MetadataService} that supports overflow since the index segments
+ * could be transparent at that point.
*
* @author <a href="mailto:tho...@us...">Bryan Thompson</a>
* @version $Id$
Deleted: trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java
===================================================================
--- trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java 2010-07-27 21:09:15 UTC (rev 3325)
+++ trunk/bigdata/src/java/com/bigdata/resources/DefaultSplitHandler.java 2010-07-27 21:15:49 UTC (rev 3326)
@@ -1,922 +0,0 @@
-package com.bigdata.resources;
-
-import com.bigdata.bfs.BigdataFileSystem;
-import com.bigdata.btree.ISimpleSplitHandler;
-import com.bigdata.btree.ISplitHandler;
-
-/**
- * A configurable default policy for deciding when and where to split an index
- * partition into 2 or more index partitions.
- * <p>
- * Note: There is probably no single value for {@link #getEntryCountPerSplit()}
- * that is going to be "right" across applications. The space requirements for
- * keys is very difficult to estimate since leading key compression will often
- * provide a good win. Likewise, indices are free to use compression on their
- * values as well so the size of the byte[] values is not a good estimate of
- * their size in the index.
- * <p>
- * Note: The #of index entries is a good proxy for the space requirements of
- * most indices. The {@link BigdataFileSystem} is one case where the space
- * requirements could be quite different since 64M blocks may be stored along
- * with the index entries, however in that case you can also test for the size
- * of the index segment that is part of the view and decide that it's time to
- * split the view.
- *
- * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
- * @version $Id: DefaultSplitHandler.java 2265 2009-10-26 12:51:06Z thompsonbry
- * $
- *
- * @deprecated by {@link ISimpleSplitHandler}. This is only kept around to
- * deserialize existing instances.
- */
-public class DefaultSplitHandler implements ISplitHandler {
-
- /**
- *
- */
- private static final long serialVersionUID = 1675517991163473445L;
-
-// /**
-// * Logger.
-// */
-// protected static final Logger log = Logger
-// .getLogger(DefaultSplitHandler.class);
-//
-// /**
-// * True iff the {@link #log} level is DEBUG or less.
-// */
-// final protected static boolean DEBUG = log.isDebugEnabled();
-//
-// /**
-// * True iff the {@link #log} level is INFO or less.
-// */
-// final protected static boolean INFO = log.isInfoEnabled();
-
- private int minimumEntryCount;
-
- private int entryCountPerSplit;
-
- private int sampleRate;
-
- private double overCapacityMultiplier;
-
- private double underCapacityMultiplier;
-
-// public String toString() {
-//
-// final StringBuilder sb = new StringBuilder();
-//
-// sb.append(getClass().getName());
-//
-// sb.append("{ minimumEntryCount=" + minimumEntryCount);
-//
-// sb.append(", entryCountPerSplit=" + entryCountPerSplit);
-//
-// sb.append(", sampleRate=" + sampleRate);
-//
-// sb.append(", overCapacityMultiplier=" + overCapacityMultiplier);
-//
-// sb.append(", underCapacityMultiplier=" + underCapacityMultiplier);
-//
-// sb.append(", targetCountPerSplit=" + getTargetEntryCountPerSplit());
-//
-// sb.append("}");
-//
-// return sb.toString();
-//
-// }
-
- /**
- * De-serialization ctor.
- */
- public DefaultSplitHandler() {
-
- }
-
-// /**
-// * Setup a split handler.
-// *
-// * @param minimumEntryCount
-// * An index partition which has no more than this many tuples
-// * should be joined with its rightSibling (if any).
-// * @param entryCountPerSplit
-// * The target #of tuples for an index partition.
-// * @param overCapacityMultiplier
-// * The index partition will be split when its actual entry count
-// * is GTE to
-// * <code>overCapacityMultiplier * entryCountPerSplit</code>
-// * @param underCapacityMultiplier
-// * When an index partition will be split, the #of new index
-// * partitions will be chosen such that each index partition is
-// * approximately <i>underCapacityMultiplier</i> full.
-// * @param sampleRate
-// * The #of samples to take per estimated split (non-negative, and
-// * generally on the order of 10s of samples). The purpose of the
-// * samples is to accommodate the actual distribution of the keys
-// * in the index.
-// *
-// * @throws IllegalArgumentException
-// *             if any argument, or combination of arguments, is out of
-// * range.
-// */
-// public DefaultSplitHandler(final int minimumEntryCount,
-// final int entryCountPerSplit, final double overCapacityMultiplier,
-// final double underCapacityMultiplier, final int sampleRate) {
-//
-// /*
-// * Bootstrap parameter settings.
-// *
-// * First, verify combination of parameters is legal.
-// */
-// assertSplitJoinStable(minimumEntryCount, entryCountPerSplit,
-// underCapacityMultiplier);
-//
-// /*
-// * Now that we know the combination is legal, set individual parameters
-// * that have dependencies in their legal range. This will let us set the
-// * individual parameters with their setter methods below.
-// */
-// this.minimumEntryCount = minimumEntryCount;
-// this.entryCountPerSplit = entryCountPerSplit;
-// this.underCapacityMultiplier = underCapacityMultiplier;
-//
-// /*
-// * Use individual set methods to validate each parameter by itself.
-// */
-//
-// setMinimumEntryCount(minimumEntryCount);
-//
-// setEntryCountPerSplit(entryCountPerSplit);
-//
-// setOverCapacityMultiplier(overCapacityMultiplier);
-//
-// setUnderCapacityMultiplier(underCapacityMultiplier);
-//
-// setSampleRate(sampleRate);
-//
-// }
-
-// /**
-// * Return <code>true</code> iff the range count of the index is less than
-// * the {@link #getMinimumEntryCount()}.
-// * <p>
-// * Note: This relies on the fast range count, which is the upper bound on
-// * the #of index entries. For this reason an index partition which has
-// * undergone a lot of deletes will not underflow until it has gone through a
-// * build to purge the deleted index entries. This is true even when all
-// * index entries in the index partition have been deleted!
-// */
-// public boolean shouldJoin(final long rangeCount) {
-//
-// final boolean shouldJoin = rangeCount <= getMinimumEntryCount();
-//
-// if (INFO)
-// log.info("shouldJoin=" + shouldJoin + " : rangeCount=" + rangeCount
-// + ", minimumEntryCount=" + getMinimumEntryCount());
-//
-// return shouldJoin;
-//
-// }
-//
-// /**
-// * Verify that a split will not result in index partitions whose range
-// * counts are such that they would be immediately eligible for a join.
-// *
-// * @throws IllegalArgumentException
-// * if split / join is not stable for the specified values.
-// *
-// * @todo it might be worth while to convert this to a warning since actions
-// * such as a scatter split are designed with the expectation that the
-// * splits may be undercapacity but will fill up before the next
-// * overflow (or that joins will simply not be triggered for N
-// * overflows after a split).
-// */
-// static void assertSplitJoinStable(final int minimumEntryCount,
-// final int entryCountPerSplit, final double underCapacityMultiplier) {
-//
-// final int targetEntryCount = (int) Math.round(underCapacityMultiplier
-// * entryCountPerSplit);
-//
-// if (minimumEntryCount > targetEntryCount) {
-//
-// throw new IllegalArgumentException("minimumEntryCount("
-// + minimumEntryCount + ") exceeds underCapacityMultiplier("
-// + underCapacityMultiplier + ") * entryCountPerSplit("
-// + entryCountPerSplit + ")");
-//
-// }
-//
-// }
-//
-// /**
-// * The minimum #of index entries before the index partition becomes eligible
-// * to be joined.
-// */
-// public int getMinimumEntryCount() {
-//
-// return minimumEntryCount;
-//
-// }
-//
-// public void setMinimumEntryCount(final int minimumEntryCount) {
-//
-// if (minimumEntryCount < 0)
-// throw new IllegalArgumentException("minimumEntryCount="
-// + minimumEntryCount);
-//
-// assertSplitJoinStable(minimumEntryCount, getEntryCountPerSplit(),
-// getUnderCapacityMultiplier());
-//
-// this.minimumEntryCount = minimumEntryCount;
-//
-// }
-//
-// /**
-// * The target maximum #of index entries in an index partition.
-// */
-// public int getEntryCountPerSplit() {
-//
-// return entryCountPerSplit;
-//
-// }
-//
-// public void setEntryCountPerSplit(final int entryCountPerSplit) {
-//
-//// if (entryCountPerSplit < Options.MIN_BRANCHING_FACTOR) {
-////
-//// throw new IllegalArgumentException(
-//// "entryCountPerSplit must be GTE the minimum branching factor: entryCountPerSplit="
-//// + entryCountPerSplit
-//// + ", minBranchingFactor="
-//// + Options.MIN_BRANCHING_FACTOR);
-////
-//// }
-// if (entryCountPerSplit < 1) {
-//
-// throw new IllegalArgumentException(
-// "entryCountPerSplit must be GTE ONE(1): entryCountPerSplit="
-// + entryCountPerSplit);
-//
-// }
-//
-// assertSplitJoinStable(getMinimumEntryCount(), entryCountPerSplit,
-// getUnderCapacityMultiplier());
-//
-// this.entryCountPerSplit = entryCountPerSplit;
-//
-// }
-//
-// /**
-// * The #of samples per estimated #of splits.
-// */
-// public int getSampleRate() {
-//
-// return sampleRate;
-//
-// }
-//
-// public void setSampleRate(final int sampleRate) {
-//
-// if (sampleRate <= 0)
-// throw new IllegalArgumentException();
-//
-// this.sampleRate = sampleRate;
-//
-// }
-//
-// /**
-// * The threshold for splitting an index is the
-// * {@link #getOverCapacityMultiplier()} times
-// * {@link #getEntryCountPerSplit()}. If there are fewer than this many
-// * entries in the index then it will not be split.
-// */
-// public double getOverCapacityMultiplier() {
-//
-// return overCapacityMultiplier;
-//
-// }
-//
-// /**
-// *
-// * @param overCapacityMultiplier
-// * A value in [1.0:2.0].
-// */
-// public void setOverCapacityMultiplier(final double overCapacityMultiplier) {
-//
-// final double min = 1.0;
-// final double max = 2.0;
-//
-// if (overCapacityMultiplier < min || overCapacityMultiplier > max) {
-//
-// throw new IllegalArgumentException("Must be in [" + min + ":" + max
-// + "], but was " + overCapacityMultiplier);
-//
-// }
-//
-// this.overCapacityMultiplier = overCapacityMultiplier;
-//
-// }
-//
-// /**
-// * This is the target under capacity rate for a new index partition. For
-// * example, if the {@link #getEntryCountPerSplit()} is 5M and this
-// * property is <code>.75</code> then an attempt will be made to divide
-// * the index partition into N splits such that each split is at 75% of
-// * the {@link #getEntryCountPerSplit()} capacity.
-// */
-// public double getUnderCapacityMultiplier() {
-//
-// return underCapacityMultiplier;
-//
-// }
-//
-// /**
-// *
-// * @param underCapacityMultiplier
-// * A value in [0.5,1.0).
-// */
-// public void setUnderCapacityMultiplier(final double underCapacityMultiplier) {
-//
-// final double min = 0.5;
-// final double max = 1.0;
-//
-// if (underCapacityMultiplier < min || underCapacityMultiplier >= max) {
-//
-// throw new IllegalArgumentException("Must be in [" + min + ":" + max
-// + "), but was " + underCapacityMultiplier);
-//
-// }
-//
-// assertSplitJoinStable(getMinimumEntryCount(), getEntryCountPerSplit(),
-// underCapacityMultiplier);
-//
-// this.underCapacityMultiplier = underCapacityMultiplier;
-//
-// }
-//
-// /**
-// * The target #of tuples per split, which is given by:
-// *
-// * <pre>
-// * targetEntryCountPerSplit := underCapacityMultiplier * entryCountPerSplit
-// * </pre>
-// *
-// */
-// public int getTargetEntryCountPerSplit() {
-//
-// return (int) Math.round(getUnderCapacityMultiplier()
-// * getEntryCountPerSplit());
-//
-// }
-//
-// public boolean shouldSplit(final long rangeCount) {
-//
-// /*
-// * Recommend split if the range count equals or exceeds the overcapacity
-// * multiplier.
-// */
-//
-// if (rangeCount >= (getOverCapacityMultiplier() * entryCountPerSplit)) {
-//
-// if(INFO)
-// log.info("Recommending split: rangeCount(" + rangeCount
-// + ") >= (entryCountPerSplit(" + entryCountPerSplit
-// + ") * overCapacityMultiplier("
-// + getOverCapacityMultiplier() + "))");
-//
-// return true;
-//
-// }
-//
-// return false;
-//
-// }
-//
-// public double percentOfSplit(final long rangeCount) {
-//
-// final double percentOfSplit = (double) rangeCount
-// / (double) entryCountPerSplit;
-//
-// if (INFO)
-// log.info("percentOfSplit=" + percentOfSplit + " = rangeCount("
-// + rangeCount + ") / entryCountPerSplit("
-// + entryCountPerSplit + ")");
-//
-// return percentOfSplit;
-//
-// }
-//
-// /**
-// * A sample collected from a key-range scan.
-// *
-// * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
-// * @version $Id$
-// */
-// static public class Sample {
-//
-// /**
-// * A key from the index.
-// */
-// final byte[] key;
-//
-// /**
-// * The origin zero (0) offset at which that key was found
-// * (interpretation is that the key was visited by the Nth
-// * {@link ITuple}).
-// */
-// final int offset;
-//
-// public Sample(byte[] key, int offset) {
-//
-// assert key != null;
-//
-// assert offset >= 0;
-//
-// this.key = key;
-//
-// this.offset = offset;
-//
-// }
-//
-// public String toString() {
-//
-// return super.toString() + "{offset=" + offset + ", key="
-// + Arrays.toString(key) + "}";
-//
-// }
-//
-// }
-//
-// /**
-// * Sample index using a range scan choosing ({@link #getSampleRate()} x N)
-// * {@link Sample}s. The key range scan will filter out both duplicates and
-// * deleted i...
[truncated message content] |