From: <btm...@us...> - 2010-08-08 15:57:50
Revision: 3433
          http://bigdata.svn.sourceforge.net/bigdata/?rev=3433&view=rev
Author:   btmurphy
Date:     2010-08-08 15:57:40 +0000 (Sun, 08 Aug 2010)

Log Message:
-----------
merge -r3378:HEAD(3430) ~/bigdata/trunk ~/bigdata/branches/dev-btm [trunk --> branch dev-btm]

Modified Paths:
--------------
    branches/dev-btm/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/AbstractBTree.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/BTree.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java
    branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/ICUSortKeyGenerator.java
    branches/dev-btm/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
    branches/dev-btm/bigdata/src/java/com/bigdata/io/DirectBufferPool.java
    branches/dev-btm/bigdata/src/java/com/bigdata/io/WriteCache.java
    branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java
    branches/dev-btm/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java
    branches/dev-btm/bigdata/src/java/com/bigdata/journal/WriteExecutorService.java
    branches/dev-btm/bigdata/src/java/com/bigdata/mdi/PartitionLocator.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/AbstractResource.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/IMutableResource.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/RelationFusedView.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/locator/DefaultResourceLocator.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/locator/ILocatableResource.java
    branches/dev-btm/bigdata/src/java/com/bigdata/relation/rule/eval/pipeline/DistributedJoinTask.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/BTreeMetadata.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/IndexManager.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/OverflowManager.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/ResourceEvents.java
    branches/dev-btm/bigdata/src/java/com/bigdata/resources/StoreManager.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/AbstractFederation.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/AbstractScaleOutFederation.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/AbstractTransactionService.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/DataService.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/DefaultServiceFederationDelegate.java
    branches/dev-btm/bigdata/src/java/com/bigdata/service/DistributedTransactionService.java
    branches/dev-btm/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java
    branches/dev-btm/bigdata/src/java/com/bigdata/sparse/KeyDecoder.java
    branches/dev-btm/bigdata/src/java/com/bigdata/sparse/TPS.java
    branches/dev-btm/bigdata/src/java/com/bigdata/striterator/ChunkedConvertingIterator.java
    branches/dev-btm/bigdata/src/resources/logging/log4j.properties
    branches/dev-btm/bigdata/src/test/com/bigdata/btree/AbstractIndexSegmentTestCase.java
    branches/dev-btm/bigdata/src/test/com/bigdata/btree/TestIndexSegmentMultiBlockIterators.java
    branches/dev-btm/bigdata/src/test/com/bigdata/btree/keys/TestKeyBuilder.java
    branches/dev-btm/bigdata/src/test/com/bigdata/concurrent/StressTestNonBlockingLockManagerWithTxDag.java
    branches/dev-btm/bigdata/src/test/com/bigdata/journal/TestAll.java
    branches/dev-btm/bigdata/src/test/com/bigdata/journal/TestTransactionService.java
    branches/dev-btm/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerBootstrapTestCase.java
    branches/dev-btm/bigdata/src/test/com/bigdata/resources/AbstractResourceManagerTestCase.java
    branches/dev-btm/bigdata/src/test/com/bigdata/resources/TestReleaseResources.java
    branches/dev-btm/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java
    branches/dev-btm/bigdata/src/test/com/bigdata/service/TestDistributedTransactionServiceRestart.java
    branches/dev-btm/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/service/jini/TransactionServer.java
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/service/jini/lookup/AbstractCachingServiceClient.java
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/service/jini/master/TaskMaster.java
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/service/jini/util/DumpFederation.java
    branches/dev-btm/bigdata-perf/bsbm/src/resources/bsbm-data/queries/query9.txt
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/DefaultExtensionFactory.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtension.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/IExtensionFactory.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/ILexiconConfiguration.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/LexiconConfiguration.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/internal/XSDDecimalIV.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexiconRelation.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/Term2IdWriteTask.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/rules/AbstractRuleFastClosure_3_5_6_7_9.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/store/LocalTripleStore.java
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.config
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/internal/ColorsEnumExtension.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/internal/EpochExtension.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/internal/SampleExtensionFactory.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/internal/TestEncodeDecodeKeys.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/rio/small.rdf
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/store/AbstractTestCase.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/store/TestAll.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithEmbeddedFederation.java
    branches/dev-btm/bigdata-rdf/src/test/com/bigdata/rdf/store/TestScaleOutTripleStoreWithJiniFederation.java
    branches/dev-btm/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl2.java
    branches/dev-btm/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
    branches/dev-btm/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java
    branches/dev-btm/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoins.java
    branches/dev-btm/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuadsAndPipelineJoinsWithoutInlining.java
    branches/dev-btm/build.xml
    branches/dev-btm/src/resources/analysis/queries/benchmark.txt
    branches/dev-btm/src/resources/config/README
    branches/dev-btm/src/resources/config/bigdataCluster.config
    branches/dev-btm/src/resources/config/bigdataCluster16.config
    branches/dev-btm/src/resources/config/log4j.properties
    branches/dev-btm/src/resources/scripts/bigdata.initd

Added Paths:
-----------
    branches/dev-btm/bigdata-perf/bsbm/src/resources/bsbm-data/queries/query9-modified.txt
    branches/dev-btm/src/resources/config/bigdataStandalone.config
    branches/dev-btm/src/resources/scripts/dumpFed.sh
    branches/dev-btm/src/resources/scripts/nanoSparqlServer.sh

Property Changed:
----------------
    branches/dev-btm/
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr/
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco/
    branches/dev-btm/bigdata-jini/src/java/com/bigdata/util/config/
    branches/dev-btm/bigdata-perf/
    branches/dev-btm/bigdata-perf/lubm/lib/
    branches/dev-btm/bigdata-perf/lubm/src/resources/
    branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/util/
    branches/dev-btm/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
    branches/dev-btm/dsi-utils/LEGAL/
    branches/dev-btm/dsi-utils/lib/
    branches/dev-btm/dsi-utils/src/
    branches/dev-btm/dsi-utils/src/test/
    branches/dev-btm/dsi-utils/src/test/it/
    branches/dev-btm/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
    branches/dev-btm/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
    branches/dev-btm/osgi/
    branches/dev-btm/src/resources/config/

Property changes on: branches/dev-btm
___________________________________________________________________
Modified: svn:ignore
   - ant-build
     src
     bin
     bigdata*.jar
     ant-release
     standalone
     test*
     countersfinal.xml
     events.jnl
     .settings
     *.jnl
     TestInsertRate.out
     SYSTAP-BBT-result.txt
     U10load+query
     *.hprof
     com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv
     commit-log.txt
     eventLog
     dist
     bigdata-test
     com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv
   + ant-build
     src
     bin
     bigdata*.jar
     ant-release
     standalone
     test*
     countersfinal.xml
     events.jnl
     .settings
     *.jnl
     TestInsertRate.out
     SYSTAP-BBT-result.txt
     U10load+query
     *.hprof
     com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv
     commit-log.txt
     eventLog
     dist
     bigdata-test
     com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv
     DIST.*.tgz
     REL.*.tgz

Modified: svn:mergeinfo
   - /branches/BTREE_BUFFER_BRANCH:2004-2045
     /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
     /branches/LEXICON_REFACTOR_BRANCH:2633-3304
     /branches/bugfix-btm:2594-3237
     /branches/fko:3150-3194
     /trunk:2575-2594,2596-2877,2882-2903,2910-3378
   + /branches/BTREE_BUFFER_BRANCH:2004-2045
     /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
     /branches/LEXICON_REFACTOR_BRANCH:2633-3304
     /branches/bugfix-btm:2594-3237
     /branches/fko:3150-3194
     /trunk:2575-2594,2596-2877,2882-2903,2910-3430
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/bfs/BigdataFileSystem.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -420,7 +420,7 @@
 
         }
 
     }
-    
+
     /**
      * Note: A commit is required in order for a read-committed view to have
      * access to the registered indices. When running against an

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/AbstractBTree.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/AbstractBTree.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -2840,7 +2840,8 @@
          * might also want to limit the maximum size of the reads.
          */
 
-        final DirectBufferPool pool = DirectBufferPool.INSTANCE_10M;
+//      final DirectBufferPool pool = DirectBufferPool.INSTANCE_10M;
+        final DirectBufferPool pool = DirectBufferPool.INSTANCE;
 
         if (true && ((flags & REVERSE) == 0)

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/BTree.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/BTree.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/BTree.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -644,7 +644,18 @@
         this.lastCommitTime = lastCommitTime;
 
     }
 
-    private long lastCommitTime = 0L;// Until the first commit.
+
+    /**
+     * The lastCommitTime of the {@link Checkpoint} record from which the
+     * {@link BTree} was loaded.
+     * <p>
+     * Note: Made volatile on 8/2/2010 since it is not otherwise obvious what
+     * would guarantee visibility of this field, though I do seem to remember
+     * that visibility might be guaranteed by how the BTree instance is
+     * discovered and returned to the caller. Still, it does no harm to make
+     * this a volatile read.
+     */
+    volatile private long lastCommitTime = 0L;// Until the first commit.
 
     /**
      * Return the {@link IDirtyListener}.
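Aside on the visibility note in the new javadoc above: absent any other happens-before edge, a plain long field written by the thread that loads the BTree need not be seen by other threads at all. A minimal sketch of the idiom (a hypothetical class, not part of this commit):

    /**
     * Minimal visibility sketch, assuming no external synchronization.
     * Without "volatile", a reader thread may never observe the write made
     * by the loading thread; volatile alone supplies the happens-before edge.
     */
    class CheckpointHolder {

        // Analogous to BTree.lastCommitTime: written once on load, read later.
        private volatile long lastCommitTime = 0L;

        void onLoad(final long commitTime) {
            lastCommitTime = commitTime; // volatile write: publishes the value
        }

        long readCommitTime() {
            return lastCommitTime; // volatile read: sees the latest write
        }
    }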
@@ -1525,45 +1536,63 @@
 
     }
 
-    /**
-     * Load an instance of a {@link BTree} or derived class from the store. The
-     * {@link BTree} or derived class MUST declare a constructor with the
-     * following signature: <code>
-     *
-     * <i>className</i>(IRawStore store, Checkpoint checkpoint, BTreeMetadata metadata, boolean readOnly)
-     *
-     * </code>
-     *
-     * @param store
-     *            The store.
-     * @param addrCheckpoint
-     *            The address of a {@link Checkpoint} record for the index.
-     * @param readOnly
-     *            When <code>true</code> the {@link BTree} will be marked as
-     *            read-only. Marking has some advantages relating to the locking
-     *            scheme used by {@link Node#getChild(int)} since the root node
-     *            is known to be read-only at the time that it is allocated as
-     *            per-child locking is therefore in place for all nodes in the
-     *            read-only {@link BTree}. It also results in much higher
-     *            concurrency for {@link AbstractBTree#touch(AbstractNode)}.
-     *
-     * @return The {@link BTree} or derived class loaded from that
-     *         {@link Checkpoint} record.
-     */
+    /**
+     * Load an instance of a {@link BTree} or derived class from the store. The
+     * {@link BTree} or derived class MUST declare a constructor with the
+     * following signature: <code>
+     *
+     * <i>className</i>(IRawStore store, Checkpoint checkpoint, BTreeMetadata metadata, boolean readOnly)
+     *
+     * </code>
+     *
+     * @param store
+     *            The store.
+     * @param addrCheckpoint
+     *            The address of a {@link Checkpoint} record for the index.
+     * @param readOnly
+     *            When <code>true</code> the {@link BTree} will be marked as
+     *            read-only. Marking has some advantages relating to the locking
+     *            scheme used by {@link Node#getChild(int)} since the root node
+     *            is known to be read-only at the time that it is allocated, and
+     *            per-child locking is therefore in place for all nodes in the
+     *            read-only {@link BTree}. It also results in much higher
+     *            concurrency for {@link AbstractBTree#touch(AbstractNode)}.
+     *
+     * @return The {@link BTree} or derived class loaded from that
+     *         {@link Checkpoint} record.
+     *
+     * @throws IllegalArgumentException
+     *             if store is <code>null</code>.
+     */
     @SuppressWarnings("unchecked")
     public static BTree load(final IRawStore store, final long addrCheckpoint,
             final boolean readOnly) {
 
+        if (store == null)
+            throw new IllegalArgumentException();
+
         /*
          * Read checkpoint record from store.
          */
-        final Checkpoint checkpoint = Checkpoint.load(store, addrCheckpoint);
+        final Checkpoint checkpoint;
+        try {
+            checkpoint = Checkpoint.load(store, addrCheckpoint);
+        } catch (Throwable t) {
+            throw new RuntimeException("Could not load Checkpoint: store="
+                    + store + ", addrCheckpoint="
+                    + store.toString(addrCheckpoint), t);
+        }
 
-        /*
-         * Read metadata record from store.
-         */
-        final IndexMetadata metadata = IndexMetadata.read(store, checkpoint
-                .getMetadataAddr());
+        /*
+         * Read metadata record from store.
+         */
+        final IndexMetadata metadata;
+        try {
+            metadata = IndexMetadata.read(store, checkpoint.getMetadataAddr());
+        } catch (Throwable t) {
+            throw new RuntimeException("Could not read IndexMetadata: store="
+                    + store + ", checkpoint=" + checkpoint, t);
+        }
 
         if (log.isInfoEnabled()) {
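The new load() above follows a fail-fast shape: validate the arguments, then wrap low-level read failures in an unchecked exception that carries enough context to diagnose which store and address failed. A generic sketch of the same pattern, using hypothetical names (RecordLoader, readRecord), not bigdata code:

    /** Sketch of the validate-then-wrap-with-context loading pattern. */
    final class RecordLoader {

        /** Pretend low-level read; may fail for many store-specific reasons. */
        static byte[] readRecord(final Object store, final long addr) {
            throw new UnsupportedOperationException("stub");
        }

        static byte[] load(final Object store, final long addr) {
            if (store == null)
                throw new IllegalArgumentException("store"); // fail fast
            try {
                return readRecord(store, addr);
            } catch (Throwable t) {
                // Re-throw with enough context to identify the failing read.
                throw new RuntimeException("Could not load record: store="
                        + store + ", addr=" + addr, t);
            }
        }
    }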
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/DumpIndexSegment.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -36,6 +36,7 @@
 import org.apache.log4j.Logger;
 
 import com.bigdata.btree.IndexSegment.ImmutableNodeFactory.ImmutableLeaf;
+import com.bigdata.io.DirectBufferPool;
 import com.bigdata.journal.DumpJournal;
 import com.bigdata.rawstore.IRawStore;
 
@@ -154,6 +155,16 @@
 
         }
 
+        // multi-block scan of the index segment.
+        boolean multiBlockScan = false; // @todo command line option.
+        if (multiBlockScan) {
+
+            writeBanner("dump leaves using multi-block forward scan");
+
+            dumpLeavesMultiBlockForwardScan(store);
+
+        }
+
         // dump the leaves using a fast reverse scan.
         boolean fastReverseScan = true;// @todo command line option
         if (fastReverseScan) {
 
@@ -524,6 +535,36 @@
 
     }
 
+    /**
+     * Dump leaves using the {@link IndexSegmentMultiBlockIterator}.
+     * 
+     * @param store
+     */
+    static void dumpLeavesMultiBlockForwardScan(final IndexSegmentStore store) {
+
+        final long begin = System.currentTimeMillis();
+
+        final IndexSegment seg = store.loadIndexSegment();
+
+        final ITupleIterator<?> itr = new IndexSegmentMultiBlockIterator(seg, DirectBufferPool.INSTANCE,
+                null/* fromKey */, null/* toKey */, IRangeQuery.DEFAULT/* flags */);
+
+        int nscanned = 0;
+
+        while(itr.hasNext()) {
+
+            itr.next();
+
+            nscanned++;
+
+        }
+
+        final long elapsed = System.currentTimeMillis() - begin;
+
+        System.out.println("Visited "+nscanned+" tuples using multi-block forward scan in "+elapsed+" ms");
+
+    }
+
     static void writeBanner(String s) {
 
         System.out.println(bar);

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/IndexMetadata.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/IndexMetadata.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/IndexMetadata.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -2049,10 +2049,14 @@
 
         // Note: default assumes NOT an index partition.
         this.pmd = null;
 
+        /* Intern'd to reduce duplication on the heap. Will be com.bigdata.btree.BTree or
+         * com.bigdata.btree.IndexSegment and occasionally a class derived from BTree.
+         */
         this.btreeClassName = getProperty(indexManager, properties, namespace,
-                Options.BTREE_CLASS_NAME, BTree.class.getName().toString());
+                Options.BTREE_CLASS_NAME, BTree.class.getName()).intern();
 
-        this.checkpointClassName = Checkpoint.class.getName();
+        // Intern'd to reduce duplication on the heap.
+        this.checkpointClassName = Checkpoint.class.getName().intern();
 
 //        this.addrSer = AddressSerializer.INSTANCE;
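The .intern() calls above deduplicate equal class-name strings across the many IndexMetadata records held on the heap. A tiny self-contained illustration of what interning buys (demo code, not from this commit):

    /** Demonstrates that String.intern() returns one canonical instance. */
    public class InternDemo {
        public static void main(String[] args) {
            // Two distinct String objects with equal content.
            String a = new StringBuilder("com.bigdata.btree.").append("BTree").toString();
            String b = new StringBuilder("com.bigdata.btree.").append("BTree").toString();

            System.out.println(a == b);                   // false: two heap objects
            System.out.println(a.intern() == b.intern()); // true: one canonical copy

            // With many metadata records holding the same class name, interning
            // lets them all share a single String on the heap.
        }
    }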
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/DefaultKeyBuilderFactory.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -207,7 +207,7 @@
 
         if (properties != null) {
 
-            val = properties.getProperty(key, def);
+            val = properties.getProperty(key);//, def);
 
         }

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/ICUSortKeyGenerator.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/ICUSortKeyGenerator.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/btree/keys/ICUSortKeyGenerator.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -108,7 +108,7 @@
 
     }
 
-    ICUSortKeyGenerator(Locale locale, Object strength, DecompositionEnum mode) {
+    ICUSortKeyGenerator(final Locale locale, final Object strength, final DecompositionEnum mode) {
 
         if (locale == null)
             throw new IllegalArgumentException();
 
@@ -132,7 +132,7 @@
 
         } else {
 
-            StrengthEnum str = (StrengthEnum) strength;
+            final StrengthEnum str = (StrengthEnum) strength;
 
             if (log.isInfoEnabled())
                 log.info("strength=" + str);
 
@@ -200,9 +200,9 @@
      * Buffer is reused for each {@link String} from which a sort key is
      * derived.
      */
-    private RawCollationKey raw = new RawCollationKey(128);
+    final private RawCollationKey raw = new RawCollationKey(128);
 
-    public void appendSortKey(KeyBuilder keyBuilder, String s) {
+    public void appendSortKey(final KeyBuilder keyBuilder, final String s) {
 
 //        RawCollationKey raw = collator.getRawCollationKey(s, null);
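Dropping the def argument in DefaultKeyBuilderFactory above defers default handling to the caller, presumably so an explicitly configured property can be told apart from an unset one. A sketch of the relevant java.util.Properties semantics (the property names here are made up):

    import java.util.Properties;

    /** Sketch: why returning null for an unset key can beat baking in a default. */
    public class PropertyDefaultDemo {
        public static void main(String[] args) {
            Properties defaults = new Properties();
            defaults.setProperty("collator.strength", "Primary");

            Properties props = new Properties(defaults); // chained defaults

            // getProperty(key) still consults the chained defaults...
            System.out.println(props.getProperty("collator.strength")); // Primary

            // ...but an unset key is reported as null, so the caller can tell
            // "never configured" apart from "configured to the fallback value".
            String v = props.getProperty("collator.locale");
            System.out.println(v == null ? "unset -> apply caller's default" : v);

            // By contrast, getProperty("collator.locale", "en_US") would return
            // "en_US" here, hiding the fact that the key was never set.
        }
    }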
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/counters/AbstractStatisticsCollector.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -278,19 +278,19 @@
         AbstractStatisticsCollector
                 .addGarbageCollectorMXBeanCounters(serviceRoot
                         .makePath(ICounterHierarchy.Memory_GarbageCollectors));
-
-        /*
-         * Add counters reporting on the various DirectBufferPools.
-         */
-        {
-
-            // general purpose pool.
-            serviceRoot.makePath(
-                    IProcessCounters.Memory + ICounterSet.pathSeparator
-                            + "DirectBufferPool").attach(
-                    DirectBufferPool.getCounters());
-
-        }
+        // Moved since counters must be dynamically reattached to reflect pool hierarchy.
+//        /*
+//         * Add counters reporting on the various DirectBufferPools.
+//         */
+//        {
+//
+//            serviceRoot.makePath(
+//                    IProcessCounters.Memory + ICounterSet.pathSeparator
+//                            + "DirectBufferPool").attach(
+//                    DirectBufferPool.getCounters());
+//
+//        }
 
         if (LRUNexus.INSTANCE != null) {

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/io/DirectBufferPool.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/io/DirectBufferPool.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/io/DirectBufferPool.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -218,12 +218,12 @@
      */
     public final static DirectBufferPool INSTANCE;
 
-    /**
-     * A JVM-wide pool of direct {@link ByteBuffer}s with a default
-     * {@link Options#BUFFER_CAPACITY} of <code>10 MB</code>. The main use case
-     * for the 10M buffers are multi-block IOs for the {@link IndexSegment}s.
-     */
-    public final static DirectBufferPool INSTANCE_10M;
+//    /**
+//     * A JVM-wide pool of direct {@link ByteBuffer}s with a default
+//     * {@link Options#BUFFER_CAPACITY} of <code>10 MB</code>. The main use case
+//     * for the 10M buffers are multi-block IOs for the {@link IndexSegment}s.
+//     */
+//    public final static DirectBufferPool INSTANCE_10M;
 
     /**
      * An unbounded list of all {@link DirectBufferPool} instances.
 
@@ -251,11 +251,11 @@
                 bufferCapacity//
         );
 
-        INSTANCE_10M = new DirectBufferPool(//
-                "10M",//
-                Integer.MAX_VALUE, // poolCapacity
-                10 * Bytes.megabyte32 // bufferCapacity
-        );
+//        INSTANCE_10M = new DirectBufferPool(//
+//                "10M",//
+//                Integer.MAX_VALUE, // poolCapacity
+//                10 * Bytes.megabyte32 // bufferCapacity
+//        );
 
         /*
          * This configuration will block if there is a concurrent demand for
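With INSTANCE_10M retired, multi-block IO now draws on the default pool. For readers new to the idea: recycling direct ByteBuffers matters because they are expensive to allocate and are not promptly reclaimed by GC. A minimal pool sketch (a hypothetical class, far simpler than the real DirectBufferPool):

    import java.nio.ByteBuffer;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    /** Minimal direct-buffer pool sketch (not the bigdata DirectBufferPool). */
    final class SimpleDirectBufferPool {

        private final BlockingQueue<ByteBuffer> free = new LinkedBlockingQueue<>();
        private final int bufferCapacity;

        SimpleDirectBufferPool(final int bufferCapacity) {
            this.bufferCapacity = bufferCapacity;
        }

        /** Reuse a pooled buffer if available, else allocate a new direct buffer. */
        ByteBuffer acquire() {
            final ByteBuffer b = free.poll();
            return b != null ? b : ByteBuffer.allocateDirect(bufferCapacity);
        }

        /** Return a buffer to the pool; clear() resets position/limit for reuse. */
        void release(final ByteBuffer b) {
            b.clear();
            free.offer(b);
        }
    }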
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/io/WriteCache.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/io/WriteCache.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/io/WriteCache.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -51,7 +51,7 @@
 import com.bigdata.counters.Instrument;
 import com.bigdata.journal.AbstractBufferStrategy;
 import com.bigdata.journal.DiskOnlyStrategy;
-import com.bigdata.journal.DiskOnlyStrategy.StoreCounters;
+//import com.bigdata.journal.DiskOnlyStrategy.StoreCounters;
 import com.bigdata.rawstore.Bytes;
 import com.bigdata.rawstore.IRawStore;
 import com.bigdata.rwstore.RWStore;

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -1027,33 +1027,33 @@
 
         }
 
-        case Disk: {
+//        case Disk: {
+//
+//            /*
+//             * Setup the buffer strategy.
+//             */
+//
+//            fileMetadata = new FileMetadata(file, BufferMode.Disk,
+//                    useDirectBuffers, initialExtent, maximumExtent, create,
+//                    isEmptyFile, deleteOnExit, readOnly, forceWrites,
+//                    offsetBits, //readCacheCapacity, readCacheMaxRecordSize,
+//                    //readOnly ? null : writeCache,
+//                    writeCacheEnabled,
+//                    validateChecksum,
+//                    createTime, checker, alternateRootBlock);
+//
+//            _bufferStrategy = new DiskOnlyStrategy(
+//                    0L/* soft limit for maximumExtent */,
+////                    minimumExtension,
+//                    fileMetadata);
+//
+//            this._rootBlock = fileMetadata.rootBlock;
+//
+//            break;
+//
+//        }
 
-            /*
-             * Setup the buffer strategy.
-             */
-
-            fileMetadata = new FileMetadata(file, BufferMode.Disk,
-                    useDirectBuffers, initialExtent, maximumExtent, create,
-                    isEmptyFile, deleteOnExit, readOnly, forceWrites,
-                    offsetBits, //readCacheCapacity, readCacheMaxRecordSize,
-                    //readOnly ? null : writeCache,
-                    writeCacheEnabled,
-                    validateChecksum,
-                    createTime, checker, alternateRootBlock);
-
-            _bufferStrategy = new DiskOnlyStrategy(
-                    0L/* soft limit for maximumExtent */,
-//                    minimumExtension,
-                    fileMetadata);
-
-            this._rootBlock = fileMetadata.rootBlock;
-
-            break;
-
-        }
-
-//        case Disk:
+        case Disk:
         case DiskWORM: {
 
             /*

Modified: branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/journal/AbstractLocalTransactionManager.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -7,6 +7,7 @@
 
 import com.bigdata.counters.CounterSet;
 import com.bigdata.counters.Instrument;
+import com.bigdata.resources.StoreManager;
 import com.bigdata.service.IBigdataFederation;
 import com.bigdata.service.IDataService;
 
@@ -171,16 +172,18 @@
      * Delay between attempts to reach the remote service (ms).
      */
     final long delay = 10L;
-
-    /**
-     * #of attempts to reach the remote service.
-     * 
-     * Note: delay*maxtries == 1000ms of trying before we give up.
-     * 
-     * If this is not enough, then consider adding an optional parameter giving
-     * the time the caller will wait and letting the StoreManager wait longer
-     * during startup to discover the timestamp service.
-     */
+
+    /**
+     * #of attempts to reach the remote service.
+     * <p>
+     * Note: delay*maxtries == 1000ms of trying before we give up, plus however
+     * long we are willing to wait for service discovery if the problem is
+     * locating the {@link ITransactionService}.
+     * <p>
+     * If this is not enough, then consider adding an optional parameter giving
+     * the time the caller will wait and letting the {@link StoreManager} wait
+     * longer during startup to discover the timestamp service.
+     */
     final int maxtries = 100;
 
     /**
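The delay/maxtries javadoc above describes a bounded retry loop: delay * maxtries bounds the total time spent trying before giving up. A generic sketch of that pattern (illustrative names, not the bigdata implementation):

    import java.util.concurrent.Callable;

    /** Bounded retry: delay * maxtries bounds the total time spent trying. */
    final class Retry {

        static <T> T call(final Callable<T> task, final int maxtries,
                final long delayMillis) throws Exception {
            Exception last = null;
            for (int i = 0; i < maxtries; i++) {
                try {
                    return task.call(); // e.g., locate the remote transaction service
                } catch (Exception e) {
                    last = e;
                    Thread.sleep(delayMillis); // back off before the next attempt
                }
            }
            throw last; // give up after delay*maxtries ms of trying
        }
    }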
Modified: branches/dev-btm/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java
===================================================================
--- branches/dev-btm/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java	2010-08-08 15:20:16 UTC (rev 3432)
+++ branches/dev-btm/bigdata/src/java/com/bigdata/journal/DiskOnlyStrategy.java	2010-08-08 15:57:40 UTC (rev 3433)
@@ -46,6 +46,7 @@
 import com.bigdata.io.DirectBufferPool;
 import com.bigdata.io.FileChannelUtility;
 import com.bigdata.io.IReopenChannel;
+import com.bigdata.journal.WORMStrategy.StoreCounters;
 import com.bigdata.rawstore.Bytes;
 import com.bigdata.rawstore.IRawStore;
 import com.bigdata.resources.StoreManager.ManagedJournal;
 
@@ -501,7 +502,7 @@
 
             writeCache.flush();
 
-            storeCounters.ncacheFlush++;
+//            storeCounters.ncacheFlush++;
 
         }
 
@@ -544,551 +545,551 @@
 
     }
 
-    /**
-     * Counters for {@link IRawStore} access, including operations that read or
-     * write through to the underlying media.
-     *
-     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
-     * @version $Id$
-     *
-     * @todo report elapsed time and average latency for force, reopen, and
-     *       writeRootBlock.
-     *
-     * @todo counters need to be atomic if we want to avoid the possibility of
-     *       concurrent <code>x++</code> operations failing to correctly
-     *       increment <code>x</code> for each request.
-     */
-    public static class StoreCounters {
-
-        /**
-         * #of read requests.
-         */
-        public long nreads;
-
-        /**
-         * #of read requests that are satisfied by our write cache (vs the
-         * OS or disk level write cache).
-         */
-        public long ncacheRead;
-
-        /**
-         * #of read requests that read through to the backing file.
-         */
-        public long ndiskRead;
-
-        /**
-         * #of bytes read.
-         */
-        public long bytesRead;
-
-        /**
-         * #of bytes that have been read from the disk.
-         */
-        public long bytesReadFromDisk;
-
-        /**
-         * The size of the largest record read.
-         */
-        public long maxReadSize;
-
-        /**
-         * Total elapsed time for reads.
-         */
-        public long elapsedReadNanos;
-
-        /**
-         * Total elapsed time checking the disk write cache for records to be
-         * read.
-         */
-        public long elapsedCacheReadNanos;
-
-        /**
-         * Total elapsed time for reading on the disk.
-         */
-        public long elapsedDiskReadNanos;
-
-        /**
-         * #of write requests.
-         */
-        public long nwrites;
-
-        /**
-         * #of write requests that are absorbed by our write cache (vs the OS or
-         * disk level write cache).
-         */
-        public long ncacheWrite;
-
-        /**
-         * #of times the write cache was flushed to disk.
-         */
-        public long ncacheFlush;
-
-        /**
-         * #of write requests that write through to the backing file.
-         */
-        public long ndiskWrite;
-
-        /**
-         * The size of the largest record written.
-         */
-        public long maxWriteSize;
-
-        /**
-         * #of bytes written.
-         */
-        public long bytesWritten;
-
-        /**
-         * #of bytes that have been written on the disk.
-         */
-        public long bytesWrittenOnDisk;
-
-        /**
-         * Total elapsed time for writes.
-         */
-        public long elapsedWriteNanos;
-
-        /**
-         * Total elapsed time writing records into the cache (does not count
-         * time to flush the cache when it is full or to write records that do
-         * not fit in the cache directly to the disk).
-         */
-        public long elapsedCacheWriteNanos;
-
-        /**
-         * Total elapsed time for writing on the disk.
-         */
-        public long elapsedDiskWriteNanos;
-
-        /**
-         * #of times the data were forced to the disk.
-         */
-        public long nforce;
-
-        /**
-         * #of times the length of the file was changed (typically, extended).
-         */
-        public long ntruncate;
-
-        /**
-         * #of times the file has been reopened after it was closed by an
-         * interrupt.
-         */
-        public long nreopen;
-
-        /**
-         * #of times one of the root blocks has been written.
-         */
-        public long nwriteRootBlock;
-
-        /**
-         * Initialize a new set of counters.
-         */
-        public StoreCounters() {
-
-        }
-
-        /**
-         * Copy ctor.
-         * @param o
-         */
-        public StoreCounters(final StoreCounters o) {
-
-            add( o );
-
-        }
-
-        /**
-         * Adds counters to the current counters.
-         *
-         * @param o
-         */
-        public void add(final StoreCounters o) {
-
-            nreads += o.nreads;
-            ncacheRead += o.ncacheRead;
-            ndiskRead += o.ndiskRead;
-            bytesRead += o.bytesRead;
-            bytesReadFromDisk += o.bytesReadFromDisk;
-            maxReadSize += o.maxReadSize;
-            elapsedReadNanos += o.elapsedReadNanos;
-            elapsedCacheReadNanos += o.elapsedCacheReadNanos;
-            elapsedDiskReadNanos += o.elapsedDiskReadNanos;
-
-            nwrites += o.nwrites;
-            ncacheWrite += o.ncacheWrite;
-            ncacheFlush += o.ncacheFlush;
-            ndiskWrite += o.ndiskWrite;
-            maxWriteSize += o.maxWriteSize;
-            bytesWritten += o.bytesWritten;
-            bytesWrittenOnDisk += o.bytesWrittenOnDisk;
-            elapsedWriteNanos += o.elapsedWriteNanos;
-            elapsedCacheWriteNanos += o.elapsedCacheWriteNanos;
-            elapsedDiskWriteNanos += o.elapsedDiskWriteNanos;
-
-            nforce += o.nforce;
-            ntruncate += o.ntruncate;
-            nreopen += o.nreopen;
-            nwriteRootBlock += o.nwriteRootBlock;
-
-        }
-
-        /**
-         * Returns a new {@link StoreCounters} containing the current counter values
-         * minus the given counter values.
-         *
-         * @param o
-         *
-         * @return
-         */
-        public StoreCounters subtract(final StoreCounters o) {
-
-            // make a copy of the current counters.
-            final StoreCounters t = new StoreCounters(this);
-
-            // subtract out the given counters.
-            t.nreads -= o.nreads;
-            t.ncacheRead -= o.ncacheRead;
-            t.ndiskRead -= o.ndiskRead;
-            t.bytesRead -= o.bytesRead;
-            t.bytesReadFromDisk -= o.bytesReadFromDisk;
-            t.maxReadSize -= o.maxReadSize;
-            t.elapsedReadNanos -= o.elapsedReadNanos;
-            t.elapsedCacheReadNanos -= o.elapsedCacheReadNanos;
-            t.elapsedDiskReadNanos -= o.elapsedDiskReadNanos;
-
-            t.nwrites -= o.nwrites;
-            t.ncacheWrite -= o.ncacheWrite;
-            t.ncacheFlush -= o.ncacheFlush;
-            t.ndiskWrite -= o.ndiskWrite;
-            t.maxWriteSize -= o.maxWriteSize;
-            t.bytesWritten -= o.bytesWritten;
-            t.bytesWrittenOnDisk -= o.bytesWrittenOnDisk;
-            t.elapsedWriteNanos -= o.elapsedWriteNanos;
-            t.elapsedCacheWriteNanos -= o.elapsedCacheWriteNanos;
-            t.elapsedDiskWriteNanos -= o.elapsedDiskWriteNanos;
-
-            t.nforce -= o.nforce;
-            t.ntruncate -= o.ntruncate;
-            t.nreopen -= o.nreopen;
-            t.nwriteRootBlock -= o.nwriteRootBlock;
-
-            return t;
-
-        }
-
-        synchronized public CounterSet getCounters() {
-
-            if (root == null) {
-
-                root = new CounterSet();
-
-                // IRawStore API
-                {
-
-                    /*
-                     * reads
-                     */
-
-                    root.addCounter("nreads", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(nreads);
-                        }
-                    });
-
-                    root.addCounter("bytesRead", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(bytesRead);
-                        }
-                    });
-
-                    root.addCounter("readSecs", new Instrument<Double>() {
-                        public void sample() {
-                            final double elapsedReadSecs = (elapsedReadNanos / 1000000000.);
-                            setValue(elapsedReadSecs);
-                        }
-                    });
-
-                    root.addCounter("bytesReadPerSec",
-                            new Instrument<Double>() {
-                                public void sample() {
-                                    final double readSecs = (elapsedReadNanos / 1000000000.);
-                                    final double bytesReadPerSec = (readSecs == 0L ? 0d
-                                            : (bytesRead / readSecs));
-                                    setValue(bytesReadPerSec);
-                                }
-                            });
-
-                    root.addCounter("maxReadSize", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(maxReadSize);
-                        }
-                    });
-
-                    /*
-                     * writes
-                     */
-
-                    root.addCounter("nwrites", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(nwrites);
-                        }
-                    });
-
-                    root.addCounter("bytesWritten", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(bytesWritten);
-                        }
-                    });
-
-                    root.addCounter("writeSecs", new Instrument<Double>() {
-                        public void sample() {
-                            final double writeSecs = (elapsedWriteNanos / 1000000000.);
-                            setValue(writeSecs);
-                        }
-                    });
-
-                    root.addCounter("bytesWrittenPerSec",
-                            new Instrument<Double>() {
-                                public void sample() {
-                                    final double writeSecs = (elapsedWriteNanos / 1000000000.);
-                                    final double bytesWrittenPerSec = (writeSecs == 0L ? 0d
-                                            : (bytesWritten / writeSecs));
-                                    setValue(bytesWrittenPerSec);
-                                }
-                            });
-
-                    root.addCounter("maxWriteSize", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(maxWriteSize);
-                        }
-                    });
-
-                }
-
-                /*
-                 * write cache statistics
-                 */
-                {
-
-                    final CounterSet writeCache = root.makePath("writeCache");
-
-                    /*
-                     * read
-                     */
-                    writeCache.addCounter("nread", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ncacheRead);
-                        }
-                    });
-
-                    writeCache.addCounter("readHitRate", new Instrument<Double>() {
-                        public void sample() {
-                            setValue(nreads == 0L ? 0d : (double) ncacheRead
-                                    / nreads);
-                        }
-                    });
-
-                    writeCache.addCounter("readSecs", new Instrument<Double>() {
-                        public void sample() {
-                            setValue(elapsedCacheReadNanos / 1000000000.);
-                        }
-                    });
-
-                    /*
-                     * write
-                     */
-
-                    // #of writes on the write cache.
-                    writeCache.addCounter("nwrite", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ncacheWrite);
-                        }
-                    });
-
-                    /*
-                     * % of writes that are buffered vs writing through to the
-                     * disk.
-                     *
-                     * Note: This will be 1.0 unless you are writing large
-                     * records. Large records are written directly to the disk
-                     * rather than first into the write cache. When this happens
-                     * the writeHitRate on the cache can be less than one.
-                     */
-                    writeCache.addCounter("writeHitRate", new Instrument<Double>() {
-                        public void sample() {
-                            setValue(nwrites == 0L ? 0d : (double) ncacheWrite
-                                    / nwrites);
-                        }
-                    });
-
-                    writeCache.addCounter("writeSecs", new Instrument<Double>() {
-                        public void sample() {
-                            setValue(elapsedCacheWriteNanos / 1000000000.);
-                        }
-                    });
-
-                    // #of times the write cache was flushed to the disk.
-                    writeCache.addCounter("nflush", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ncacheFlush);
-                        }
-                    });
-
-                }
-
-                // disk statistics
-                {
-                    final CounterSet disk = root.makePath("disk");
-
-                    /*
-                     * read
-                     */
-
-                    disk.addCounter("nreads", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ndiskRead);
-                        }
-                    });
-
-                    disk.addCounter("bytesRead", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(bytesReadFromDisk);
-                        }
-                    });
-
-                    disk.addCounter("bytesPerRead", new Instrument<Double>() {
-                        public void sample() {
-                            final double bytesPerDiskRead = (ndiskRead == 0 ? 0d
-                                    : (bytesReadFromDisk / (double)ndiskRead));
-                            setValue(bytesPerDiskRead);
-                        }
-                    });
-
-                    disk.addCounter("readSecs", new Instrument<Double>() {
-                        public void sample() {
-                            final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.);
-                            setValue(diskReadSecs);
-                        }
-                    });
-
-                    disk.addCounter("bytesReadPerSec",
-                            new Instrument<Double>() {
-                                public void sample() {
-                                    final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.);
-                                    final double bytesReadPerSec = (diskReadSecs == 0L ? 0d
-                                            : bytesReadFromDisk / diskReadSecs);
-                                    setValue(bytesReadPerSec);
-                                }
-                            });
-
-                    disk.addCounter("secsPerRead", new Instrument<Double>() {
-                        public void sample() {
-                            final double diskReadSecs = (elapsedDiskReadNanos / 1000000000.);
-                            final double readLatency = (diskReadSecs == 0 ? 0d
-                                    : diskReadSecs / ndiskRead);
-                            setValue(readLatency);
-                        }
-                    });
-
-                    /*
-                     * write
-                     */
-
-                    disk.addCounter("nwrites", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ndiskWrite);
-                        }
-                    });
-
-                    disk.addCounter("bytesWritten", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(bytesWrittenOnDisk);
-                        }
-                    });
-
-                    disk.addCounter("bytesPerWrite", new Instrument<Double>() {
-                        public void sample() {
-                            final double bytesPerDiskWrite = (ndiskWrite == 0 ? 0d
-                                    : (bytesWrittenOnDisk / (double)ndiskWrite));
-                            setValue(bytesPerDiskWrite);
-                        }
-                    });
-
-                    disk.addCounter("writeSecs", new Instrument<Double>() {
-                        public void sample() {
-                            final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.);
-                            setValue(diskWriteSecs);
-                        }
-                    });
-
-                    disk.addCounter("bytesWrittenPerSec",
-                            new Instrument<Double>() {
-                                public void sample() {
-                                    final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.);
-                                    final double bytesWrittenPerSec = (diskWriteSecs == 0L ? 0d
-                                            : bytesWrittenOnDisk
-                                                    / diskWriteSecs);
-                                    setValue(bytesWrittenPerSec);
-                                }
-                            });
-
-                    disk.addCounter("secsPerWrite", new Instrument<Double>() {
-                        public void sample() {
-                            final double diskWriteSecs = (elapsedDiskWriteNanos / 1000000000.);
-                            final double writeLatency = (diskWriteSecs == 0 ? 0d
                                    : diskWriteSecs / ndiskWrite);
-                            setValue(writeLatency);
-                        }
-                    });
-
-                    /*
-                     * other
-                     */
-
-                    disk.addCounter("nforce", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(nforce);
-                        }
-                    });
-
-                    disk.addCounter("nextend", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(ntruncate);
-                        }
-                    });
-
-                    disk.addCounter("nreopen", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(nreopen);
-                        }
-                    });
-
-                    disk.addCounter("rootBlockWrites", new Instrument<Long>() {
-                        public void sample() {
-                            setValue(nwriteRootBlock);
-                        }
-                    });
-
-                }
-
-            }
-
-            return root;
-
-        }
-        private CounterSet root;
-
-        /**
-         * Human readable representation of the counters.
-         */
-        public String toString() {
-
-            return getCounters().toString();
-
-        }
-
-    }
+//    /**
+//     * Counters for {@link IRawStore} access, including operations that read or
+//     * write through to the underlying media.
+//     *
+//     * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+//     * @version $Id$
+//     *
+//     * @todo report elapsed time and average latency for force, reopen, and
+//     *       writeRootBlock.
+//     *
+//     * @todo counters need to be atomic if we want to avoid the possibility of
+//     *       concurrent <code>x++</code> operations failing to correctly
+//     *       increment <code>x</code> for each request.
+//     */
+//    public static class StoreCounters {
+//
+//        /**
+//         * #of read requests.
+//         */
+//        public long nreads;
+//
+//        /**
+//         * #of read requests that are satisfied by our write cache (vs the
+//         * OS or disk level write cache).
+//         */
+//        public long ncacheRead;
+//
+//        /**
+//         * #of read requests that read through to the backing file.
+//         */
+//        public long ndiskRead;
+//
+//        /**
+//         * #of bytes read.
+//         */
+//        public long bytesRead;
+//
+//        /**
+//         * #of bytes that have been read from the disk.
+//         */
+//        public long bytesReadFromDisk;
+//
+//        /**
+//         * The size of the largest record read.
+//         */
+//        public long maxReadSize;
+//
+//        /**
+//         * Total elapsed time for reads.
+//         */
+//        public long elapsedReadNanos;
+//
+//        /**
+//         * Total elapsed time checking the disk write cache for records to be
+//         * read.
+//         */
+//        public long elapsedCacheReadNanos;
+//
+//        /**
+//         * Total elapsed time for reading on the disk.
+//         */
+//        public long elapsedDiskReadNanos;
+//
+//        /**
+//         * #of write requests.
+//         */
+//        public long nwrites;
+//
+//        /**
+//         * #of write requests that are absorbed by our write cache (vs the OS or
+//         * disk level write cache).
+//         */
+//        public long ncacheWrite;
+//
+//        /**
+//         * #of times the write cache was flushed to disk.
+//         */
+//        public long ncacheFlush;
+//
+//        /**
+//         * #of write requests that write through to the backing file.
+//         */
+//        public long ndiskWrite;
+//
+//        /**
+//         * The size of the largest record written.
+//         */
+//        public long maxWriteSize;
+//
+//        /**
+//         * #of bytes written.
+//         */
+//        public long bytesWritten;
+//
+//        /**
+//         * #of bytes that have been written on the disk.
+//         */
+//        public long bytesWrittenOnDisk;
+//
+//        /**
+//         * Total elapsed time for writes.
+//         */
+//        public long elapsedWriteNanos;
+//
+//        /**
+//         * Total elapsed time writing records into the cache (does not count
+//         * time to flush the cache when it is full or to write records that do
+//         * not fit in the cache directly to the disk).
+//         */
+//        public long elapsedCacheWriteNanos;
+//
+//        /**
+//         * Total elapsed time for writing on the disk.
+//         */
+//        public long elapsedDiskWriteNanos;
+//
+//        /**
+//         * #of times the data were forced to the disk.
+//         */
+//        public long nforce;
+//
+//        /**
+//         * #of times the length of the file was changed (typically, extended).
+//         */
+//        public long ntruncate;
+//
+//        /**
+//         * #of times the file has been reopened after it was closed by an
+//         * interrupt.
+//         */
+//        public long nreopen;
+//
+//        /**
+//         * #of times one of the root blocks has been written.
+//         */
+//        public long nwriteRootBlock;
+//
+//        /**
+//         * Initialize a new set of counters.
+//         */
+//        public StoreCounters() {
+//
+//        }
+//
+//        /**
+//         * Copy ctor.
+//         * @param o
+//         */
+//        public StoreCounters(final StoreCounters o) {
+//
+//            add( o );
+//
+//        }
+//
+//        /**
+//         * Adds counters to the current counters.
+//         *
+//         * @param o
+//         */
+//        public void add(final StoreCounters o) {
+//
+//            nreads += o.nreads;
+//            ncacheRead += o.ncacheRead;
+//            ndiskRead += o.ndiskRead;
+//            bytesRead += o.bytesRead;
+//            bytesReadFromDisk += o.bytesReadFromDisk;
+//            maxReadSize += o.maxReadSize;
+//            elapsedReadNanos += o.elapsedReadNanos;
+//            elapsedCacheReadNanos += o.elapsedCacheReadNanos;
+//            elapsedDiskReadNanos += o.elapsedDiskReadNanos;
+//
+//            nwrites += o.nwrites;
+//            ncacheWrite += o.ncacheWrite;
+//            ncacheFlush += o.ncacheFlush;
+//            ndiskWrite += o.ndiskWrite;
+//            maxWriteSize += o.maxWriteSize;
+//            bytesWritten += o.bytesWritten;
+//            bytesWrittenOnDisk += o.bytesWrittenOnDisk;
+//            elapsedWriteNanos += o.elapsedWriteNanos;
+//            elapsedCacheWriteNanos += o.elapsedCacheWriteNanos;
+//            elapsedDiskWriteNanos += o.elapsedDiskWriteNanos;
+//
+//            nforce += o.nforce;
+//            ntruncate += o.ntruncate;
+//            nreopen += o.nreopen;
+//            nwriteRootBlock += o.nwriteRootBlock;
+//
+//        }
+//
+//        /**
+//         * Returns a new {@link StoreCounters} containing the current counter values
+//         * minus the given counter values.
+//         *
+//         * @param o
+//         *
+//         * @return
+//         */
+//        public StoreCounters subtract(final StoreCounters o) {
+//
+//            // make a copy of the current counters.
+//            final StoreCounters t = new StoreCounters(this);
+//
+//            // subtract out the given counters.
+//            t.nreads -= o.nreads;
+//            t.ncacheRead -= o.ncacheRead;
+//            t.ndiskRead -= o.ndiskRead;
+//            t.bytesRead -= o.bytesRead;
+//            t.bytesReadFromDisk -= o.bytesReadFromDisk;
+//            t.maxReadSize -= o.maxReadSize;
+//            t.elapsedReadNanos -= o.elapsedReadNanos;
+//            t.elapsedCacheReadNanos -= o.elapsedCacheReadNanos;
+//            t.elapsedDiskReadNanos -= o.elapsedDiskReadNanos;
+//
+//            t.nwrites -= o.nwrites;
+//            t.ncacheWrite -= o.ncacheWrite;
+//            t.ncacheFlush -= o.ncacheFlush;
+//            t.ndiskWrite -= o.ndiskWrite;
+//            t.maxWriteSize -= o.maxWriteSize;
+//            t.bytesWritten -= o.bytesWritten;
+//            t.bytesWrittenOnDisk -= o.bytesWrittenOnDisk;
+//            t.elapsedWriteNanos -= o.elapsedWriteNanos;
+//            t.elapsedCacheWriteNanos -= o.elapsedCacheWriteNanos;
+//            t.elapsedDiskWriteNanos -= o.elapsedDiskWriteNanos;
+//
+//            t.nforce -= o.nforce;
+//            t.ntruncate -= o.ntruncate;
+//            t.nreopen -= o.nreopen;
+//            t.nwriteRootBlock -= o.nwriteRootBlock;
+//
+//            return t;
+//
+//        }
+//
+//        synchronized public CounterSet getCounters() {
+//
+//            if (root == null) {
+//
+//                root = new CounterSet();
+//
+//                // IRawStore API
+//                {
+//
+//                    /*
+//                     * reads
+//                     */
+//
+//                    root.addCounter("nreads", new Instrument<Long>() {
+//                        public void sample() {
+//                            setValue(nreads);
+//                        }
+//                    });
+//
+//                    root.addCounter("bytesRead", new Instrument<Long>() {
+//                        public void sample() {
+//                            setValue(bytesRead);
+//                        }
+//                    });
+//
+//                    root.addCounter("readSecs", new Instrument<Double>() {
+//                        public void sample() {
+//                            final double elapsedReadSecs = (elapsedReadNanos / 1000000000.);
+//                            setValue(elapsedReadSecs);
+//                        }
+//                    });
+//
+//                    root.addCounter("bytesReadPerSec",
+//                            new Instrument<Double>() {
+//                                public void sample() {
+//                                    final double readSecs = (elapsedReadNanos / 1000000000.);
+//                                    final double bytesReadPerSec = (readSecs == 0L ? 0d
+//                                            : (bytesRead / readSecs));
+//                                    setValue(bytesReadPerSec);
+//                                }
+//                            });
+//
+//                    root.addCounter("maxReadSize", new Instrument<Long>() {
+//                        public void sample() {
+//                            setValue(maxReadSize);
+//                        }
+//                    });
+//
+//                    /*
+//                     * writ...
[truncated message content]
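On the @todo in StoreCounters about atomicity: a bare x++ on a shared long field is a read-modify-write and can lose increments under concurrency. One way such counters could be made atomic is java.util.concurrent.atomic.LongAdder; the sketch below is an assumption about a possible fix, not something this commit does:

    import java.util.concurrent.atomic.LongAdder;

    /** Sketch: contention-friendly atomic counters instead of racy x++. */
    final class AtomicStoreCounters {

        final LongAdder nreads = new LongAdder();
        final LongAdder bytesRead = new LongAdder();

        void onRead(final int nbytes) {
            nreads.increment();     // never loses an increment under contention
            bytesRead.add(nbytes);
        }

        long reads() { return nreads.sum(); }
        long bytes() { return bytesRead.sum(); }
    }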