From: <tho...@us...> - 2010-09-13 15:17:03
Revision: 3535
          http://bigdata.svn.sourceforge.net/bigdata/?rev=3535&view=rev
Author:   thompsonbry
Date:     2010-09-13 15:16:56 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
Modified the WriteCacheService to log cache evictions @ INFO.

Modified the bsbm ant script and properties to locate the correct log4j configuration file.

Fixed reporting for nclean and perhaps hitRate for the write cache / write cache service.

Modified Paths:
--------------
    branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
    branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
    branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties
    branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties
    branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java	2010-09-13 15:16:56 UTC (rev 3535)
@@ -48,6 +48,7 @@
 import org.apache.log4j.Logger;
 
 import com.bigdata.btree.IndexSegmentBuilder;
+import com.bigdata.counters.CAT;
 import com.bigdata.counters.CounterSet;
 import com.bigdata.counters.Instrument;
 import com.bigdata.io.DirectBufferPool;
@@ -792,7 +793,7 @@
         if ((md = recordMap.get(offset)) == null) {
 
             // The record is not in this write cache.
-            counters.nmiss.incrementAndGet();
+            counters.nmiss.increment();
 
             return null;
         }
@@ -843,7 +844,7 @@
         }
 
-        counters.nhit.incrementAndGet();
+        counters.nhit.increment();
 
         if (log.isTraceEnabled()) {
 
             log.trace(show(dst, "read bytes"));
@@ -1329,12 +1330,12 @@
         /**
          * #of read requests that are satisfied by the write cache.
          */
-        public final AtomicLong nhit = new AtomicLong();
+        public final CAT nhit = new CAT();
 
         /**
         * The #of read requests that are not satisfied by the write cache.
         */
-        public final AtomicLong nmiss = new AtomicLong();
+        public final CAT nmiss = new CAT();
 
        /*
        * write on the cache.
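The WriteCache.java change above swaps the hot nhit/nmiss counters from AtomicLong to CAT. A plausible reading (an inference from the patch, not stated in the log message) is CAS contention: every read against the write cache bumps one of these counters, so a single AtomicLong cell becomes a coherence hotspot under concurrent readers, whereas a striped counter spreads increments across cells and only sums them when the value is read. A minimal sketch of that striping idea follows; the class name, stripe count, and hashing are illustrative rather than bigdata's actual CAT internals.

    // Illustration only: a CAT-style striped counter. Each thread hashes to
    // one of several cells so concurrent increment() calls rarely contend on
    // the same CAS; get() sums the cells and is therefore a racy snapshot.
    // A production counter would also pad cells to avoid false sharing.
    import java.util.concurrent.atomic.AtomicLongArray;

    public class StripedCounter {

        // Power-of-two stripe count so we can mask instead of mod.
        private static final int NSTRIPES = 16;

        private final AtomicLongArray cells = new AtomicLongArray(NSTRIPES);

        /** Increment the stripe owned by the calling thread. */
        public void increment() {
            final int i = (int) (Thread.currentThread().getId() & (NSTRIPES - 1));
            cells.incrementAndGet(i);
        }

        /** Sum across stripes; approximate while writers are active. */
        public long get() {
            long sum = 0L;
            for (int i = 0; i < NSTRIPES; i++)
                sum += cells.get(i);
            return sum;
        }
    }

The trade-off is that get() is not an atomic snapshot, which is acceptable for monitoring statistics such as hit and miss counts.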
Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java	2010-09-13 15:16:56 UTC (rev 3535)
@@ -628,10 +628,23 @@
             try {
                 cleanList.add(cache);
                 cleanListNotEmpty.signalAll();
-                counters.get().nclean = dirtyList.size();
+                counters.get().nclean = cleanList.size();
             } finally {
                 cleanListLock.unlock();
             }
+            if(log.isInfoEnabled()) {
+                final WriteCacheServiceCounters tmp = counters.get();
+                final long nhit = tmp.nhit.get();
+                final long ntests = nhit + tmp.nmiss.get();
+                final double hitRate=(ntests == 0L ? 0d : (double) nhit / ntests);
+                log.info("WriteCacheService: bufferSize="
+                        + buffers[0].capacity() + ",nbuffers="
+                        + tmp.nbuffers + ",nclean=" + tmp.nclean
+                        + ",ndirty=" + tmp.ndirty + ",maxDirty="
+                        + tmp.maxdirty + ",nflush=" + tmp.nflush
+                        + ",nwrite=" + tmp.nwrite + ",hitRate="
+                        + hitRate);
+            }
 
         } catch (InterruptedException t) {
 
             /*
@@ -1394,8 +1407,8 @@
     public boolean write(final long offset, final ByteBuffer data,
             final int chk, final boolean useChecksum)
             throws InterruptedException, IllegalStateException {
 
-        if (log.isInfoEnabled()) {
-            log.info("offset: " + offset + ", length: " + data.limit()
+        if (log.isTraceEnabled()) {
+            log.trace("offset: " + offset + ", length: " + data.limit()
                     + ", chk=" + chk + ", useChecksum=" + useChecksum);
         }
@@ -1675,8 +1688,8 @@
     protected boolean writeLargeRecord(final long offset, final ByteBuffer data,
             final int chk, final boolean useChecksum)
             throws InterruptedException, IllegalStateException {
 
-        if (log.isInfoEnabled()) {
-            log.info("offset: " + offset + ", length: " + data.limit() + ", chk=" + chk + ", useChecksum="
+        if (log.isTraceEnabled()) {
+            log.trace("offset: " + offset + ", length: " + data.limit() + ", chk=" + chk + ", useChecksum="
                     + useChecksum);
         }
@@ -1905,6 +1918,9 @@
         if (cache == null) {
 
             // No match.
+
+            counters.get().nmiss.increment();
+
             return null;
 
         }

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties	2010-09-13 15:16:56 UTC (rev 3535)
@@ -13,6 +13,21 @@
 com.bigdata.btree.writeRetentionQueue.capacity=4000
 com.bigdata.btree.BTree.branchingFactor=128
 
+# Reduce the branching factor for the lexicon since BSBM uses a lot of long
+# literals. Note that you have to edit this override to specify the namespace
+# into which the BSBM data will be loaded.
+com.bigdata.namespace.BSBM_284826.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=32
+com.bigdata.namespace.BSBM_284826.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=32
+
+# Override the #of write cache buffers.
+com.bigdata.journal.AbstractJournal.writeCacheBufferCount=12
+
+# Note: You must override the buffer capacity in build.xml on the
+# "run-load" target, but this would give you 10M write cache buffers
+# if you placed that override there.
+#
+# -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760
+
 # 200M initial extent.
 com.bigdata.journal.AbstractJournal.initialExtent=209715200
 com.bigdata.journal.AbstractJournal.maximumExtent=209715200
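Two details in the WriteCacheService changes above are worth spelling out. First, the nclean fix reports cleanList.size() instead of dirtyList.size() inside the cleanListLock region, so the counter now describes the list the lock actually guards. Second, the new INFO log line computes the hit rate defensively: nhit and nmiss are read once into locals and the division is guarded against a zero denominator. A standalone sketch of that expression (the wrapper class and main() are hypothetical; the expression itself mirrors the patch):

    // Standalone version of the hit-rate expression added above. The guard
    // matters because 0.0/0.0 in double arithmetic is NaN, which would make
    // the very first eviction log line print hitRate=NaN.
    public class HitRate {

        public static double hitRate(final long nhit, final long nmiss) {
            final long ntests = nhit + nmiss;
            return ntests == 0L ? 0d : (double) nhit / ntests;
        }

        public static void main(final String[] args) {
            System.out.println(hitRate(0L, 0L));   // 0.0 rather than NaN
            System.out.println(hitRate(75L, 25L)); // 0.75
        }
    }

On the RWStore.properties side, the overrides combine: with writeCacheBufferCount=12 and the suggested -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760 on the run-load target, the write cache would total 12 x 10,485,760 = 125,829,120 bytes, i.e. 120 MB.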
Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties	2010-09-13 15:16:56 UTC (rev 3535)
@@ -57,9 +57,9 @@
 # Laptop
 #bsbm.baseDir=d:/bigdata-perf-analysis/bsbm/bsbm_${bsbm.pc}
 # Server
-#bsbm.baseDir=/nas/data/bsbm/bsbm_${bsbm.pc}
+bsbm.baseDir=/nas/data/bsbm/bsbm_${bsbm.pc}
 # Windows 2008 Server
-bsbm.baseDir=c:/usr/local/data/bsbm/bsbm_${bsbm.pc}
+#bsbm.baseDir=c:/usr/local/data/bsbm/bsbm_${bsbm.pc}
 
 # Where to put the XML results files.
 bsbm.resultsDir=${bsbm.baseDir}/..
@@ -71,12 +71,12 @@
 bsbm.outputType=nt
 
 # Specify ".gz" or ".zip" if pre-generated files have been compressed.
-bsbm.compressType=
-#bsbm.compressType=".gz"
+#bsbm.compressType=
+bsbm.compressType=".gz"
 
 # Which mode to use for the Journal. (DiskRW or DiskWORM)
-#journalMode=RW
-journalMode=WORM
+journalMode=RW
+#journalMode=WORM
 
 # The name of the file containing the generated RDF data without the filename extension.
 bsbm.outputFile=${bsbm.baseDir}/dataset
@@ -89,11 +89,11 @@
 #bsbm.journalFile=${bsbm.baseDir}/bigdata-bsbm.worm
 #bsbm.journalFile=${bsbm.baseDir}/bigdata-bsbm.jnl
 # Note: This is on the large volume.
-#bsbm.journalFile=/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
+bsbm.journalFile=/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
 # Windows 2008 Server: SSD.
 #bsbm.journalFile=e:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
 # Windows 2008 Server: SAS.
-bsbm.journalFile=f:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
+#bsbm.journalFile=f:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
 
 #
 # Qualification of the system under test.
@@ -144,7 +144,7 @@
 
 # Use a specific seed (hot disk cache run with only JVM tuning effects).
 #bsbm.seed=1273687925860
-bsbm.seed=1273687925861
+bsbm.seed=919191
 
 #
 # Profiler parameters.
@@ -167,7 +167,7 @@
 profiler=${profilerAgent} ${profilerAgentOptions}
 
 # Configure GC.
-gcopts=
+#gcopts=
 #gcopts=-verbose:gc
 #gcopts=-XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode
 gcopts=-XX:+UseParallelOldGC
@@ -191,4 +191,5 @@
 ## -Dcom.bigdata.LRUNexus.percentHeap=.1
 
 # all jvm args for query.
-queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties
+queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:log4j.properties
+# -Dlog4j.debug

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml	2010-09-13 15:16:56 UTC (rev 3535)
@@ -50,14 +50,14 @@
             <exclude name="**/*.java" />
             <exclude name="**/package.html" />
         </fileset>
-        <!-- copy log4j configuration file. -->
-        <fileset dir="${bsbm.dir}/src/resources/logging" />
     </copy>
 
     <copy toDir="${build.dir}/bin">
         <!-- copy benchmark data and queries. -->
         <fileset dir="${bsbm.dir}/src/resources/bsbm-data" />
         <!-- copy the journal configuration file. -->
         <fileset file="${bsbm.dir}/*.properties" />
+        <!-- copy log4j configuration file. -->
+        <fileset dir="${bsbm.dir}/src/resources/logging" />
     </copy>
 </target>
@@ -144,7 +144,10 @@
     <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true" dir="${build.dir}/bin">
         <arg line="-namespace ${bsbm.namespace} ${bsbm.journalPropertyFile} ${bsbm.outputFile}.${bsbm.outputType}${bsbm.compressType}" />
         <!-- specify/override the journal file name. -->
-        <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${bsbm.journalFile}" />
+        <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${bsbm.journalFile}
+            -Dcom.bigdata.rdf.store.DataLoader.bufferCapacity=1000000
+            -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760
+            " />
         <classpath>
             <path refid="runtime.classpath" />
         </classpath>
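The log4j pieces of this commit fit together: build.xml now copies the log4j configuration into ${build.dir}/bin, the working directory of the forked benchmark JVMs, and build.properties passes -Dlog4j.configuration=file:log4j.properties so the value is an explicit file: URL. Assuming log4j 1.2's default initialization semantics (an assumption consistent with the fix, not something the commit states), the property is tried as a URL first and only falls back to a classpath lookup when URL parsing fails, so the file: scheme pins the configuration to that copied file. A small sketch of that resolution order, with a hypothetical class name and output:

    // Sketch of the URL-first resolution order assumed above; this is a
    // standalone demonstration, not log4j's actual code.
    import java.net.MalformedURLException;
    import java.net.URL;

    public class Log4jConfigResolution {

        public static void main(final String[] args) {
            final String value = System.getProperty("log4j.configuration",
                    "file:log4j.properties");
            try {
                // "file:log4j.properties" parses as a URL, resolving against
                // the JVM working directory.
                final URL url = new URL(value);
                System.out.println("configure from URL: " + url);
            } catch (MalformedURLException e) {
                // A bare "log4j.properties" has no scheme and lands here;
                // log4j would then search the classpath instead.
                System.out.println("fall back to classpath resource: " + value);
            }
        }
    }

The commented-out -Dlog4j.debug left in build.properties is the stock switch for making log4j report which configuration source it actually picked, useful if the lookup ever regresses.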