This list is closed, nobody may subscribe to it.
2010 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
(139) |
Aug
(94) |
Sep
(232) |
Oct
(143) |
Nov
(138) |
Dec
(55) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(127) |
Feb
(90) |
Mar
(101) |
Apr
(74) |
May
(148) |
Jun
(241) |
Jul
(169) |
Aug
(121) |
Sep
(157) |
Oct
(199) |
Nov
(281) |
Dec
(75) |
2012 |
Jan
(107) |
Feb
(122) |
Mar
(184) |
Apr
(73) |
May
(14) |
Jun
(49) |
Jul
(26) |
Aug
(103) |
Sep
(133) |
Oct
(61) |
Nov
(51) |
Dec
(55) |
2013 |
Jan
(59) |
Feb
(72) |
Mar
(99) |
Apr
(62) |
May
(92) |
Jun
(19) |
Jul
(31) |
Aug
(138) |
Sep
(47) |
Oct
(83) |
Nov
(95) |
Dec
(111) |
2014 |
Jan
(125) |
Feb
(60) |
Mar
(119) |
Apr
(136) |
May
(270) |
Jun
(83) |
Jul
(88) |
Aug
(30) |
Sep
(47) |
Oct
(27) |
Nov
(23) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2016 |
Jan
|
Feb
|
Mar
(4) |
Apr
(1) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: <tho...@us...> - 2010-09-14 19:14:36
|
Revision: 3549 http://bigdata.svn.sourceforge.net/bigdata/?rev=3549&view=rev Author: thompsonbry Date: 2010-09-14 19:14:30 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Modified LoadStats and ClosureStats to use CATs in preparation for a concurrent data loader. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/ClosureStats.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/LoadStats.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TaskATest.java branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/ClosureStats.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/ClosureStats.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/ClosureStats.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -24,6 +24,8 @@ */ package com.bigdata.rdf.inf; +import com.bigdata.counters.CAT; + /** * Statistics collected when performing inference. * @@ -38,13 +40,13 @@ * change in the #of statements in the database across the closure * operation. */ - public long mutationCount; + public final CAT mutationCount = new CAT(); /** * Time to compute the entailments and store them within the database * (milliseconds). */ - public long elapsed; + public final CAT elapsed = new CAT(); public ClosureStats() { @@ -55,26 +57,26 @@ * @param mutationCount * @param elapsed */ - public ClosureStats(long mutationCount,long elapsed) { + public ClosureStats(final long mutationCount,final long elapsed) { - this.mutationCount = mutationCount; + this.mutationCount.set(mutationCount); - this.elapsed = elapsed; + this.elapsed.set( elapsed); } - public synchronized void add(ClosureStats o) { + public void add(final ClosureStats o) { - this.mutationCount += o.mutationCount; + this.mutationCount.add( o.mutationCount.get()); - this.elapsed += o.elapsed; + this.elapsed.add(o.elapsed.get()); } public String toString() { - return getClass().getSimpleName() + "{mutationCount=" + mutationCount - + ", elapsed=" + elapsed + "ms}"; + return getClass().getSimpleName() + "{mutationCount=" + mutationCount.estimate_get() + + ", elapsed=" + elapsed.estimate_get() + "ms}"; } Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/inf/TruthMaintenance.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -440,7 +440,7 @@ final long elapsed = System.currentTimeMillis() - begin; - stats.elapsed += elapsed; + stats.elapsed.add(elapsed); if (INFO) log.info("Computed closure in " + elapsed + "ms"); Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java =================================================================== --- 
branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -148,7 +148,7 @@ } // Note: IFF the task succeeds! - toldTriples.addAndGet(loadStats.toldTriples); + toldTriples.addAndGet(loadStats.toldTriples.get()); } @@ -194,9 +194,11 @@ final long now = System.currentTimeMillis(); - stats.toldTriples = nstmts; + stats.toldTriples.set(nstmts); - stats.totalTime = stats.loadTime = now - begin; + stats.totalTime.set( now - begin ); + + stats.loadTime.set( now - begin ); /* * This reports the load rate for the file, but this will only Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/LoadStats.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/LoadStats.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rio/LoadStats.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -24,6 +24,7 @@ */ package com.bigdata.rdf.rio; +import com.bigdata.counters.CAT; import com.bigdata.rdf.inf.ClosureStats; /** @@ -34,12 +35,12 @@ */ public class LoadStats { - public long toldTriples; - public long loadTime; - public long commitTime; - public long totalTime; + public final CAT toldTriples = new CAT(); + public final CAT loadTime = new CAT(); + public final CAT commitTime = new CAT(); + public final CAT totalTime = new CAT(); - private transient long lastReportTime = 0l; + private transient volatile long lastReportTime = 0l; /** * The internal with which this class will log on {@link System#out} in @@ -55,19 +56,19 @@ public long triplesPerSecond() { - return ((long) (((double) toldTriples) / ((double) totalTime) * 1000d)); + return ((long) (((double) toldTriples.estimate_get()) / ((double) totalTime.estimate_get()) * 1000d)); } public void add(final LoadStats stats) { - toldTriples += stats.toldTriples; + toldTriples.add(stats.toldTriples.get()); - loadTime += stats.loadTime; + loadTime.add(stats.loadTime.get()); - commitTime += stats.commitTime; + commitTime.add(stats.commitTime.get()); - totalTime += stats.totalTime; + totalTime.add(stats.totalTime.get()); if (stats.closureStats != null) { @@ -82,7 +83,7 @@ if (lastReportTime == 0L) { - if (loadTime >= REPORT_INTERVAL) { + if (loadTime.estimate_get() >= REPORT_INTERVAL) { System.out.println("loading: " + toString()); @@ -111,14 +112,14 @@ return toldTriples + " stmts added in " - + ((double) loadTime) + + ((double) loadTime.estimate_get()) / 1000d + " secs, rate= " + triplesPerSecond() + ", commitLatency=" - + commitTime + + commitTime.estimate_get() + "ms" - + (closureStats.elapsed!=0L? "\n"+closureStats.toString() : ""); + + (closureStats.elapsed.estimate_get()!=0L? 
"\n"+closureStats.toString() : ""); } Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -230,8 +230,8 @@ if (buffer != null) { - if(log.isInfoEnabled()) - log.info("Flushing the buffer."); + if(log.isDebugEnabled()) + log.debug("Flushing the buffer."); buffer.flush(); @@ -610,7 +610,7 @@ database.commit(); - totals.commitTime += System.currentTimeMillis() - beginCommit; + totals.commitTime.add(System.currentTimeMillis() - beginCommit); if (log.isInfoEnabled()) log.info("commit: latency="+totals.commitTime+"ms"); @@ -838,8 +838,8 @@ if (file.isDirectory()) { - if (log.isInfoEnabled()) - log.info("loading directory: " + file); + if (log.isDebugEnabled()) + log.debug("loading directory: " + file); // final LoadStats loadStats = new LoadStats(); @@ -1007,9 +1007,9 @@ final long nstmts = loader.getStatementsAdded(); - stats.toldTriples = nstmts; + stats.toldTriples.set( nstmts ); - stats.loadTime = System.currentTimeMillis() - begin; + stats.loadTime.set(System.currentTimeMillis() - begin); if (closureEnum == ClosureEnum.Incremental || (endOfBatch && closureEnum == ClosureEnum.Batch)) { @@ -1037,20 +1037,24 @@ database.commit(); - stats.commitTime = System.currentTimeMillis() - beginCommit; + stats.commitTime.set(System.currentTimeMillis() - beginCommit); if (log.isInfoEnabled()) log.info("commit: latency=" + stats.commitTime + "ms"); } - stats.totalTime = System.currentTimeMillis() - begin; + stats.totalTime.set(System.currentTimeMillis() - begin); + // aggregate stats + totals.add(stats); + if (log.isInfoEnabled()) { - log.info(stats.toString()); + log.info("file:: " + stats + "; totals:: " + totals); if (buffer != null && buffer.getDatabase() instanceof AbstractLocalTripleStore) { - log.info(((AbstractLocalTripleStore) buffer.getDatabase()) + if(log.isDebugEnabled()) + log.debug(((AbstractLocalTripleStore) buffer.getDatabase()) .getLocalBTreeBytesWritten(new StringBuilder()) .toString()); } @@ -1060,6 +1064,9 @@ } catch ( Exception ex ) { + // aggregate stats even for exceptions. + totals.add(stats); + /* * Note: discard anything in the buffer in case auto-flush is * disabled. This prevents the buffer from retaining data after a @@ -1096,11 +1103,11 @@ throw ex2; - } finally { +// } finally { +// +// // aggregate regardless of the outcome. +// totals.add(stats); - // aggregate regardless of the outcome. 
- totals.add(stats); - } } @@ -1436,8 +1443,8 @@ || (name.endsWith(".gz") && RDFFormat.forFileName(name .substring(0, name.length() - 3)) != null); - if (log.isInfoEnabled()) - log.info("dir=" + dir + ", name=" + name + " : isRDF=" + isRDF); + if (log.isDebugEnabled()) + log.debug("dir=" + dir + ", name=" + name + " : isRDF=" + isRDF); return isRDF; Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TaskATest.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TaskATest.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TaskATest.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -475,23 +475,23 @@ // Explicit + (Entailments = Axioms + Inferred) - final long totalTriples = loadStats[run].toldTriples - + (closureStats!=null?closureStats.mutationCount : 0); + final long totalTriples = loadStats[run].toldTriples.get() + + (closureStats!=null?closureStats.mutationCount.get() : 0); // loadTime + closureTime + commitTime. - final long totalTime = loadStats[run].loadTime - + (closureStats != null ? closureStats.elapsed : 0) - + loadStats[run].commitTime; + final long totalTime = loadStats[run].loadTime.get() + + (closureStats != null ? closureStats.elapsed.get() : 0) + + loadStats[run].commitTime.get(); System.out.println( all_sources[ run * 3 ]+ ", " + ( errors[ run ] == null ? "Ok" +", "+loadStats[run].toldTriples - +", "+loadStats[run].loadTime/1000 - +", "+tps(loadStats[run].toldTriples,loadStats[run].loadTime) - +", "+(closureStats!=null?closureStats.mutationCount:"") - +", "+(closureStats!=null?closureStats.elapsed/1000:"") - +", "+(closureStats!=null?tps(closureStats.mutationCount,closureStats.elapsed):"") + +", "+loadStats[run].loadTime.get()/1000 + +", "+tps(loadStats[run].toldTriples.get(),loadStats[run].loadTime.get()) + +", "+(closureStats!=null?closureStats.mutationCount.get():"") + +", "+(closureStats!=null?closureStats.elapsed.get()/1000:"") + +", "+(closureStats!=null?tps(closureStats.mutationCount.get(),closureStats.elapsed.get()):"") +", "+loadStats[run].commitTime +", "+tps(totalTriples,totalTime) Modified: branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java 2010-09-14 19:13:44 UTC (rev 3548) +++ branches/JOURNAL_HA_BRANCH/bigdata-rdf/src/test/com/bigdata/rdf/metrics/TestMetrics.java 2010-09-14 19:14:30 UTC (rev 3549) @@ -1085,7 +1085,7 @@ /* * #of explicit statements loaded. */ - toldTriples = loadStats.toldTriples; + toldTriples = loadStats.toldTriples.get(); /* * This is the elapsed time for the entire transaction in which the file @@ -1093,19 +1093,19 @@ * store, and the time required to perform the transaction commit. */ // transactionTime = System.currentTimeMillis() - begin; - transactionTime = loadStats.totalTime; + transactionTime = loadStats.totalTime.get(); /* * This is the time required to load the triples exclusive of the * startup and commit time for the transaction. */ - loadTime = loadStats.loadTime; + loadTime = loadStats.loadTime.get(); /* * A pragmatic estimate of the commit time that assumes the transaction * start time is zero. 
*/ - commitTime = loadStats.commitTime; + commitTime = loadStats.commitTime.get(); // commitTime = transactionTime - loadTime; /* @@ -1132,7 +1132,7 @@ statementsAdded = statementCount1 - statementCount0; // inferencesAdded = inferenceCount1 - inferenceCount0; - inferencesAdded = loadStats.closureStats.mutationCount; + inferencesAdded = loadStats.closureStats.mutationCount.get(); // int explicitAdded = statementsAdded - inferencesAdded; proofsAdded = proofCount1 - proofCount0; urisAdded = uriCount1 - uriCount0; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 19:13:50
|
Revision: 3548 http://bigdata.svn.sourceforge.net/bigdata/?rev=3548&view=rev Author: thompsonbry Date: 2010-09-14 19:13:44 +0000 (Tue, 14 Sep 2010) Log Message: ----------- simplified logging Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/resources/logging/log4j.properties Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-14 17:52:22 UTC (rev 3547) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-14 19:13:44 UTC (rev 3548) @@ -7,7 +7,6 @@ # You probably want to set the default log level to ERROR. # log4j.rootCategory=WARN, dest1 -#log4j.rootCategory=WARN, dest2 # Loggers. # Note: logging here at INFO or DEBUG will significantly impact throughput! @@ -20,177 +19,17 @@ #log4j.logger.com.bigdata.rdf.sail.bench.NanoSparqlServer=INFO log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR -# My Stuff -#log4j.logger.com.bigdata.rdf.sail.BigdataSail=DEBUG -#log4j.logger.com.bigdata.rdf.sail.TestNamedGraphs=DEBUG -#log4j.logger.com.bigdata.rdf.sail.QuadsTestCase=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryWithJoinThreadsTask=DEBUG -#log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl=INFO -#log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO -#log4j.logger.com.bigdata.samples=INFO -#log4j.logger.com.bigdata.rdf.sail.tck=INFO - log4j.logger.com.bigdata.io.writecache.WriteCacheService=INFO -#log4j.logger.com.bigdata.io.DataOutputBuffer=INFO -#log4j.logger.com.bigdata.io.FileChannelUtility=INFO -#log4j.logger.com.bigdata.util.concurrent=INFO - -#log4j.logger.com.bigdata.btree=WARN - -#log4j.logger.com.bigdata.service.ResourceService=INFO -#log4j.logger.com.bigdata.service.ResourceService$ReadResourceTask=INFO - -#log4j.logger.com.bigdata.config=DEBUG - -#log4j.logger.com.bigdata.btree.BloomFilter=DEBUG -#log4j.logger.com.bigdata.btree.AbstractBTree=INFO -#log4j.logger.com.bigdata.btree.AbstractBTreeTupleCursor=DEBUG -#log4j.logger.com.bigdata.btree.IndexSegmentStore=DEBUG -#log4j.logger.com.bigdata.btree.IndexSegmentBuilder=INFO -#log4j.logger.com.bigdata.btree.TupleFilter=INFO -#log4j.logger.com.bigdata.btree.AbstractKeyArrayIndexProcedure=DEBUG -#log4j.logger.com.bigdata.btree.FusedTupleIterator=INFO -#log4j.logger.com.bigdata.btree.ResultSet=INFO -#log4j.logger.com.bigdata.btree.AbstractChunkedTupleIterator=DEBUG - -# detailed debugging for the canonical huffman coder -#log4j.logger.com.bigdata.btree.raba.codec.CanonicalHuffmanRabaCoder=DEBUG - -#log4j.logger.com.bigdata.journal=INFO -#log4j.logger.com.bigdata.journal.IJournal=INFO -#log4j.logger.com.bigdata.journal.CompactTask=INFO -#log4j.logger.com.bigdata.cache=DEBUG -#log4j.logger.com.bigdata.resources=INFO -#log4j.logger.com.bigdata.journal.ConcurrencyManager=INFO -#log4j.logger.com.bigdata.journal.QueueLengthTask=INFO -#log4j.logger.com.bigdata.journal.Name2Addr=INFO -#log4j.logger.com.bigdata.journal.AbstractTask=INFO -#log4j.logger.com.bigdata.journal.WriteExecutorService=INFO -#log4j.logger.com.bigdata.service.AbstractTransactionService=INFO -#log4j.logger.com.bigdata.journal.AbstractLocalTransactionManager=INFO -#log4j.logger.com.bigdata.concurrent.TxDag=WARN 
-#log4j.logger.com.bigdata.concurrent.NonBlockingLockManager=WARN -#log4j.logger.com.bigdata.concurrent.TestNonBlockingLockManager=INFO -#log4j.logger.com.bigdata.concurrent.AbstractStressTestNonBlockingLockManager=INFO -#log4j.logger.com.bigdata.concurrent.LockManager=INFO -#log4j.logger.com.bigdata.concurrent.LockManagerTask=INFO -#log4j.logger.com.bigdata.resources.ResourceEvents=WARN -#log4j.logger.com.bigdata.resources.IndexManager=INFO -#log4j.logger.com.bigdata.resources.ResourceManager=INFO -#log4j.logger.com.bigdata.resources.DefaultSplitHandler=INFO -#log4j.logger.com.bigdata.resources.OverflowManager=INFO -#log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=WARN -#log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO -#log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO -#log4j.logger.com.bigdata.repo=INFO -#log4j.logger.com.bigdata.search=INFO -#log4j.logger.com.bigdata.search.ReadIndexTask=INFO -#log4j.logger.com.bigdata.search.TokenBuffer=DEBUG -#log4j.logger.com.bigdata.sparse=DEBUG -#log4j.logger.com.bigdata.service=INFO - -#log4j.logger.com.bigdata.service.ndx=INFO -#log4j.logger.com.bigdata.service.ndx.pipeline=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=DEBUG -#log4j.logger.com.bigdata.concurrent.Latch=DEBUG - -#log4j.logger.com.bigdata.relation=INFO -#log4j.logger.com.bigdata.relation.accesspath=INFO -#log4j.logger.com.bigdata.relation.locator=INFO -#log4j.logger.com.bigdata.relation.rule=INFO - -#log4j.logger.com.bigdata.relation.rule.eval=INFO -#log4j.logger.com.bigdata.relation.rule.eval.RuleState=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryEvaluator=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.DefaultEvaluationPlan=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.AbstractArrayBuffer=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=INFO - # Rule execution (query, closure, etc). 
#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO -#log4j.logger.com.bigdata.service.IBigdataFederation=DEBUG -#log4j.logger.com.bigdata.service.LoadBalancerService=INFO -#log4j.logger.com.bigdata.service.LoadBalancerService$UpdateTask=INFO -#log4j.logger.com.bigdata.service.DataService$StatusTask=INFO -#log4j.logger.com.bigdata.service.DataService$ReportTask=INFO -#log4j.logger.com.bigdata.service.DataService$StartPerformanceCounterCollectionTask=INFO -#log4j.logger.com.bigdata.service.ndx.ClientIndexView=INFO - -#log4j.logger.com.bigdata.service.mapReduce=INFO -#log4j.logger.com.bigdata.striterator=INFO -#log4j.logger.com.bigdata.counters=INFO -#log4j.logger.com.bigdata.counters.win=DEBUG -#log4j.logger.com.bigdata.counters.linux=INFO -#log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR -#log4j.logger.com.bigdata.counters.History=ERROR -#log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO -#log4j.logger.com.bigdata.counters.store=INFO -#log4j.logger.com.bigdata.counters.httpd=INFO -#log4j.logger.com.bigdata.counters.query.CounterSetBTreeSelector=INFO -#log4j.logger.com.bigdata.counters.query.PivotTable=INFO -#log4j.logger.com.bigdata.counters.query=INFO -#log4j.logger.com.bigdata.counters.XMLUtility=INFO -#log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=INFO -#log4j.logger.com.bigdata.util.httpd.NanoHTTPD=INFO -#log4j.logger.com.bigdata.journal.TestConcurrentWritersOnNamedIndices=DEBUG -#log4j.logger.com.bigdata.concurrent=INFO - -#log4j.logger.com.bigdata.rdf.rules=INFO - -#log4j.logger.com.bigdata.rdf=INFO -#log4j.logger.com.bigdata.rdf.spo.DistinctTermScanner$DistinctTermScanTask=INFO -#log4j.logger.com.bigdata.rdf.store.ITripleStore=DEBUG -#log4j.logger.com.bigdata.rdf.rio=TRACE -#log4j.logger.com.bigdata.rdf.load=DEBUG -#log4j.logger.com.bigdata.rdf.rio.StatementBuffer=INFO -#log4j.logger.com.bigdata.rdf.rio.AbstractStatementBuffer=INFO -#log4j.logger.com.bigdata.rdf.rio.AsynchronousStatementBufferFactory=INFO -#log4j.logger.com.bigdata.rdf.lexicon.LexiconRelation=INFO -#log4j.logger.com.bigdata.rdf.lexicon.TermIdEncoder=DEBUG -#log4j.logger.com.bigdata.rdf.lexicon.Term2IdWriteProc=DEBUG -#log4j.logger.com.bigdata.rdf.store.IndexWriteProc=WARN -#log4j.logger.com.bigdata.rdf.store.AbstractTripleStore=INFO -#log4j.logger.com.bigdata.rdf.store.BigdataStatementIteratorImpl=DEBUG -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader=INFO -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader$WorkflowTask=INFO -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader$VerifyStatementBuffer=INFO log4j.logger.com.bigdata.rdf.store.DataLoader=INFO -# Note: turns on ground truth testing (not scalable). -#log4j.logger.com.bigdata.rdf.store.Term2IdWriteProc=DEBUG -#log4j.logger.com.bigdata.rdf.spo.ISPOBuffer=DEBUG -#log4j.logger.com.bigdata.rdf.spo.SPOIterator=WARN -#log4j.logger.com.bigdata.rdf.inf.TruthMaintenance=DEBUG -#log4j.logger.com.bigdata.rdf.inf.InferenceEngine=INFO -#log4j.logger.com.bigdata.rdf.inf.Justification=DEBUG -#log4j.logger.com.bigdata.rdf.inf.BackchainOwlSameAs2=DEBUG -#log4j.logger.com.bigdata.rdf.spo.DefaultGraphSolutionExpander=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.IAccessPath=DEBUG - -# Lehigh benchmark integration -#log4j.logger.edu.lehigh.swat.bench.ubt.bigdata=INFO - # RIO #log4j.logger.com.bigdata.rdf.rio=INFO #log4j.logger.com.bigdata.rdf.rio=DEBUG -# Bulk data loader. 
-#log4j.logger.com.bigdata.rdf.load=INFO -#log4j.logger.com.bigdata.rdf.load=DEBUG -#log4j.logger.com.bigdata.util.concurrent.Latch=INFO -#log4j.logger.com.bigdata.rdf.load.RDFDataLoadMaster=INFO -#log4j.logger.com.bigdata.rdf.load.ConcurrentDataLoader=INFO - -# Normal data loader (single threaded). -#log4j.logger.com.bigdata.rdf.store.DataLoader=INFO - -# Test suite logger. -#log4j.logger.junit=INFO -#log4j.logger.junit=DEBUG -#log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO - # dest1 log4j.appender.dest1=org.apache.log4j.ConsoleAppender log4j.appender.dest1.layout=org.apache.log4j.PatternLayout @@ -200,16 +39,6 @@ #log4j.appender.dest1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n #log4j.appender.dest1.layout.ConversionPattern=%-4r(%d) [%t] %-5p %c(%l:%M) %x - %m%n -# dest2 includes the thread name and elapsed milliseconds. -# Note: %r is elapsed milliseconds. -# Note: %t is the thread name. -# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html -log4j.appender.dest2=org.apache.log4j.ConsoleAppender -log4j.appender.dest2.layout=org.apache.log4j.PatternLayout -log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n - -#log4j.logger.alex.LoadPdb=INFO - ## # Rule execution log. This is a formatted log file (comma delimited). log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/resources/logging/log4j.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/resources/logging/log4j.properties 2010-09-14 17:52:22 UTC (rev 3547) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/src/resources/logging/log4j.properties 2010-09-14 19:13:44 UTC (rev 3548) @@ -7,159 +7,20 @@ # You probably want to set the default log level to ERROR. # log4j.rootCategory=WARN, dest2 -#log4j.rootCategory=WARN, dest2 # Loggers. # Note: logging here at INFO or DEBUG will significantly impact throughput! 
log4j.logger.com.bigdata=WARN -#log4j.logger.com.bigdata.io.DataOutputBuffer=INFO -#log4j.logger.com.bigdata.io.FileChannelUtility=INFO -#log4j.logger.com.bigdata.util.concurrent=INFO log4j.logger.com.bigdata.btree=WARN -#log4j.logger.com.bigdata.cache.BCHMGlobalLRU2=TRACE +log4j.logger.com.bigdata.io.writecache.WriteCacheService=INFO #log4j.logger.com.bigdata.rdf.sail.bench.NanoSparqlServer=INFO log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR -log4j.logger.com.bigdata.service.ResourceService=INFO -log4j.logger.com.bigdata.service.ResourceService$ReadResourceTask=INFO - -#log4j.logger.com.bigdata.journal.AbstractBufferStrategy=TRACE - -#log4j.logger.com.bigdata.config=DEBUG - -#log4j.logger.com.bigdata.btree.BloomFilter=DEBUG -#log4j.logger.com.bigdata.btree.AbstractBTree=INFO -#log4j.logger.com.bigdata.btree.AbstractBTreeTupleCursor=DEBUG -#log4j.logger.com.bigdata.btree.IndexSegmentStore=DEBUG -#log4j.logger.com.bigdata.btree.IndexSegmentBuilder=INFO -#log4j.logger.com.bigdata.btree.TupleFilter=INFO -#log4j.logger.com.bigdata.btree.AbstractKeyArrayIndexProcedure=DEBUG -#log4j.logger.com.bigdata.btree.FusedTupleIterator=INFO -#log4j.logger.com.bigdata.btree.ResultSet=INFO -#log4j.logger.com.bigdata.btree.AbstractChunkedTupleIterator=DEBUG - -# detailed debugging for the canonical huffman coder -#log4j.logger.com.bigdata.btree.raba.codec.CanonicalHuffmanRabaCoder=DEBUG - -#log4j.logger.com.bigdata.journal=INFO -#log4j.logger.com.bigdata.journal.IJournal=INFO -log4j.logger.com.bigdata.journal.CompactTask=INFO -#log4j.logger.com.bigdata.cache=DEBUG -#log4j.logger.com.bigdata.resources=INFO -#log4j.logger.com.bigdata.journal.ConcurrencyManager=INFO -#log4j.logger.com.bigdata.journal.QueueLengthTask=INFO -#log4j.logger.com.bigdata.journal.Name2Addr=INFO -#log4j.logger.com.bigdata.journal.AbstractTask=INFO -#log4j.logger.com.bigdata.journal.WriteExecutorService=INFO -#log4j.logger.com.bigdata.service.AbstractTransactionService=INFO -#log4j.logger.com.bigdata.journal.AbstractLocalTransactionManager=INFO -log4j.logger.com.bigdata.concurrent.TxDag=WARN -log4j.logger.com.bigdata.concurrent.NonBlockingLockManager=WARN -log4j.logger.com.bigdata.concurrent.TestNonBlockingLockManager=INFO -log4j.logger.com.bigdata.concurrent.AbstractStressTestNonBlockingLockManager=INFO -#log4j.logger.com.bigdata.concurrent.LockManager=INFO -#log4j.logger.com.bigdata.concurrent.LockManagerTask=INFO -log4j.logger.com.bigdata.resources.SplitUtility=INFO -#log4j.logger.com.bigdata.resources.ResourceEvents=WARN -#log4j.logger.com.bigdata.resources.IndexManager=INFO -#log4j.logger.com.bigdata.resources.ResourceManager=INFO -#log4j.logger.com.bigdata.resources.DefaultSplitHandler=INFO -log4j.logger.com.bigdata.resources.OverflowManager=INFO -log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO -log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO -#log4j.logger.com.bigdata.resources.AbstractResourceManagerTask=INFO -#log4j.logger.com.bigdata.repo=INFO -#log4j.logger.com.bigdata.search=INFO -#log4j.logger.com.bigdata.search.ReadIndexTask=INFO -#log4j.logger.com.bigdata.search.TokenBuffer=DEBUG -#log4j.logger.com.bigdata.sparse=DEBUG -#log4j.logger.com.bigdata.service=INFO - -#log4j.logger.com.bigdata.service.ndx=INFO -#log4j.logger.com.bigdata.service.ndx.pipeline=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=DEBUG -#log4j.logger.com.bigdata.concurrent.Latch=DEBUG - -#log4j.logger.com.bigdata.relation=INFO -#log4j.logger.com.bigdata.relation.accesspath=INFO 
-#log4j.logger.com.bigdata.relation.locator=INFO -#log4j.logger.com.bigdata.relation.rule=INFO - -#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO -#log4j.logger.com.bigdata.relation.rule.eval=INFO -#log4j.logger.com.bigdata.relation.rule.eval.RuleState=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryEvaluator=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.DefaultEvaluationPlan=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.AbstractArrayBuffer=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=INFO - -#log4j.logger.com.bigdata.service.IBigdataFederation=DEBUG -#log4j.logger.com.bigdata.service.LoadBalancerService=INFO -#log4j.logger.com.bigdata.service.LoadBalancerService$UpdateTask=INFO -#log4j.logger.com.bigdata.service.DataService$StatusTask=INFO -#log4j.logger.com.bigdata.service.DataService$ReportTask=INFO -#log4j.logger.com.bigdata.service.DataService$StartPerformanceCounterCollectionTask=INFO -#log4j.logger.com.bigdata.service.ndx.ClientIndexView=INFO - -#log4j.logger.com.bigdata.service.mapReduce=INFO -#log4j.logger.com.bigdata.striterator=INFO -#log4j.logger.com.bigdata.counters=INFO -#log4j.logger.com.bigdata.counters.win=DEBUG -#log4j.logger.com.bigdata.counters.linux=INFO -log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR -log4j.logger.com.bigdata.counters.History=ERROR -log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO -#log4j.logger.com.bigdata.counters.store=INFO -#log4j.logger.com.bigdata.counters.httpd=INFO -#log4j.logger.com.bigdata.counters.query.CounterSetBTreeSelector=INFO -#log4j.logger.com.bigdata.counters.query.PivotTable=INFO -#log4j.logger.com.bigdata.counters.query=INFO -#log4j.logger.com.bigdata.counters.XMLUtility=INFO -#log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=INFO -#log4j.logger.com.bigdata.util.httpd.NanoHTTPD=INFO -#log4j.logger.com.bigdata.journal.TestConcurrentWritersOnNamedIndices=DEBUG -#log4j.logger.com.bigdata.concurrent=INFO - -#log4j.logger.com.bigdata.rdf.rules=INFO - -#log4j.logger.com.bigdata.rdf=INFO -#log4j.logger.com.bigdata.rdf.spo.DistinctTermScanner$DistinctTermScanTask=INFO -#log4j.logger.com.bigdata.rdf.store.ITripleStore=DEBUG -#log4j.logger.com.bigdata.rdf.rio=TRACE -#log4j.logger.com.bigdata.rdf.load=DEBUG -#log4j.logger.com.bigdata.rdf.rio.StatementBuffer=INFO -#log4j.logger.com.bigdata.rdf.rio.AbstractStatementBuffer=INFO -#log4j.logger.com.bigdata.rdf.rio.AsynchronousStatementBufferFactory=INFO -#log4j.logger.com.bigdata.rdf.lexicon.LexiconRelation=INFO -#log4j.logger.com.bigdata.rdf.lexicon.TermIdEncoder=DEBUG -#log4j.logger.com.bigdata.rdf.lexicon.Term2IdWriteProc=DEBUG -log4j.logger.com.bigdata.rdf.store.IndexWriteProc=WARN -log4j.logger.com.bigdata.rdf.store.AbstractTripleStore=INFO -#log4j.logger.com.bigdata.rdf.store.BigdataStatementIteratorImpl=DEBUG -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader=INFO -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader$WorkflowTask=INFO -#log4j.logger.com.bigdata.rdf.store.ConcurrentDataLoader$VerifyStatementBuffer=INFO log4j.logger.com.bigdata.rdf.store.DataLoader=INFO -# Note: turns on ground truth testing (not scalable). 
-#log4j.logger.com.bigdata.rdf.store.Term2IdWriteProc=DEBUG -#log4j.logger.com.bigdata.rdf.spo.ISPOBuffer=DEBUG -log4j.logger.com.bigdata.rdf.spo.SPOIterator=WARN -#log4j.logger.com.bigdata.rdf.inf.TruthMaintenance=DEBUG -#log4j.logger.com.bigdata.rdf.inf.InferenceEngine=INFO -#log4j.logger.com.bigdata.rdf.inf.Justification=DEBUG -#log4j.logger.com.bigdata.rdf.inf.BackchainOwlSameAs2=DEBUG -#log4j.logger.com.bigdata.rdf.spo.DefaultGraphSolutionExpander=DEBUG -#log4j.logger.com.bigdata.relation.accesspath.IAccessPath=DEBUG - -#log4j.logger.com.bigdata.rdf.sail.BigdataSail=DEBUG -#log4j.logger.com.bigdata.rdf.sail.TestNamedGraphs=DEBUG -log4j.logger.com.bigdata.rdf.sail.QuadsTestCase=DEBUG -#log4j.logger.com.bigdata.relation.rule.eval.NestedSubqueryWithJoinThreadsTask=DEBUG - # Lehigh benchmark integration log4j.logger.edu.lehigh.swat.bench.ubt.bigdata=INFO @@ -167,29 +28,6 @@ #log4j.logger.com.bigdata.rdf.rio=INFO #log4j.logger.com.bigdata.rdf.rio=DEBUG -# Bulk data loader. -log4j.logger.com.bigdata.rdf.load=INFO -#log4j.logger.com.bigdata.rdf.load=DEBUG -#log4j.logger.com.bigdata.util.concurrent.Latch=INFO -#log4j.logger.com.bigdata.rdf.load.RDFDataLoadMaster=INFO -#log4j.logger.com.bigdata.rdf.load.ConcurrentDataLoader=INFO - -# Normal data loader (single threaded). -log4j.logger.com.bigdata.rdf.store.DataLoader=INFO - -# Test suite logger. -log4j.logger.junit=INFO -#log4j.logger.junit=DEBUG -log4j.logger.com.bigdata.btree.AbstractBTreeTestCase=INFO - -# dest1 -log4j.appender.dest1=org.apache.log4j.ConsoleAppender -log4j.appender.dest1.layout=org.apache.log4j.PatternLayout -log4j.appender.dest1.layout.ConversionPattern=%-5p: %r %l: %m%n -#log4j.appender.dest1.layout.ConversionPattern=%-5p: %m%n -#log4j.appender.dest1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n -#log4j.appender.dest1.layout.ConversionPattern=%-4r(%d) [%t] %-5p %c(%l:%M) %x - %m%n - # dest2 includes the thread name and elapsed milliseconds. # Note: %r is elapsed milliseconds. # Note: %t is the thread name. @@ -197,5 +35,3 @@ log4j.appender.dest2=org.apache.log4j.ConsoleAppender log4j.appender.dest2.layout=org.apache.log4j.PatternLayout log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n - -#log4j.logger.alex.LoadPdb=INFO This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <sgo...@us...> - 2010-09-14 17:52:29
|
Revision: 3547 http://bigdata.svn.sourceforge.net/bigdata/?rev=3547&view=rev Author: sgossard Date: 2010-09-14 17:52:22 +0000 (Tue, 14 Sep 2010) Log Message: ----------- [maven_scaleout] : Breaking transitive dependency cycle between util, io, and counters packages. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentCheckpoint.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/FileMetadata.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RootBlockView.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/BlobAllocator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/FixedAllocator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/RWStore.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRollbackCommit.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRootBlockView.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestChecksumUtility.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ChecksumUtility.java Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/ChecksumUtility.java Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentCheckpoint.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentCheckpoint.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -37,7 +37,7 @@ import com.bigdata.journal.RootBlockException; import com.bigdata.rawstore.Bytes; import com.bigdata.rawstore.IAddressManager; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * The checkpoint record for an {@link IndexSegment}. Copied: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ChecksumUtility.java (from rev 3544, branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/ChecksumUtility.java) =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ChecksumUtility.java (rev 0) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ChecksumUtility.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -0,0 +1,160 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 5, 2006 + */ + +package com.bigdata.io; + +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.util.zip.Adler32; + +/** + * Utility class for computing the {@link Adler32} checksum of a buffer. This + * class is NOT thread-safe. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class ChecksumUtility { + + private static ThreadLocal threadChk = new ThreadLocal(); + /** + * static access to a ThreadLocal Checksum utility + * + * @return the ChecksumUtility + */ + public static ChecksumUtility getCHK() { + ChecksumUtility chk = (ChecksumUtility) threadChk.get(); + + if (chk == null) { + chk = new ChecksumUtility(); + threadChk.set(chk); + } + + return chk; + } + + /** + * Private helper object. + */ + private final Adler32 chk = new Adler32(); + + /** + * Compute the {@link Adler32} checksum of the buffer. The position, + * mark, and limit are unchanged by this operation. The operation is + * optimized when the buffer is backed by an array. + * + * @param buf + * The buffer. + * @param pos + * The starting position. + * @param limit + * The limit. + * + * @return The checksum. + */ + public int checksum(final ByteBuffer buf, final int pos, final int limit) { + + assert buf != null; + assert pos >= 0; + assert limit > pos; + + // reset before computing the checksum. + chk.reset(); + + if (buf.hasArray()) { + + /* + * Optimized when the buffer is backed by an array. + */ + + final byte[] bytes = buf.array(); + + final int len = limit - pos; + + if (pos > bytes.length - len) { + + throw new BufferUnderflowException(); + + } + + chk.update(bytes, pos + buf.arrayOffset(), len); + + } else { + + for (int i = pos; i < limit; i++) { + + chk.update(buf.get(i)); + + } + + } + + /* + * The Adler checksum is a 32-bit value. + */ + + return (int) chk.getValue(); + + } + + public int checksum(final IByteArraySlice slice) { + + assert slice != null; + + // reset before computing the checksum. + chk.reset(); + + chk.update(slice.array(), slice.off(), slice.len()); + + /* + * The Adler checksum is a 32-bit value. + */ + + return (int) chk.getValue(); + + } + + public int checksum(final byte[] buf, int sze) { + return checksum(buf, 0, sze); + } + + public int checksum(final byte[] buf, int off, int sze) { + + assert buf != null; + + // reset before computing the checksum. + chk.reset(); + + chk.update(buf, off, sze); + + /* + * The Adler checksum is a 32-bit value. 
+ */ + + return (int) chk.getValue(); + } +} Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ChecksumUtility.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: svn:eol-style + native Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -41,6 +41,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import com.bigdata.io.ChecksumUtility; import org.apache.log4j.Logger; import com.bigdata.BigdataStatics; @@ -70,7 +71,6 @@ import com.bigdata.rawstore.WormAddressManager; import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.resources.ResourceManager; -import com.bigdata.util.ChecksumUtility; /** * <p> Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/FileMetadata.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/FileMetadata.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/FileMetadata.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -31,6 +31,7 @@ import java.nio.channels.FileChannel; import java.util.UUID; +import com.bigdata.io.ChecksumUtility; import org.apache.log4j.Logger; import com.bigdata.io.DirectBufferPool; @@ -38,7 +39,6 @@ import com.bigdata.io.IReopenChannel; import com.bigdata.rawstore.Bytes; import com.bigdata.rawstore.WormAddressManager; -import com.bigdata.util.ChecksumUtility; /** * Helper object used when opening or creating journal file in any of the Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -30,10 +30,10 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; -import java.nio.channels.ClosedByInterruptException; import java.nio.channels.FileChannel; import java.util.UUID; +import com.bigdata.io.ChecksumUtility; import org.apache.log4j.Logger; import com.bigdata.counters.CounterSet; @@ -41,7 +41,6 @@ import com.bigdata.rawstore.AbstractRawStore; import com.bigdata.rawstore.IAddressManager; import com.bigdata.rwstore.RWStore; -import com.bigdata.util.ChecksumUtility; /** * The hook that accesses the RWStore to provide read/write services as opposed Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RootBlockView.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RootBlockView.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RootBlockView.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -30,6 +30,7 @@ 
import java.nio.ByteBuffer; import java.util.UUID; +import com.bigdata.io.ChecksumUtility; import org.apache.log4j.Logger; import com.bigdata.btree.IndexMetadata; @@ -38,7 +39,6 @@ import com.bigdata.rawstore.WormAddressManager; import com.bigdata.resources.ResourceManager; import com.bigdata.rwstore.RWStore; -import com.bigdata.util.ChecksumUtility; /** * A view onto a root block of the {@link Journal}. Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -44,7 +44,7 @@ import com.bigdata.rawstore.IMRMW; import com.bigdata.rawstore.WormAddressManager; import com.bigdata.relation.locator.ILocatableResource; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * A non-restart-safe store for temporary data that buffers data in memory until Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/BlobAllocator.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -5,9 +5,8 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * BlobAllocator Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -27,10 +27,9 @@ import java.util.*; import java.io.*; +import com.bigdata.io.ChecksumUtility; import org.apache.log4j.Logger; -import com.bigdata.util.ChecksumUtility; - /** * FixedAllocator * Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/RWStore.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rwstore/RWStore.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -45,7 +45,7 @@ import com.bigdata.journal.FileMetadata; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RWStrategy.FileMetadataView; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * Storage class Deleted: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/ChecksumUtility.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/ChecksumUtility.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/ChecksumUtility.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -1,162 
+0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Nov 5, 2006 - */ - -package com.bigdata.util; - -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.zip.Adler32; - -import com.bigdata.io.IByteArraySlice; - -/** - * Utility class for computing the {@link Adler32} checksum of a buffer. This - * class is NOT thread-safe. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class ChecksumUtility { - - private static ThreadLocal threadChk = new ThreadLocal(); - /** - * static access to a ThreadLocal Checksum utility - * - * @return the ChecksumUtility - */ - public static ChecksumUtility getCHK() { - ChecksumUtility chk = (ChecksumUtility) threadChk.get(); - - if (chk == null) { - chk = new ChecksumUtility(); - threadChk.set(chk); - } - - return chk; - } - - /** - * Private helper object. - */ - private final Adler32 chk = new Adler32(); - - /** - * Compute the {@link Adler32} checksum of the buffer. The position, - * mark, and limit are unchanged by this operation. The operation is - * optimized when the buffer is backed by an array. - * - * @param buf - * The buffer. - * @param pos - * The starting position. - * @param limit - * The limit. - * - * @return The checksum. - */ - public int checksum(final ByteBuffer buf, final int pos, final int limit) { - - assert buf != null; - assert pos >= 0; - assert limit > pos; - - // reset before computing the checksum. - chk.reset(); - - if (buf.hasArray()) { - - /* - * Optimized when the buffer is backed by an array. - */ - - final byte[] bytes = buf.array(); - - final int len = limit - pos; - - if (pos > bytes.length - len) { - - throw new BufferUnderflowException(); - - } - - chk.update(bytes, pos + buf.arrayOffset(), len); - - } else { - - for (int i = pos; i < limit; i++) { - - chk.update(buf.get(i)); - - } - - } - - /* - * The Adler checksum is a 32-bit value. - */ - - return (int) chk.getValue(); - - } - - public int checksum(final IByteArraySlice slice) { - - assert slice != null; - - // reset before computing the checksum. - chk.reset(); - - chk.update(slice.array(), slice.off(), slice.len()); - - /* - * The Adler checksum is a 32-bit value. - */ - - return (int) chk.getValue(); - - } - - public int checksum(final byte[] buf, int sze) { - return checksum(buf, 0, sze); - } - - public int checksum(final byte[] buf, int off, int sze) { - - assert buf != null; - - // reset before computing the checksum. - chk.reset(); - - chk.update(buf, off, sze); - - /* - * The Adler checksum is a 32-bit value. 
- */ - - return (int) chk.getValue(); - } -} Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -35,7 +35,7 @@ import com.bigdata.btree.IndexMetadata; import com.bigdata.cache.IGlobalLRU.ILRUCache; import com.bigdata.io.DirectBufferPool; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * Test the ability to abort (discard an uncommitted write set). This is a test Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRollbackCommit.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRollbackCommit.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRollbackCommit.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -29,7 +29,7 @@ import java.nio.ByteBuffer; -import com.bigdata.util.ChecksumUtility; +import com.bigdata.io.ChecksumUtility; /** * Test the ability to rollback a commit. Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRootBlockView.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRootBlockView.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestRootBlockView.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -31,11 +31,11 @@ import java.util.Random; import java.util.UUID; +import com.bigdata.io.ChecksumUtility; import junit.framework.TestCase2; import com.bigdata.rawstore.TestWormAddressManager; import com.bigdata.rawstore.WormAddressManager; -import com.bigdata.util.ChecksumUtility; import com.bigdata.util.MillisecondTimestampFactory; /** Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestChecksumUtility.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestChecksumUtility.java 2010-09-14 17:41:47 UTC (rev 3546) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestChecksumUtility.java 2010-09-14 17:52:22 UTC (rev 3547) @@ -31,6 +31,7 @@ import java.util.Random; import java.util.zip.Adler32; +import com.bigdata.io.ChecksumUtility; import junit.framework.TestCase; /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-14 17:41:54
|
Revision: 3546 http://bigdata.svn.sourceforge.net/bigdata/?rev=3546&view=rev Author: blevine218 Date: 2010-09-14 17:41:47 +0000 (Tue, 14 Sep 2010) Log Message: ----------- re-commit files. Looks like Eclipse got confused by a copy and paste operation. Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-14 17:41:47 UTC (rev 3546) @@ -0,0 +1,275 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Apr 22, 2007 + */ + +package com.bigdata.service.jini; + +import java.io.IOException; + +import junit.framework.AssertionFailedError; + +import net.jini.core.discovery.LookupLocator; +import net.jini.core.lookup.ServiceID; +import net.jini.core.lookup.ServiceRegistrar; +import net.jini.core.lookup.ServiceTemplate; + +import com.bigdata.journal.ITx; +import com.bigdata.mdi.IResourceMetadata; +import com.bigdata.mdi.LocalPartitionMetadata; +import com.bigdata.mdi.PartitionLocator; +import com.bigdata.service.DataService; +import com.bigdata.service.IDataService; +import com.bigdata.service.MetadataService; +import com.bigdata.test.util.Assert; +import com.sun.jini.tool.ClassServer; +import com.bigdata.util.config.NicUtil; + +/** + * Abstract base class for tests of remote services. + * <p> + * Note: jini MUST be running. You can get the jini starter kit and install it + * to get jini running. + * </p> + * <p> + * Note: You MUST specify a security policy that is sufficiently lax. + * </p> + * <p> + * Note: You MUST specify the codebase for downloadable code. + * </p> + * <p> + * Note: The <code>bigdata</code> JAR must be current in order for the client + * and the service to agree on interface definitions, etc. You can use + * <code>build.xml</code> in the root of this module to update that JAR. + * </p> + * <p> + * Note: A {@link ClassServer} will be started on port 8081 by default. If that + * port is in use then you MUST specify another port. + * </p> + * + * The following system properties will do the trick unless you have something + * running on port 8081. 
+ * + * <pre> + * -Djava.security.policy=policy.all -Djava.rmi.server.codebase=http://localhost:8081 + * </pre> + * + * To use another port, try: + * + * <pre> + * -Djava.security.policy=policy.all -Dbigdata.test.port=8082 -Djava.rmi.server.codebase=http://localhost:8082 + * </pre> + * + * You can enable NIO using: + * <pre> + * -Dcom.sun.jini.jeri.tcp.useNIO=true + * </pre> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public abstract class AbstractServerTestCase { + + /** + * Equal to {@link ITx#UNISOLATED}. + */ + protected final long UNISOLATED = ITx.UNISOLATED; + + /** + * + */ + public AbstractServerTestCase() { + } + + /** + * Return the {@link ServiceID} of a server that we started ourselves. The + * method waits until the {@link ServiceID} becomes available on + * {@link AbstractServer#getServiceID()}. + * + * @exception AssertionFailedError + * If the {@link ServiceID} can not be found after a timeout. + * + * @exception InterruptedException + * if the thread is interrupted while it is waiting to retry. + */ + static public ServiceID getServiceID(final AbstractServer server) throws AssertionFailedError, InterruptedException { + + ServiceID serviceID = null; + + for(int i=0; i<10 && serviceID == null; i++) { + + /* + * Note: This can be null since the serviceID is not assigned + * synchronously by the registrar. + */ + + serviceID = server.getServiceID(); + + if(serviceID == null) { + + /* + * We wait a bit and retry until we have it or timeout. + */ + Thread.sleep(200); + } + } + + Assert.assertNotNull("serviceID",serviceID); + + /* + * Verify that we have discovered the _correct_ service. This is a + * potential problem when starting a stopping services for the test + * suite. + */ + Assert.assertEquals("serviceID", server.getServiceID(), serviceID); + + return serviceID; + } + + /** + * Lookup a {@link DataService} by its {@link ServiceID} using unicast + * discovery on localhost. + * + * @param serviceID + * The {@link ServiceID}. + * + * @return The service. + * + * @todo Modify to return the service item? + * + * @todo Modify to not be specific to {@link DataService} vs + * {@link MetadataService} (we need a common base interface for both + * that carries most of the functionality but allows us to make + * distinctions easily during discovery). + */ + public IDataService lookupDataService(ServiceID serviceID) + throws IOException, ClassNotFoundException, InterruptedException { + + /* + * Lookup the discover service (unicast on localhost). + */ + + // get the hostname. + String hostname = NicUtil.getIpAddress("default.nic", "default", true); + + // Find the service registrar (unicast protocol). + final int timeout = 4*1000; // seconds. + LookupLocator lookupLocator = new LookupLocator("jini://"+hostname); + ServiceRegistrar serviceRegistrar = lookupLocator.getRegistrar( timeout ); + + /* + * Prepare a template for lookup search. + * + * Note: The client needs a local copy of the interface in order to be + * able to invoke methods on the service without using reflection. The + * implementation class will be downloaded from the codebase identified + * by the server. + */ + ServiceTemplate template = new ServiceTemplate(// + /* + * use this to request the service by its serviceID. + */ + serviceID, + /* + * Use this to filter services by an interface that they expose. + */ +// new Class[] { IDataService.class }, + null, + /* + * use this to filter for services by Entry attributes. + */ + null); + + /* + * Lookup a service. 
This can fail if the service registrar has not + * finished processing the service registration. If it does, you can + * generally just retry the test and it will succeed. However this + * points out that the client may need to wait and retry a few times if + * you are starting everything up at once (or just register for + * notification events for the service if it is not found and enter a + * wait state). + */ + + IDataService service = null; + + for (int i = 0; i < 10 && service == null; i++) { + + service = (IDataService) serviceRegistrar.lookup(template /* , maxMatches */); + + if (service == null) { + System.err.println("Service not found: sleeping..."); + Thread.sleep(200); + } + } + + if (service != null) { + System.err.println("Service found."); + } + + return service; + } + + /** + * Compares two representations of the {@link PartitionLocator} + * without the left- and right-separator keys that bound the index + * partition. + * + * @param expected + * @param actual + */ + protected void assertEquals(PartitionLocator expected, PartitionLocator actual) { + Assert.assertEquals("partitionId", expected.getPartitionId(), actual.getPartitionId()); + Assert.assertEquals("dataServiceUUID", expected.getDataServiceUUID(), actual.getDataServiceUUID()); + } + + /** + * Compares two representations of the {@link LocalPartitionMetadata} for an + * index partition including the optional resource descriptions. + * + * @param expected + * @param actual + */ + protected void assertEquals(LocalPartitionMetadata expected, LocalPartitionMetadata actual) { + + Assert.assertEquals("partitionId",expected.getPartitionId(), actual.getPartitionId()); + Assert.assertEquals("leftSeparatorKey", expected.getLeftSeparatorKey(), ((LocalPartitionMetadata) actual).getLeftSeparatorKey()); + Assert.assertEquals("rightSeparatorKey", expected.getRightSeparatorKey(), ((LocalPartitionMetadata) actual).getRightSeparatorKey()); + + final IResourceMetadata[] expectedResources = expected.getResources(); + final IResourceMetadata[] actualResources = actual.getResources(); + + Assert.assertEquals("#resources",expectedResources.length,actualResources.length); + + for(int i=0;i<expected.getResources().length; i++) { + + // verify by components so that it is obvious what is wrong. + Assert.assertEquals("filename[" + i + "]", expectedResources[i].getFile(), actualResources[i].getFile()); + Assert.assertEquals("UUID[" + i + "]", expectedResources[i].getUUID(), actualResources[i].getUUID()); + + // verify by equals. + Assert.assertTrue("resourceMetadata",expectedResources[i].equals(actualResources[i])); + } + } +} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-14 17:41:47 UTC (rev 3546) @@ -0,0 +1,209 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Apr 23, 2007 + */ + +package com.bigdata.service.jini; + +import java.io.Serializable; +import java.util.Random; +import java.util.UUID; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.bigdata.btree.IIndex; +import com.bigdata.btree.ITuple; +import com.bigdata.btree.ITupleIterator; +import com.bigdata.btree.IndexMetadata; + +import com.bigdata.btree.proc.BatchInsert.BatchInsertConstructor; +import com.bigdata.journal.ITx; +import com.bigdata.service.DataService; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.service.IDataService; +import com.bigdata.service.jini.util.JiniServicesHelper; +import com.bigdata.test.util.Assert; +import com.bigdata.test.util.Util; + +/** + * Test suite for the {@link JiniClient}. + * <p> + * Note: The core test suite has already verified the basic semantics of the + * {@link IDataService} interface and partitioned indices so all we have to + * focus on here is the jini integration and verifying that the serialization + * imposed by RMI goes off without a hitch (e.g., that everything implements + * {@link Serializable} and that those {@link Serializable} implementations can + * correctly round trip the data). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestBigdataClient extends AbstractServerTestCase { + + protected boolean serviceImplRemote; + + public TestBigdataClient() { + this.serviceImplRemote = false; + } + + protected TestBigdataClient(boolean serviceImplRemote) { + this.serviceImplRemote = serviceImplRemote; + } + + /** + * Starts a {@link DataServer} ({@link #dataServer1}) and then a + * {@link MetadataServer} ({@link #metadataServer0}). Each runs in its own + * thread. + */ + @Before + public void setUp() throws Exception { + helper = new JiniServicesHelper(serviceImplRemote); + helper.start(); + } + + protected JiniServicesHelper helper = null; + + /** + * Destroy the test services. + */ + @After + public void tearDown() throws Exception { + if (helper != null) { + helper.destroy(); + } + } + + /** + * Test ability to registers a scale-out index on one of the + * {@link DataService}s. + * + * @throws Exception + */ + @Test + public void test_registerIndex1() throws Exception { + + final IBigdataFederation<?> fed = helper.client.connect(); + final String name = "testIndex"; + final IndexMetadata metadata = new IndexMetadata(name, UUID.randomUUID()); + + metadata.setDeleteMarkers(true); + fed.registerIndex(metadata); + + final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); + + Assert.assertEquals("indexUUID", metadata.getIndexUUID(), ndx.getIndexMetadata().getIndexUUID()); + + doBasicIndexTests(ndx); + } + + /** + * Test ability to registers a scale-out index on both of the + * {@link DataService}s. 
+ * + * @throws Exception + */ + @Test + public void test_registerIndex2() throws Exception { + + final IBigdataFederation<?> fed = helper.client.connect(); + final String name = "testIndex"; + final IndexMetadata metadata = new IndexMetadata(name,UUID.randomUUID()); + + metadata.setDeleteMarkers(true); + + final UUID indexUUID = fed.registerIndex( metadata, // + // separator keys. + new byte[][] { + new byte[]{}, + Util.asSortKey(500) + },// + // data service assignments. + new UUID[] { // + helper.getDataService0().getServiceUUID(),// + helper.getDataService1().getServiceUUID() // + }); + + final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); + + Assert.assertEquals("indexUUID", indexUUID, ndx.getIndexMetadata().getIndexUUID()); + + // verify partition 0 on dataService0 + Assert.assertNotNull(helper.getDataService0().getIndexMetadata(DataService.getIndexPartitionName(name, 0), ITx.UNISOLATED)); + + // verify partition 1 on dataService1 + Assert.assertNotNull(helper.getDataService1().getIndexMetadata(DataService.getIndexPartitionName(name, 1), ITx.UNISOLATED)); + + doBasicIndexTests(ndx); + } + + /** + * Test helper reads and writes some data on the index in order to verify + * that these operations can be performed without serialization errors + * arising from the RPC calls. + * + * @param ndx + */ + protected void doBasicIndexTests(final IIndex ndx) { + + final int limit = 1000; + + final byte[][] keys = new byte[limit][]; + final byte[][] vals = new byte[limit][]; + + final Random r = new Random(); + + for (int i = 0; i < limit; i++) { + keys[i] = Util.asSortKey(i); + final byte[] val = new byte[10]; + r.nextBytes(val); + vals[i] = val; + } + + // batch insert. + ndx.submit(0/* fromIndex */, limit/* toIndex */, keys, vals, BatchInsertConstructor.RETURN_NO_VALUES, null); + + // verify #of index entries. + Assert.assertEquals(limit, ndx.rangeCount(null, null)); + + // verify data. + { + + final ITupleIterator<?> itr = ndx.rangeIterator(null, null); + + int i = 0; + + while (itr.hasNext()) { + final ITuple<?> tuple = itr.next(); + Assert.assertEquals(keys[i], tuple.getKey()); + Assert.assertEquals(vals[i], tuple.getValue()); + i++; + } + + Assert.assertEquals(limit, i); + } + } +} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java 2010-09-14 17:41:47 UTC (rev 3546) @@ -0,0 +1,35 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.service.jini;
+
+/**
+ * Test suite for the {@link JiniClient} using the purely remote
+ * service implementations.
+ */
+public class TestBigdataClientRemote extends TestBigdataClient {
+    public TestBigdataClientRemote() {
+        super(true);
+    }
+}
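Both getServiceID() and lookupDataService() in the re-committed AbstractServerTestCase poll in a bounded loop because jini assigns the ServiceID and processes registrations asynchronously. The same pattern can be factored out as below; the Poll helper is a sketch for illustration only and is not part of the committed code:

import java.util.concurrent.Callable;

public final class Poll {

    private Poll() {
    }

    /**
     * Invoke a lookup that may legitimately return null while registration
     * is still in flight, sleeping between attempts, until it yields a
     * result or the attempts are exhausted.
     */
    public static <T> T untilNonNull(final Callable<T> lookup,
            final int maxTries, final long sleepMillis) throws Exception {

        T result = null;

        for (int i = 0; i < maxTries && result == null; i++) {

            result = lookup.call();

            if (result == null) {

                // Not registered yet: wait a bit and retry.
                Thread.sleep(sleepMillis);

            }

        }

        // May still be null - the caller decides whether to assert or fail.
        return result;

    }

}

With such a helper, getServiceID(server) amounts to polling server.getServiceID() with 10 attempts and a 200ms sleep, then asserting that the result is non-null and matches the server's own ServiceID.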
From: <ble...@us...> - 2010-09-14 17:39:53
Revision: 3545 http://bigdata.svn.sourceforge.net/bigdata/?rev=3545&view=rev Author: blevine218 Date: 2010-09-14 17:39:46 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Move certain dependencies on extensions to Assert into new com.bigdata.test.util.Assert class. Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java Removed Paths: ------------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-14 17:19:56 UTC (rev 3544) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-14 17:39:46 UTC (rev 3545) @@ -45,7 +45,7 @@ In the ANT script, hostname is obtained by an exec of the 'hostname' command. Hard-coding for now. --> - <hostname>blevine-laptop</hostname> + <hostname>blevine-desktop</hostname> <test.codebase>http://${hostname}:${test.codebase.port}/jsk-dl.jar</test.codebase> <!-- Not used??? --> <federation.name>bigdata.test.group-${hostname}</federation.name> </properties> @@ -206,7 +206,6 @@ <groupId>com.bigdata</groupId> <artifactId>bigdata-core</artifactId> <version>${project.version}</version> - </dependency> <dependency> <groupId>junit</groupId> Deleted: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-14 17:19:56 UTC (rev 3544) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-14 17:39:46 UTC (rev 3545) @@ -1,286 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* - * Created on Apr 22, 2007 - */ - -package com.bigdata.service.jini; - -import java.io.IOException; - -import junit.framework.Assert; -import junit.framework.AssertionFailedError; - -import net.jini.core.discovery.LookupLocator; -import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceRegistrar; -import net.jini.core.lookup.ServiceTemplate; - -import com.bigdata.journal.ITx; -import com.bigdata.mdi.IResourceMetadata; -import com.bigdata.mdi.LocalPartitionMetadata; -import com.bigdata.mdi.PartitionLocator; -import com.bigdata.service.DataService; -import com.bigdata.service.IDataService; -import com.bigdata.service.MetadataService; -import com.sun.jini.tool.ClassServer; -import com.bigdata.util.config.NicUtil; - -/** - * Abstract base class for tests of remote services. - * <p> - * Note: jini MUST be running. You can get the jini starter kit and install it - * to get jini running. - * </p> - * <p> - * Note: You MUST specify a security policy that is sufficiently lax. - * </p> - * <p> - * Note: You MUST specify the codebase for downloadable code. - * </p> - * <p> - * Note: The <code>bigdata</code> JAR must be current in order for the client - * and the service to agree on interface definitions, etc. You can use - * <code>build.xml</code> in the root of this module to update that JAR. - * </p> - * <p> - * Note: A {@link ClassServer} will be started on port 8081 by default. If that - * port is in use then you MUST specify another port. - * </p> - * - * The following system properties will do the trick unless you have something - * running on port 8081. - * - * <pre> - * -Djava.security.policy=policy.all -Djava.rmi.server.codebase=http://localhost:8081 - * </pre> - * - * To use another port, try: - * - * <pre> - * -Djava.security.policy=policy.all -Dbigdata.test.port=8082 -Djava.rmi.server.codebase=http://localhost:8082 - * </pre> - * - * You can enable NIO using: - * - * <pre> - * -Dcom.sun.jini.jeri.tcp.useNIO=true - * </pre> - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public abstract class AbstractServerTestCase -{ - - /** - * Equal to {@link ITx#UNISOLATED}. - */ - protected final long UNISOLATED = ITx.UNISOLATED; - - /** - * - */ - public AbstractServerTestCase() - { - } - - /** - * Return the {@link ServiceID} of a server that we started ourselves. The - * method waits until the {@link ServiceID} becomes available on - * {@link AbstractServer#getServiceID()}. - * - * @exception AssertionFailedError - * If the {@link ServiceID} can not be found after a timeout. - * - * @exception InterruptedException - * if the thread is interrupted while it is waiting to retry. - */ - static public ServiceID getServiceID(final AbstractServer server) throws AssertionFailedError, InterruptedException - { - ServiceID serviceID = null; - - for (int i = 0; i < 10 && serviceID == null; i++) - { - /* - * Note: This can be null since the serviceID is not assigned - * synchronously by the registrar. - */ - serviceID = server.getServiceID(); - - if (serviceID == null) - { - /* - * We wait a bit and retry until we have it or timeout. - */ - Thread.sleep(200); - } - } - - Assert.assertNotNull("serviceID", serviceID); - - /* - * Verify that we have discovered the _correct_ service. 
This is a - * potential problem when starting a stopping services for the test - * suite. - */ - Assert.assertEquals("serviceID", server.getServiceID(), serviceID); - - return serviceID; - - } - - /** - * Lookup a {@link DataService} by its {@link ServiceID} using unicast - * discovery on localhost. - * - * @param serviceID - * The {@link ServiceID}. - * - * @return The service. - * - * @todo Modify to return the service item? - * - * @todo Modify to not be specific to {@link DataService} vs - * {@link MetadataService} (we need a common base interface for both - * that carries most of the functionality but allows us to make - * distinctions easily during discovery). - */ - public IDataService lookupDataService(ServiceID serviceID) throws IOException, ClassNotFoundException, InterruptedException - { - /* - * Lookup the discover service (unicast on localhost). - */ - - // get the hostname. - String hostname = NicUtil.getIpAddress("default.nic", "default", true); - - // Find the service registrar (unicast protocol). - final int timeout = 4 * 1000; // seconds. - LookupLocator lookupLocator = new LookupLocator("jini://" + hostname); - ServiceRegistrar serviceRegistrar = lookupLocator.getRegistrar(timeout); - - /* - * Prepare a template for lookup search. - * - * Note: The client needs a local copy of the interface in order to be - * able to invoke methods on the service without using reflection. The - * implementation class will be downloaded from the codebase identified - * by the server. - */ - ServiceTemplate template = new ServiceTemplate(// - /* - * use this to request the service by its serviceID. - */ - serviceID, - /* - * Use this to filter services by an interface that they - * expose. - */ - // new Class[] { IDataService.class }, - null, - /* - * use this to filter for services by Entry attributes. - */ - null); - - /* - * Lookup a service. This can fail if the service registrar has not - * finished processing the service registration. If it does, you can - * generally just retry the test and it will succeed. However this - * points out that the client may need to wait and retry a few times if - * you are starting everything up at once (or just register for - * notification events for the service if it is not found and enter a - * wait state). - */ - - IDataService service = null; - - for (int i = 0; i < 10 && service == null; i++) - { - service = (IDataService) serviceRegistrar.lookup(template); - - if (service == null) - { - System.err.println("Service not found: sleeping..."); - Thread.sleep(200); - } - - } - - if (service != null) - { - System.err.println("Service found."); - } - - return service; - } - - /** - * Compares two representations of the {@link PartitionLocator} without the - * left- and right-separator keys that bound the index partition. - * - * @param expected - * @param actual - */ - protected void assertEquals(PartitionLocator expected, PartitionLocator actual) - { - Assert.assertEquals("partitionId", expected.getPartitionId(), actual.getPartitionId()); - Assert.assertEquals("dataServiceUUID", expected.getDataServiceUUID(), actual.getDataServiceUUID()); - } - - /** - * Compares two representations of the {@link LocalPartitionMetadata} for an - * index partition including the optional resource descriptions. 
- * - * @param expected - * @param actual - */ - protected void assertEquals(LocalPartitionMetadata expected, LocalPartitionMetadata actual) - { - Assert.assertEquals("partitionId", expected.getPartitionId(), actual.getPartitionId()); - Assert.assertEquals("leftSeparatorKey", expected.getLeftSeparatorKey(), ((LocalPartitionMetadata) actual).getLeftSeparatorKey()); - Assert.assertEquals("rightSeparatorKey", expected.getRightSeparatorKey(), ((LocalPartitionMetadata) actual).getRightSeparatorKey()); - - final IResourceMetadata[] expectedResources = expected.getResources(); - final IResourceMetadata[] actualResources = actual.getResources(); - - Assert.assertEquals("#resources", expectedResources.length, actualResources.length); - - for (int i = 0; i < expected.getResources().length; i++) - { - // verify by components so that it is obvious what is wrong. - - Assert.assertEquals("filename[" + i + "]", expectedResources[i].getFile(), actualResources[i].getFile()); - - // assertEquals("size[" + i + "]", expectedResources[i].size(), - // actualResources[i].size()); - - Assert.assertEquals("UUID[" + i + "]", expectedResources[i].getUUID(), actualResources[i].getUUID()); - - // verify by equals. - Assert.assertTrue("resourceMetadata", expectedResources[i].equals(actualResources[i])); - } - } -} Deleted: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-14 17:19:56 UTC (rev 3544) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-14 17:39:46 UTC (rev 3545) @@ -1,208 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Apr 23, 2007 - */ - -package com.bigdata.service.jini; - -import java.io.Serializable; -import java.util.Random; -import java.util.UUID; - -import junit.framework.Assert; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.bigdata.btree.IIndex; -import com.bigdata.btree.ITuple; -import com.bigdata.btree.ITupleIterator; -import com.bigdata.btree.IndexMetadata; - -import com.bigdata.btree.proc.BatchInsert.BatchInsertConstructor; -import com.bigdata.journal.ITx; -import com.bigdata.service.DataService; -import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.IDataService; -import com.bigdata.service.jini.util.JiniServicesHelper; -import com.bigdata.test.util.Util; - -/** - * Test suite for the {@link JiniClient}. 
- * <p> - * Note: The core test suite has already verified the basic semantics of the - * {@link IDataService} interface and partitioned indices so all we have to - * focus on here is the jini integration and verifying that the serialization - * imposed by RMI goes off without a hitch (e.g., that everything implements - * {@link Serializable} and that those {@link Serializable} implementations can - * correctly round trip the data). - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class TestBigdataClient extends AbstractServerTestCase { - - protected boolean serviceImplRemote; - - public TestBigdataClient() { - this.serviceImplRemote = false; - } - - public TestBigdataClient(boolean serviceImplRemote) { - this.serviceImplRemote = serviceImplRemote; - } - - - /** - * Starts a {@link DataServer} ({@link #dataServer1}) and then a - * {@link MetadataServer} ({@link #metadataServer0}). Each runs in its own - * thread. - */ - @Before - public void setUp() throws Exception { - helper = new JiniServicesHelper(serviceImplRemote); - helper.start(); - } - - protected JiniServicesHelper helper = null; - - /** - * Destroy the test services. - */ - @After - public void tearDown() throws Exception { - if (helper != null) { - helper.destroy(); - } - } - - /** - * Test ability to registers a scale-out index on one of the - * {@link DataService}s. - * - * @throws Exception - */ - @Test - public void test_registerIndex1() throws Exception { - final IBigdataFederation<?> fed = helper.client.connect(); - final String name = "testIndex"; - final IndexMetadata metadata = new IndexMetadata(name, UUID.randomUUID()); - - metadata.setDeleteMarkers(true); - fed.registerIndex(metadata); - final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); - - Assert.assertEquals("indexUUID", metadata.getIndexUUID(), ndx.getIndexMetadata().getIndexUUID()); - - doBasicIndexTests(ndx); - } - - /** - * Test ability to registers a scale-out index on both of the - * {@link DataService}s. - * - * @throws Exception - */ - @Test - public void test_registerIndex2() throws Exception { - final IBigdataFederation<?> fed = helper.client.connect(); - final String name = "testIndex"; - final IndexMetadata metadata = new IndexMetadata(name,UUID.randomUUID()); - - metadata.setDeleteMarkers(true); - - final UUID indexUUID = fed.registerIndex( metadata, // - // separator keys. - new byte[][] { - new byte[]{}, - Util.asSortKey(500) - },// - // data service assignments. - new UUID[] { // - helper.getDataService0().getServiceUUID(),// - helper.getDataService1().getServiceUUID() // - }); - - final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); - - Assert.assertEquals("indexUUID", indexUUID, ndx.getIndexMetadata().getIndexUUID()); - - // verify partition 0 on dataService0 - Assert.assertNotNull(helper.getDataService0().getIndexMetadata(DataService.getIndexPartitionName(name, 0), ITx.UNISOLATED)); - - // verify partition 1 on dataService1 - Assert.assertNotNull(helper.getDataService1().getIndexMetadata(DataService.getIndexPartitionName(name, 1), ITx.UNISOLATED)); - - doBasicIndexTests(ndx); - } - - /** - * Test helper reads and writes some data on the index in order to verify - * that these operations can be performed without serialization errors - * arising from the RPC calls. 
- * - * @param ndx - */ - protected void doBasicIndexTests(final IIndex ndx) { - - final int limit = 1000; - - final byte[][] keys = new byte[limit][]; - final byte[][] vals = new byte[limit][]; - - final Random r = new Random(); - - for (int i = 0; i < limit; i++) { - keys[i] = Util.asSortKey(i); - final byte[] val = new byte[10]; - r.nextBytes(val); - vals[i] = val; - } - - // batch insert. - ndx.submit(0/* fromIndex */, limit/* toIndex */, keys, vals, BatchInsertConstructor.RETURN_NO_VALUES, null); - - // verify #of index entries. - Assert.assertEquals(limit, ndx.rangeCount(null, null)); - - // verify data. - { - final ITupleIterator<?> itr = ndx.rangeIterator(null, null); - - int i = 0; - - while (itr.hasNext()) { - final ITuple<?> tuple = itr.next(); - - Assert.assertEquals(keys[i], tuple.getKey()); - Assert.assertEquals(vals[i], tuple.getValue()); - i++; - } - - Assert.assertEquals(limit, i); - } - } -} Deleted: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java 2010-09-14 17:19:56 UTC (rev 3544) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java 2010-09-14 17:39:46 UTC (rev 3545) @@ -1,36 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ - -package com.bigdata.service.jini; - -/** - * Test suite for the {@link JiniClient} using the purely remote - * service implementations. - */ -public class TestBigdataClientRemote extends TestBigdataClient { - - public TestBigdataClientRemote() { - super(true); - } -} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java 2010-09-14 17:39:46 UTC (rev 3545) @@ -0,0 +1,82 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+package com.bigdata.test.util;
+
+
+/**
+ * Utility class that extends JUnit's Assert class with additional utilities.
+ *
+ * @author blevine
+ *
+ */
+public class Assert extends org.junit.Assert
+{
+    public static void assertEquals(byte[] expected, byte[] actual)
+    {
+        assertEquals(null, expected, actual);
+    }
+    public static void assertEquals(String message, byte[] expected, byte[] actual)
+    {
+        if (expected == null && actual == null)
+        {
+            return;
+        }
+
+        if ( (expected == null) && (actual != null) )
+        {
+            assertNull(message, actual);
+        }
+
+        if ( (expected != null) && (actual == null) )
+        {
+            assertNotNull(message, actual);
+        }
+
+        if (expected.length != actual.length)
+        {
+            String msg = "(array lengths do not match).";
+
+            if (message != null)
+            {
+                msg = message + " " + msg;
+            }
+
+            fail(msg);
+        }
+
+        for (int i = 0; i < expected.length; i++)
+        {
+            if (expected[i] != actual[i])
+            {
+                String msg = "(index = " + i + ").";
+
+                if (message != null)
+                {
+                    msg = message + " " + msg;
+                }
+                assertEquals(msg, expected[i], actual[i]);
+            }
+        }
+    }
+}
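The byte[] overloads added above compare arrays element by element and fail with a message naming the first mismatched index. A minimal usage sketch follows (the test class is hypothetical); note that JUnit 4's org.junit.Assert also provides assertArrayEquals for primitive arrays, which performs the same check:

import java.util.Arrays;

import org.junit.Test;

import com.bigdata.test.util.Assert;

public class AssertUsageExample {

    @Test
    public void byteArraysRoundTrip() {

        final byte[] expected = new byte[] { 1, 2, 3 };

        // An identical copy: the assertion passes.
        final byte[] actual = Arrays.copyOf(expected, expected.length);

        Assert.assertEquals("payload", expected, actual);

        // Mutating a cell, e.g. actual[2] = 9, would fail with a message
        // of the form "payload (index = 2)."
    }

}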
From: <sgo...@us...> - 2010-09-14 17:20:05
Revision: 3544 http://bigdata.svn.sourceforge.net/bigdata/?rev=3544&view=rev Author: sgossard Date: 2010-09-14 17:19:56 +0000 (Tue, 14 Sep 2010) Log Message: ----------- [merge trunk --> maven_scaleout] : Merge of ^/trunk , revisions 3476:3542. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/SingleResourceReaderTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/BasicRioLoader.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/IRioLoader.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/PresortRioLoader.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/store/DataLoader.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/Splitter.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/config/NicUtil.java branches/maven_scaleout/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/rio/AbstractRIOTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/rio/EDSAsyncLoader.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java Property Changed: ---------------- branches/maven_scaleout/ branches/maven_scaleout/bigdata-core/ branches/maven_scaleout/bigdata-core/bigdata-perf/ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/answers (U1)/ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/config/ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/logging/ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/scripts/ branches/maven_scaleout/bigdata-core/dsi-utils/LEGAL/ branches/maven_scaleout/bigdata-core/dsi-utils/lib/ branches/maven_scaleout/bigdata-core/dsi-utils/src/ branches/maven_scaleout/bigdata-core/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/maven_scaleout/bigdata-core/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/maven_scaleout/bigdata-core/osgi/ branches/maven_scaleout/bigdata-core/src/main/deploy/bin/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/ 
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties branches/maven_scaleout/bigdata-core/src/main/java/ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/ branches/maven_scaleout/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/bigdataStandaloneTesting.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config branches/maven_scaleout/bigdata-core/src/test/java/ Property changes on: branches/maven_scaleout ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463,3469-3470 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463,3469-3470 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/maven_scaleout/bigdata-core ___________________________________________________________________ Added: svn:mergeinfo + /trunk:3499 Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-perf:3379-3430 + /trunk/bigdata-perf:3379-3541 Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/java/edu/lehigh/swat/bench/ubt/Test.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -85,7 +85,7 @@ hostname = NicUtil.getIpAddress("default.nic", "default", false); } catch(Throwable t) {//for now, maintain same failure logic as used previously t.printStackTrace(); - s = NicUtil.getIpAddressByLocalHost(); + hostname = NicUtil.getIpAddressByLocalHost(); } QUERY_TEST_RESULT_FILE = hostname + "-result.txt"; } else { Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources ___________________________________________________________________ Modified: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources:2574-3440 /trunk/bigdata-perf/lubm/src/resources:3379-3430 + /branches/dev-btm/bigdata-perf/lubm/src/resources:2574-3440 /trunk/bigdata-perf/lubm/src/resources:3379-3541 Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/answers (U1) ___________________________________________________________________ Deleted: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources/answers 
(U1):2574-3440 Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/config ___________________________________________________________________ Deleted: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources/config:2574-3440 Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/logging ___________________________________________________________________ Deleted: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources/logging:2574-3440 Property changes on: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources/scripts ___________________________________________________________________ Deleted: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources/scripts:2574-3440 Property changes on: branches/maven_scaleout/bigdata-core/dsi-utils/LEGAL ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/LEGAL:3379-3430 + /trunk/dsi-utils/LEGAL:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/dsi-utils/lib ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/lib:3379-3430 + /trunk/dsi-utils/lib:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/dsi-utils/src ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/src:3379-3430 + /trunk/dsi-utils/src:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom:3379-3430 + /trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom:3379-3430 + /trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/osgi ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/osgi:3379-3430 + /trunk/osgi:3379-3430,3499 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/bin ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463,3469-3470 
/branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini ___________________________________________________________________ Added: svn:mergeinfo + /trunk/src/main/deploy/var/config/jini:3499 /trunk/src/resources/config:3516-3528 Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-14 17:19:56 UTC (rev 3544) @@ -768,10 +768,11 @@ * have for your applications! */ "-Xmx1600m",// was 800 - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS but is less necessary for other bigdata services. + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 + "-Xms800m", */ - "-Xms800m", // 1/2 of the max heap is a good value. /* * This option will keep the JVM "alive" even when it is memory starved * but perform of a memory starved JVM is terrible. Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/config/bigdataCluster.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3379-3430,3432-3460 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/config/bigdataCluster.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/src/resources/config/bigdataCluster.config:3516-3528 Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config =================================================================== --- 
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-14 17:19:56 UTC (rev 3544) @@ -814,12 +814,11 @@ * http://blogs.msdn.com/ntdebugging/archive/2009/02/06/microsoft-windows-dynamic-cache-service.aspx */ "-Xmx9G", // Note: out of 32 available! - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS, but is less necessary for other bigdata services. If the machine is - * dedicated to the DataService then use the maximum heap. Otherwise 1/2 of - * the maximum heap is a good value. - */ + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 "-Xms9G", + */ /* * FIXME This might not be required, so that should be tested. * However, you don't want the JVM to just die if it is being @@ -1299,11 +1298,11 @@ static private namespace = "U"+univNum+""; // minimum #of data services to run. - static private minDataServices = bigdata.dataServiceCount; +// static private minDataServices = bigdata.dataServiceCount; // unused // How long the master will wait to discover the minimum #of data // services that you specified (ms). - static private awaitDataServicesTimeout = 8000; +// static private awaitDataServicesTimeout = 8000; // unused. /* Multiplier for the scatter effect. */ Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-14 17:19:56 UTC (rev 3544) @@ -783,10 +783,11 @@ * have for your applications! */ "-Xmx4g",// was 800 - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS but is less necessary for other bigdata services. + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 + "-Xms2G", */ - "-Xms2G", // 1/2 of the max heap is a good value. /* * This option will keep the JVM "alive" even when it is memory starved * but perform of a memory starved JVM is terrible. 
[svn:mergeinfo property updates elided: the mergeinfo on var/config/jini/boot, var/config/jini/shardlocator.config, var/config/logging, var/config/logging/shardlocator-logging.properties, and var/config/logging/transaction-logging.properties was extended with the merged trunk revisions 3476-3499, 3503, 3507, 3516-3528, and 3542.]

Property changes on: branches/maven_scaleout/bigdata-core/src/main/java
___________________________________________________________________
Modified: svn:mergeinfo
   (old value elided)
   +
/branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/java:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/java:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/java:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/java:2594-3237 /branches/dev-btm/bigdata/src/java:3463 /branches/dev-btm/bigdata-core/src/main/java:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java:3463,3469-3470 /branches/dev-btm/bigdata-rdf/src/java:3463 /branches/dev-btm/bigdata-sails/src/java:3463 /branches/fko/bigdata-core/src/main/java:3150-3194 /trunk/bigdata/src/java:3507 /trunk/bigdata-core/src/main/java:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/bigdata-rdf/src/java:3542 Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -273,7 +273,18 @@ // // /** {@value #DEFAULT_MAX_TRIES} */ // int DEFAULT_MAX_TRIES = 3; - + + /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + String DEFAULT_GRAPH = "defaultGraph" ; + + /** + * TODO Should we always enforce a real value? i.e. provide a real default + * or abort the load. + */ + String DEFAULT_DEFAULT_GRAPH = null ; } /** @@ -403,6 +414,12 @@ private transient RDFFormat rdfFormat; /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + public final String defaultGraph ; + + /** * Force the load of the NxParser integration class and its registration * of the NQuadsParser#nquads RDFFormat. * @@ -497,6 +514,8 @@ sb.append(", " + ConfigurationOptions.RDF_FORMAT + "=" + rdfFormat); + sb.append(", " + ConfigurationOptions.DEFAULT_GRAPH + "=" + defaultGraph) ; + sb.append(", " + ConfigurationOptions.FORCE_OVERFLOW_BEFORE_CLOSURE + "=" + forceOverflowBeforeClosure); @@ -602,6 +621,10 @@ } + defaultGraph = (String) config.getEntry(component, + ConfigurationOptions.DEFAULT_GRAPH, String.class, + ConfigurationOptions.DEFAULT_DEFAULT_GRAPH); + rejectedExecutionDelay = (Long) config.getEntry( component, ConfigurationOptions.REJECTED_EXECUTION_DELAY, Long.TYPE, @@ -983,6 +1006,7 @@ jobState.ontology,//file jobState.ontology.getPath(),//baseURI jobState.getRDFFormat(),// + jobState.defaultGraph, jobState.ontologyFileFilter // ); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -215,6 +215,7 @@ jobState.valuesInitialCapacity,// jobState.bnodesInitialCapacity,// jobState.getRDFFormat(), // + jobState.defaultGraph, parserOptions,// false, // deleteAfter is handled by the master! 
jobState.parserPoolSize, // Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/SingleResourceReaderTask.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -186,7 +186,7 @@ // run the parser. // @todo reuse the same underlying parser instance? - loader.loadRdf(reader, baseURL, rdfFormat, parserOptions); + loader.loadRdf(reader, baseURL, rdfFormat, null, parserOptions); success = true; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -356,8 +356,14 @@ * The default {@link RDFFormat}. */ private final RDFFormat defaultFormat; - + /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private final String defaultGraph; + + /** * Options for the {@link RDFParser}. */ private final RDFParserOptions parserOptions; @@ -1423,7 +1429,7 @@ try { // run the parser. new PresortRioLoader(buffer).loadRdf(reader, baseURL, - rdfFormat, parserOptions); + rdfFormat, defaultGraph, parserOptions); } finally { reader.close(); } @@ -1490,6 +1496,9 @@ * {@link BNode}s parsed from a single document. * @param defaultFormat * The default {@link RDFFormat} which will be assumed. + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param parserOptions * Options for the {@link RDFParser}. * @param deleteAfter @@ -1529,6 +1538,7 @@ final int valuesInitialCapacity,// final int bnodesInitialCapacity, // final RDFFormat defaultFormat,// + final String defaultGraph,// final RDFParserOptions parserOptions,// final boolean deleteAfter,// final int parserPoolSize,// @@ -1566,6 +1576,8 @@ this.defaultFormat = defaultFormat; + this.defaultGraph = defaultGraph; + this.parserOptions = parserOptions; this.deleteAfter = deleteAfter; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/BasicRioLoader.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -37,6 +37,8 @@ import org.openrdf.rio.RDFParser; import org.openrdf.rio.Rio; +import com.bigdata.rdf.model.BigdataURI; + /** * Parses data but does not load it into the indices. 
* @@ -74,6 +76,8 @@ private final ValueFactory valueFactory; + protected String defaultGraph; + public BasicRioLoader(final ValueFactory valueFactory) { if (valueFactory == null) @@ -153,18 +157,20 @@ } final public void loadRdf(final InputStream is, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(is, baseURI, rdfFormat, options); + loadRdf2(is, baseURI, rdfFormat, defaultGraph, options); } final public void loadRdf(final Reader reader, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(reader, baseURI, rdfFormat, options); + loadRdf2(reader, baseURI, rdfFormat, defaultGraph, options); } @@ -180,7 +186,7 @@ * @throws Exception */ protected void loadRdf2(final Object source, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, final RDFParserOptions options) throws Exception { if (source == null) @@ -198,6 +204,8 @@ if (log.isInfoEnabled()) log.info("format=" + rdfFormat + ", options=" + options); + this.defaultGraph = defaultGraph ; + final RDFParser parser = getParser(rdfFormat); // apply options to the parser @@ -212,7 +220,7 @@ // Note: reset so that rates are correct for each source loaded. stmtsAdded = 0; - + try { before(); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/IRioLoader.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -72,12 +72,14 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. * @throws Exception */ public void loadRdf(Reader reader, String baseURL, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; /** * Parse RDF data. @@ -88,11 +90,13 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. 
* @throws Exception */ public void loadRdf(InputStream is, String baseURI, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/PresortRioLoader.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -23,11 +23,14 @@ */ package com.bigdata.rdf.rio; +import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.Value; import org.openrdf.rio.RDFHandler; import org.openrdf.rio.RDFHandlerException; +import com.bigdata.rdf.model.BigdataURI; + /** * Statement handler for the RIO RDF Parser that writes on a * {@link StatementBuffer}. @@ -45,6 +48,12 @@ final protected IStatementBuffer<?> buffer; /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private BigdataURI defaultGraphURI = null ; + + /** * Sets up parser to load RDF. * * @param buffer @@ -58,7 +67,7 @@ this.buffer = buffer; } - + /** * bulk insert the buffered data into the store. */ @@ -87,8 +96,11 @@ public RDFHandler newRDFHandler() { + defaultGraphURI = null != defaultGraph && 4 == buffer.getDatabase ().getSPOKeyArity () + ? buffer.getDatabase ().getValueFactory ().createURI ( defaultGraph ) + : null + ; return this; - } public void handleStatement( final Statement stmt ) { @@ -98,9 +110,13 @@ log.debug(stmt); } - + + Resource graph = stmt.getContext() ; + if ( null == graph + && null != defaultGraphURI ) // only true when we know we are loading a quad store + graph = defaultGraphURI ; // buffer the write (handles overflow). 
- buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext() ); + buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), graph ); stmtsAdded++; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -642,7 +642,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, reader, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, reader, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -670,7 +670,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/* endOfBatch */); + loadData3(totals, is, baseURL, rdfFormat, null, true/* endOfBatch */); return totals; @@ -706,7 +706,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, is, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -763,8 +763,8 @@ if(file.exists()) { - loadFiles(totals, 0/* depth */, file.toURI().toURL(), - baseURL, rdfFormat, filter, endOfBatch); + loadFiles(totals, 0/* depth */, file.toURI().toURL(), baseURL, + rdfFormat, null, filter, endOfBatch); return; @@ -792,7 +792,7 @@ try { - loadData3(totals, reader, baseURL, rdfFormat, endOfBatch); + loadData3(totals, reader, baseURL, rdfFormat, null, endOfBatch); } catch (Exception ex) { @@ -818,6 +818,9 @@ * The format of the file (optional, when not specified the * format is deduced for each file in turn using the * {@link RDFFormat} static methods). + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param filter * A filter selecting the file names that will be loaded * (optional). When specified, the filter MUST accept directories @@ -828,7 +831,8 @@ * @throws IOException */ public LoadStats loadFiles(final URL url, final String baseURI, - final RDFFormat rdfFormat, final FilenameFilter filter) + final RDFFormat rdfFormat, final String defaultGraph, + final FilenameFilter filter) throws IOException { if (url == null) @@ -836,8 +840,8 @@ final LoadStats totals = new LoadStats(); - loadFiles(totals, 0/* depth */, url, baseURI, - rdfFormat, filter, true/* endOfBatch */); + loadFiles(totals, 0/* depth */, url, baseURI, rdfFormat, defaultGraph, filter, true/* endOfBatch */ + ); return totals; @@ -845,7 +849,8 @@ protected void loadFiles(final LoadStats totals, final int depth, final URL url, final String baseURI, final RDFFormat rdfFormat, - final FilenameFilter filter, final boolean endOfBatch) + final String defaultGraph, final FilenameFilter filter, + final boolean endOfBatch) throws IOException { // Legacy behavior - allow local files and directories for now, @@ -870,8 +875,10 @@ final File f = files[i]; - loadFiles(totals, depth + 1, f.toURI().toURL(), baseURI, - rdfFormat, filter, +// final RDFFormat fmt = RDFFormat.forFileName(f.toString(), +// rdfFormat); + + loadFiles(totals, depth + 1, f.toURI().toURL(), baseURI, rdfFormat, defaultGraph, filter, (depth == 0 && i < files.length ? false : endOfBatch)); } @@ -915,7 +922,7 @@ final String s = baseURI != null ? 
baseURI : url.toURI() .toString(); - loadData3(totals, reader, s, rdfFormat, endOfBatch); + loadData3(totals, reader, s, rdfFormat, defaultGraph, endOfBatch); return; @@ -947,7 +954,7 @@ */ protected void loadData3(final LoadStats totals, final Object source, final String baseURL, final RDFFormat rdfFormat, - final boolean endOfBatch) throws IOException { + final String defaultGraph, final boolean endOfBatch) throws IOException { final long begin = System.currentTimeMillis(); @@ -970,11 +977,10 @@ } // Setup the loader. - final PresortRioLoader loader = new PresortRioLoader(buffer); + final PresortRioLoader loader = new PresortRioLoader ( buffer ) ; // @todo review: disable auto-flush - caller will handle flush of the buffer. // loader.setFlush(false); - // add listener to log progress. loader.addRioLoaderListener( new RioLoaderListener() { @@ -998,12 +1004,12 @@ if(source instanceof Reader) { - loader.loadRdf((Reader) source, baseURL, rdfFormat, parserOptions); + loader.loadRdf((Reader) source, baseURL, rdfFormat, defaultGraph, parserOptions); } else if (source instanceof InputStream) { loader.loadRdf((InputStream) source, baseURL, rdfFormat, - parserOptions); + defaultGraph, parserOptions); } else throw new AssertionError(); @@ -1351,9 +1357,8 @@ // dataLoader.loadFiles(fileOrDir, null/* baseURI */, // rdfFormat, filter); - dataLoader.loadFiles(totals, 0/* depth */, - fileOrDir.toURI().toURL(), baseURI, - rdfFormat, filter, true/* endOfBatch */ + dataLoader.loadFiles(totals, 0/* depth */, fileOrDir.toURI().toURL(), baseURI, + rdfFormat, null, filter, true/* endOfBatch */ ); } Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/util:3463 /trunk/bigdata-rdf/src/java/com/bigdata/rdf/util:3379-3430 + /branches/dev-btm/bigdata-rdf/src/java/com/bigdata/rdf/util:3463 /trunk/bigdata-rdf/src/java/com/bigdata/rdf/util:3379-3430,3542 Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/Splitter.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -714,7 +714,7 @@ try { // run the parser. 
new MyLoader(buffer).loadRdf(reader, baseURL, - defaultRDFFormat, s.parserOptions); + defaultRDFFormat, null, s.parserOptions); } finally { reader.close(); } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/config/NicUtil.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/config/NicUtil.java 2010-09-14 14:46:01 UTC (rev 3543) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/config/NicUtil.java 2010-09-14 17:19:56 UTC (rev 3544) @@ -26,28 +26,20 @@ package com.bigdata.util.config; import java.io.IOException; -import java.net.InetAddress; import java.net.Inet4Address; +import java.net.InetAddress; import java.net.InterfaceAddress; -import java.net.MalformedURLException; import java.net.NetworkInterface; import java.net.SocketException; import java.net.UnknownHostException; +import java.util.Collections; +import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Enumeration; -import java.util.Collections; -import java.util.logging.LogRecord; import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import net.jini.config.Configuration; -import net.jini.config.ConfigurationException; -import com.sun.jini.config.Config; -import com.sun.jini.logging.Levels; - /** * Utility class that provides a set of static convenience methods * related to processing information about the current node's Network @@ -400,34 +392,34 @@ return macAddr; } - /** - * Three-argument version of <code>getInetAddress</code> that retrieves - * the desired interface name from the given <code>Configuration</code> - * parameter. - */ - public static InetAddress getInetAddress(Configuration config, - String componentName, - String nicNameEntry) - { - String nicName = "NoNetworkInterfaceName"; - try { - nicName = (String)Config.getNonNullEntry(config, - componentName, - nicNameEntry, - String.class, - "eth0"); - } catch(ConfigurationException e) { - jiniConfigLogger.log(WARNING, e - +" - [componentName="+componentName - +", nicNameEntry="+nicNameEntry+"]"); - utilLogger.log(Level.WARN, e - +" - [componentName="+componentName - +", nicNameEntry="+nicNameEntry+"]"); - e.printStackTrace(); - return null; - } - return ( getInetAddress(nicName, 0, null, false) ); - } +// /** +// * Three-argument version of <code>getInetAddress</code> that retrieves +// * the desired interface name from the given <code>Configuration</code> +// * parameter. +// */ +// public static InetAddress getInetAddress(Configuration config, +// String componentName, +// String nicNameEntry) +// { +// String nicName = "NoNetworkInterfaceName"; +// try { +// nicName = (String)Config.getNonNullEntry(config, +// componentName, +// nicNameEntry, +// String.class, +// "eth0"); +// } catch(ConfigurationException e) { +// jiniConfigLogger.log(WARNING, e +// +" - [componentName="+componentName +// +", nicNameEntry="+nicNameEntry+"]"); +// utilLogger.log(Level.WARN, e +// +" - [componentName="+componentName +// +", nicNameEntry="+nicNameEntry+"]"); +// e.printStackTrace(); +// return null; +// } +// return ( getInetAddress(nicName, 0, null, false) ); +// } // What follows are a number of versions of the getIpAddress method // provided for convenience. 
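Taken together, the defaultGraph changes in this revision thread a single value from the job configuration down through IRioLoader and BasicRioLoader to the statement handler, where it is consulted only when a parsed statement has no context and the target store is a quad store. The stand-alone sketch below illustrates that fallback rule in isolation; the class and method names are hypothetical (the real logic lives in PresortRioLoader#handleStatement and tests the SPO key arity of the target database rather than taking a boolean):

import org.openrdf.model.Resource;
import org.openrdf.model.Statement;
import org.openrdf.model.URI;

/** Minimal sketch of the default-graph fallback (hypothetical class). */
public class DefaultGraphFallback {

    /**
     * Return the context to write for a parsed statement. Quads keep their
     * own context; triples loaded into a quad store fall back to the
     * configured default graph, which may be null (context left unbound).
     */
    static Resource resolveContext(final Statement stmt, final URI defaultGraph,
            final boolean targetIsQuadStore) {

        final Resource graph = stmt.getContext();

        if (graph == null && targetIsQuadStore && defaultGraph != null) {

            return defaultGraph;

        }

        return graph;

    }

}

Because ConfigurationOptions.DEFAULT_DEFAULT_GRAPH is null, the behavior is unchanged unless a job explicitly configures a default graph.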
Modified: branches/maven_scaleout/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties
===================================================================
--- branches/maven_scaleout/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties	2010-09-14 14:46:01 UTC (rev 3543)
+++ branches/maven_scaleout/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties	2010-09-14 17:19:56 UTC (rev 3544)
@@ -1,7 +1,8 @@
-# Be very careful when you use this configuration! This turns off incremental
-# inference for load and retract, so you must explicitly force these operations,
-# which requires punching through the SAIL layer. Of course, if you are not
-# using inference then this is just the ticket and quite fast.
+# This configuration turns off incremental inference for load and retract, so
+# you must explicitly force these operations if you want to compute the closure
+# of the knowledge base. Forcing the closure requires punching through the SAIL
+# layer. Of course, if you are not using inference then this configuration is
+# just the ticket and is quite fast.
 # set the initial and maximum extent of the journal
 com.bigdata.journal.AbstractJournal.initialExtent=209715200

[svn:mergeinfo property changes elided: a new mergeinfo property was added to fastload.properties (recording /trunk/bigdata-sails/src/samples/com/bigdata/samples/fastload.properties:3503 and /trunk/src/samples-sail/com/bigdata/samples/fastload.properties:3499), and the mergeinfo on bigdataStandaloneTesting.config and the two testfed.config files was extended with the merged trunk revisions 3476-3499, 3503, 3507, 3516-3528, and 3542.]

[truncated message content]
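The new DEFAULT_GRAPH option is read through the Jini configuration mechanism (config.getEntry(component, "defaultGraph", String.class, null) in the diff above), so a load job supplies it as a component entry in its configuration source. A minimal sketch, assuming the component name is the fully qualified class name of the master and using a placeholder graph URI:

com.bigdata.rdf.load.MappedRDFDataLoadMaster {
    // Graph/context for triples loaded into a quad store (hypothetical value).
    defaultGraph = "http://example.org/defaultGraph";
}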
From: <tho...@us...> - 2010-09-14 14:46:10
Revision: 3543 http://bigdata.svn.sourceforge.net/bigdata/?rev=3543&view=rev Author: thompsonbry Date: 2010-09-14 14:46:01 +0000 (Tue, 14 Sep 2010)
Log Message:
-----------
Refactored the RunState and ChunkTask out of the RunningQuery. Working on the federation-based unit test setup. We cannot use the EmbeddedFederation for this because the serviceId is shared by both data service instances. Unfortunately, we can no longer easily use the JiniServicesHelper either, due to things like the Jini group setup. I am going to tackle this next on a workstation with more RAM so that I can attach to a running federation.
Modified Paths:
--------------
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java
branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/ap/R.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestAll.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestMapBindingSetsOverShards.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/config/JiniCoreServicesConfiguration.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/jini/start/process/JiniCoreServicesProcessHelper.java
Added Paths:
-----------
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/service/jini/util/
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/service/jini/util/JiniCoreServicesHelper.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/service/jini/util/JiniServicesHelper.java
Removed Paths:
-------------
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/JiniCoreServicesHelper.java
branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/util/JiniServicesHelper.java
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java
===================================================================
---
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -3,7 +3,6 @@ import java.io.Serializable; import com.bigdata.bop.BOp; -import com.bigdata.bop.IBindingSet; import com.bigdata.bop.fed.FederatedRunningQuery; import com.bigdata.relation.accesspath.IAsynchronousIterator; @@ -94,8 +93,16 @@ // NOP } - public IAsynchronousIterator<E[]> iterator() { - return source; + public IChunkAccessor<E> getChunkAccessor() { + return new ChunkAccessor(); } + private class ChunkAccessor implements IChunkAccessor<E> { + + public IAsynchronousIterator<E[]> iterator() { + return source; + } + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -18,68 +18,66 @@ private static final long serialVersionUID = 1L; /** The identifier of the query. */ - final long queryId; + final public long queryId; /** The identifier of the operator. */ - final int bopId; + final public int bopId; /** - * The index partition identifier against which the operator was - * executing. + * The index partition identifier against which the operator was executing. */ - final int partitionId; + final public int partitionId; /** * The identifier of the service on which the operator was executing. */ - final UUID serviceId; + final public UUID serviceId; /** * * The cause and <code>null</code> if the operator halted normally. */ - final Throwable cause; + final public Throwable cause; /** - * The operator identifier for the primary sink -or- <code>null</code> - * if there is no primary sink (for example, if this is the last - * operator in the pipeline). + * The operator identifier for the primary sink -or- <code>null</code> if + * there is no primary sink (for example, if this is the last operator in + * the pipeline). */ - final Integer sinkId; + final public Integer sinkId; /** - * The number of the {@link BindingSetChunk}s that were output for the - * primary sink. (This information is used for the atomic termination - * decision.) + * The number of the {@link IChunkMessage}s that were output for the primary + * sink. (This information is used for the atomic termination decision.) * <p> * For a given downstream operator this is ONE (1) for scale-up. For - * scale-out, this is one per index partition over which the - * intermediate results were mapped. + * scale-out, this is one per index partition over which the intermediate + * results were mapped. */ - final int sinkChunksOut; + final public int sinkChunksOut; /** - * The operator identifier for the alternative sink -or- - * <code>null</code> if there is no alternative sink. + * The operator identifier for the alternative sink -or- <code>null</code> + * if there is no alternative sink. */ - final Integer altSinkId; + final public Integer altSinkId; /** - * The number of the {@link BindingSetChunk}s that were output for the - * alternative sink. (This information is used for the atomic - * termination decision.) + * The number of the {@link IChunkMessage}s that were output for the + * alternative sink. 
(This information is used for the atomic termination + * decision.) * <p> * For a given downstream operator this is ONE (1) for scale-up. For - * scale-out, this is one per index partition over which the - * intermediate results were mapped. It is zero if there was no - * alternative sink for the operator. + * scale-out, this is one per index partition over which the intermediate + * results were mapped. It is zero if there was no alternative sink for the + * operator. */ - final int altSinkChunksOut; + final public int altSinkChunksOut; /** - * The statistics for the execution of the bop against the partition on - * the service. + * The statistics for the execution of the bop against the partition on the + * service. */ - final BOpStats taskStats; + final public BOpStats taskStats; /** * @param queryId @@ -88,19 +86,18 @@ * The operator whose execution phase has terminated for a * specific index partition and input chunk. * @param partitionId - * The index partition against which the operator was - * executed. + * The index partition against which the operator was executed. * @param serviceId * The node which executed the operator. * @param cause * <code>null</code> unless execution halted abnormally. * @param chunksOut - * A map reporting the #of binding set chunks which were - * output for each downstream operator for which at least one - * chunk of output was produced. + * A map reporting the #of binding set chunks which were output + * for each downstream operator for which at least one chunk of + * output was produced. * @param taskStats - * The statistics for the execution of that bop on that shard - * and service. + * The statistics for the execution of that bop on that shard and + * service. */ public HaltOpMessage( // @@ -110,17 +107,6 @@ final Integer altSinkId, final int altSinkChunksOut,// final BOpStats taskStats) { - if (altSinkId != null && sinkId == null) { - // The primary sink must be defined if the altSink is defined. - throw new IllegalArgumentException(); - } - - if (sinkId != null && altSinkId != null - && sinkId.intValue() == altSinkId.intValue()) { - // The primary and alternative sink may not be the same operator. - throw new IllegalArgumentException(); - } - this.queryId = queryId; this.bopId = bopId; this.partitionId = partitionId; @@ -132,4 +118,5 @@ this.altSinkChunksOut = altSinkChunksOut; this.taskStats = taskStats; } -} \ No newline at end of file + +} Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -0,0 +1,96 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 13, 2010 + */ + +package com.bigdata.bop.engine; + +import java.nio.ByteBuffer; +import java.util.concurrent.BlockingQueue; + +import com.bigdata.bop.IBindingSet; +import com.bigdata.relation.accesspath.BlockingBuffer; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.striterator.IChunkedIterator; + +/** + * API providing a variety of ways to access chunks of data (data are typically + * elements or binding sets). + * + * @todo Expose an {@link IChunkedIterator}, which handles both element at a + * time and chunk at a time. + * + * @todo Expose a mechanism to visit the direct {@link ByteBuffer} slices in + * which the data are stored. For an operator which executes on a GPU, we + * want to transfer the data from the direct {@link ByteBuffer} in which + * it was received into a direct {@link ByteBuffer} which is a slice onto + * its VRAM. (And obviously we need to do the reverse with the outputs of + * a GPU operator). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public interface IChunkAccessor<E> { + + /** + * Visit the binding sets in the chunk. + * + * @deprecated We do not need to use {@link IAsynchronousIterator} any more. + * This could be much more flexible and should be harmonized to + * support high volume operators, GPU operators, etc. probably + * the right thing to do is introduce another interface here + * with a getChunk():IChunk where IChunk let's you access the + * chunks data in different ways (and chunks can be both + * {@link IBindingSet}[]s and element[]s so we might need to + * raise that into the interfaces and/or generics as well). + * + * @todo It is likely that we can convert to the use of + * {@link BlockingQueue} instead of {@link BlockingBuffer} in the + * operators and then handle the logic for combining chunks inside of + * the {@link QueryEngine}. E.g., by scanning this list for chunks for + * the same bopId and combining them logically into a single chunk. + * <p> + * For scale-out, chunk combination will naturally occur when the node + * on which the operator will run requests the {@link ByteBuffer}s + * from the source nodes. Those will get wrapped up logically into a + * source for processing. For selective operators, those chunks can be + * combined before we execute the operator. For unselective operators, + * we are going to run over all the data anyway. + */ + IAsynchronousIterator<E[]> iterator(); + +// /** +// * Chunked iterator pattern. The iterator may be used for element at a time +// * processing, but the underlying iterator operators in chunks. The size of +// * the chunks depends originally on the data producer, but smaller chunks +// * may be automatically combined into larger chunks both during production +// * and when data are buffered, whether to get them off of the heap or to +// * transfer them among nodes. 
+// * +// * @return +// */ +// IChunkedIterator<E> chunkedIterator(); + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -57,35 +57,10 @@ * Discard the materialized data. */ void release(); - + /** - * Visit the binding sets in the chunk. - * - * @todo we do not need to use {@link IAsynchronousIterator} any more. This - * could be much more flexible and should be harmonized to support - * high volume operators, GPU operators, etc. probably the right thing - * to do is introduce another interface here with a getChunk():IChunk - * where IChunk let's you access the chunks data in different ways - * (and chunks can be both {@link IBindingSet}[]s and element[]s so we - * might need to raise that into the interfaces and/or generics as - * well). - * - * @todo It is likely that we can convert to the use of - * {@link BlockingQueue} instead of {@link BlockingBuffer} in the - * operators and then handle the logic for combining chunks inside of - * the {@link QueryEngine}. E.g., by scanning this list for chunks for - * the same bopId and combining them logically into a single chunk. - * <p> - * For scale-out, chunk combination will naturally occur when the node - * on which the operator will run requests the {@link ByteBuffer}s - * from the source nodes. Those will get wrapped up logically into a - * source for processing. For selective operators, those chunks can be - * combined before we execute the operator. For unselective operators, - * we are going to run over all the data anyway. - * - * @throws IllegalStateException - * if the payload is not materialized. + * Return an interface which may be used to access the chunk's data. */ - IAsynchronousIterator<E[]> iterator(); + IChunkAccessor<E> getChunkAccessor(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -2,21 +2,24 @@ import java.rmi.RemoteException; +import com.bigdata.bop.BindingSetPipelineOp; + /** * Interface for a client executing queries (the query controller). */ public interface IQueryClient extends IQueryPeer { -// /** -// * Return the query. -// * -// * @param queryId -// * The query identifier. -// * @return The query. -// * -// * @throws RemoteException -// */ -// public BOp getQuery(long queryId) throws RemoteException; + /** + * Return the query. + * + * @param queryId + * The query identifier. + * @return The query. + * + * @throws IllegalArgumentException + * if there is no such query. 
+ */ + public BindingSetPipelineOp getQuery(long queryId) throws RemoteException; /** * Notify the client that execution has started for some query, operator, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -45,7 +45,6 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IPredicate; import com.bigdata.bop.bset.Union; -import com.bigdata.bop.fed.FederatedQueryEngine; import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; @@ -54,7 +53,6 @@ import com.bigdata.rdf.spo.SPORelation; import com.bigdata.relation.IMutableRelation; import com.bigdata.relation.IRelation; -import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.IElementFilter; import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.Program; @@ -413,7 +411,7 @@ /** * The currently executing queries. */ - final ConcurrentHashMap<Long/* queryId */, RunningQuery> runningQueries = new ConcurrentHashMap<Long, RunningQuery>(); + final protected ConcurrentHashMap<Long/* queryId */, RunningQuery> runningQueries = new ConcurrentHashMap<Long, RunningQuery>(); /** * A priority queue of {@link RunningQuery}s having binding set chunks @@ -513,7 +511,8 @@ */ private class QueryEngineTask implements Runnable { public void run() { - System.err.println("QueryEngine running: " + this); + if(log.isInfoEnabled()) + log.info("running: " + this); while (true) { try { final RunningQuery q = priorityQueue.take(); @@ -522,11 +521,12 @@ continue; final IChunkMessage<IBindingSet> chunk = q.chunksIn.poll(); if (log.isTraceEnabled()) - log.trace("Accepted chunk: queryId=" + queryId - + ", bopId=" + chunk.getBOpId()); - // create task. + log.trace("Accepted chunk: " + chunk); try { + // create task. final FutureTask<?> ft = q.newChunkTask(chunk); + if (log.isDebugEnabled()) + log.debug("Running chunk: " + chunk); // execute task. localIndexManager.getExecutorService().execute(ft); } catch (RejectedExecutionException ex) { @@ -670,6 +670,9 @@ // remove from the set of running queries. runningQueries.remove(q.getQueryId(), q); + + if (log.isInfoEnabled()) + log.info("Removed entry for query: " + q.getQueryId()); } @@ -800,6 +803,17 @@ return runningQueries.get(queryId); } + + public BindingSetPipelineOp getQuery(final long queryId) { + + final RunningQuery q = getRunningQuery(queryId); + + if (q == null) + throw new IllegalArgumentException(); + + return q.getQuery(); + + } /** * Places the {@link RunningQuery} object into the internal map. @@ -827,30 +841,8 @@ final IQueryClient clientProxy, final BindingSetPipelineOp query) { return new RunningQuery(this, queryId, true/* controller */, - this/* clientProxy */, query, newQueryBuffer(query)); + this/* clientProxy */, query); } - /** - * Return a buffer onto which the solutions will be written. - * - * @todo This method is probably in the wrong place. We should use whatever - * is associated with the top-level {@link BOp} in the query and then - * rely on the NIO mechanisms to move the data around as necessary. 
- * - * @todo Could return a data structure which encapsulates the query results - * and could allow multiple results from a query, e.g., one per step - * in a program. - * - * @deprecated This is going away. - * - * @see FederatedQueryEngine#newQueryBuffer(BindingSetPipelineOp) - */ - protected IBlockingBuffer<IBindingSet[]> newQueryBuffer( - final BindingSetPipelineOp query) { - - return query.newBuffer(); - - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-14 13:50:31 UTC (rev 3542) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-14 14:46:01 UTC (rev 3543) @@ -53,6 +53,7 @@ import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.NoSuchBOpException; +import com.bigdata.bop.PipelineOp; import com.bigdata.bop.bset.CopyBindingSetOp; import com.bigdata.bop.solutions.SliceOp; import com.bigdata.journal.IIndexManager; @@ -72,6 +73,12 @@ .getLogger(RunningQuery.class); /** + * Logger for the {@link ChunkTask}. + */ + private final static Logger chunkTaskLog = Logger + .getLogger(ChunkTask.class); + + /** * The run state of the query and the result of the computation iff it * completes execution normally (without being interrupted, cancelled, etc). */ @@ -91,11 +98,6 @@ /** The unique identifier for this query. */ final private long queryId; -// /** -// * The timestamp when the query was accepted by this node (ms). -// */ -// final private long begin; - /** * The query deadline. The value is the system clock time in milliseconds * when the query is due and {@link Long#MAX_VALUE} if there is no deadline. @@ -105,12 +107,6 @@ final private AtomicLong deadline = new AtomicLong(Long.MAX_VALUE); /** - * How long the query is allowed to run (elapsed milliseconds) -or- - * {@link Long#MAX_VALUE} if there is no deadline. - */ - final private long timeout; - - /** * <code>true</code> iff the outer {@link QueryEngine} is the controller for * this query. */ @@ -126,7 +122,7 @@ final private IQueryClient clientProxy; /** The query. */ - final private BOp query; + final private BindingSetPipelineOp query; /** * The buffer used for the overall output of the query pipeline. @@ -153,59 +149,20 @@ private final ConcurrentHashMap<BSBundle, Future<?>> operatorFutures = new ConcurrentHashMap<BSBundle, Future<?>>(); /** - * A lock guarding {@link #runningTaskCount}, {@link #availableChunkCount}, - * {@link #availableChunkCountMap}. + * A lock guarding {@link RunState#runningTaskCount}, + * {@link RunState#availableChunkCount}, + * {@link RunState#availableChunkCountMap}. This is <code>null</code> unless + * this is the query controller. + * + * @see RunState */ - private final ReentrantLock runStateLock = new ReentrantLock(); + private final ReentrantLock runStateLock; /** - * The #of tasks for this query which have started but not yet halted and - * ZERO (0) if this is not the query coordinator. - * <p> - * This is guarded by the {@link #runningStateLock}. + * The run state of this query and <code>null</code> unless this is the + * query controller. */ - private long runningTaskCount = 0; - - /** - * The #of chunks for this query of which a running task has made available - * but which have not yet been accepted for processing by another task and - * ZERO (0) if this is not the query coordinator. 
- * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private long availableChunkCount = 0; - - /** - * A map reporting the #of chunks available for each operator in the - * pipeline (we only report chunks for pipeline operators). The total #of - * chunks available for any given operator in the pipeline is reported by - * {@link #availableChunkCount}. - * <p> - * The movement of the intermediate binding set chunks forms an acyclic - * directed graph. This map is used to track the #of chunks available for - * each bop in the pipeline. When a bop has no more incoming chunks, we send - * an asynchronous message to all nodes on which that bop had executed - * informing the {@link QueryEngine} on that node that it should immediately - * release all resources associated with that bop. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); - - /** - * A collection reporting on the #of instances of a given {@link BOp} which - * are concurrently executing. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private final Map<Integer/*bopId*/, AtomicLong/*runningCount*/> runningCountMap = new LinkedHashMap<Integer, AtomicLong>(); - - /** - * A collection of the operators which have executed at least once. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private final Set<Integer/*bopId*/> startedSet = new LinkedHashSet<Integer>(); + final private RunState runState; /** * The chunks available for immediate processing (they must have been @@ -285,7 +242,7 @@ /** * Return the operator tree for this query. */ - public BOp getQuery() { + public BindingSetPipelineOp getQuery() { return query; } @@ -329,8 +286,8 @@ public RunningQuery(final QueryEngine queryEngine, final long queryId, // final long begin, final boolean controller, - final IQueryClient clientProxy, final BOp query, - final IBlockingBuffer<IBindingSet[]> queryBuffer) { + final IQueryClient clientProxy, final BindingSetPipelineOp query + ) { if (queryEngine == null) throw new IllegalArgumentException(); @@ -342,21 +299,41 @@ throw new IllegalArgumentException(); this.queryEngine = queryEngine; + this.queryId = queryId; -// this.begin = begin; + this.controller = controller; + this.clientProxy = clientProxy; + this.query = query; - this.queryBuffer = queryBuffer; + this.bopIndex = BOpUtility.getIndex(query); + this.statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() : null; + + runStateLock = controller ? new ReentrantLock() : null; - this.timeout = query.getProperty(BOp.Annotations.TIMEOUT, - BOp.Annotations.DEFAULT_TIMEOUT); + runState = controller ? new RunState(this) : null; + + this.queryBuffer = newQueryBuffer(); + + } - if (timeout < 0) - throw new IllegalArgumentException(); + /** + * Return the buffer on which the solutions will be written (if any). This + * is based on the top-level operator in the query plan. + * + * @return The buffer for the solutions -or- <code>null</code> if the + * top-level operator in the query plan is a mutation operator. 
+ */ + protected IBlockingBuffer<IBindingSet[]> newQueryBuffer() { + + if (query.isMutation()) + return null; + + return ((BindingSetPipelineOp) query).newBuffer(); } @@ -423,27 +400,88 @@ if (log.isDebugEnabled()) log.debug("queryId=" + queryId + ", chunksIn.size()=" - + chunksIn.size()); + + chunksIn.size() + ", msg=" + msg); } /** - * Invoked once by the query controller with the initial - * {@link BindingSetChunk} which gets the query moving. - * - * @todo this should reject multiple invocations for a given query instance. + * The run state for the query. */ - public void startQuery(final IChunkMessage<IBindingSet> chunk) { - if (!controller) - throw new UnsupportedOperationException(); - if (chunk == null) - throw new IllegalArgumentException(); - if (chunk.getQueryId() != queryId) // @todo equals() if queryId is UUID. - throw new IllegalArgumentException(); - final int bopId = chunk.getBOpId(); - runStateLock.lock(); - try { - lifeCycleSetUpQuery(); + static private class RunState { + + /** + * The query. + */ + private final RunningQuery query; + + /** + * The query identifier. + */ + private final long queryId; + + /** + * The #of tasks for this query which have started but not yet halted + * and ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private long runningTaskCount = 0; + + /** + * The #of chunks for this query of which a running task has made + * available but which have not yet been accepted for processing by + * another task and ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private long availableChunkCount = 0; + + /** + * A map reporting the #of chunks available for each operator in the + * pipeline (we only report chunks for pipeline operators). The total + * #of chunks available across all operators in the pipeline is reported + * by {@link #availableChunkCount}. + * <p> + * The movement of the intermediate binding set chunks forms an acyclic + * directed graph. This map is used to track the #of chunks available + * for each bop in the pipeline. When a bop has no more incoming chunks, + * we send an asynchronous message to all nodes on which that bop had + * executed informing the {@link QueryEngine} on that node that it + * should immediately release all resources associated with that bop. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection reporting on the #of instances of a given {@link BOp} + * which are concurrently executing. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection of the operators which have executed at least once. + * <p> + * This is guarded by the {@link #runningStateLock}. 
+ */ + private final Set<Integer/* bopId */> startedSet = new LinkedHashSet<Integer>(); + + public RunState(final RunningQuery query) { + + this.query = query; + + this.queryId = query.queryId; + + } + + public void startQuery(final IChunkMessage<?> msg) { + + query.lifeCycleSetUpQuery(); + + final Integer bopId = Integer.valueOf(msg.getBOpId()); + availableChunkCount++; { AtomicLong n = availableChunkCountMap.get(bopId); @@ -451,114 +489,81 @@ availableChunkCountMap.put(bopId, n = new AtomicLong()); n.incrementAndGet(); } + if (log.isInfoEnabled()) log.info("queryId=" + queryId + ",runningTaskCount=" + runningTaskCount + ",availableChunks=" + availableChunkCount); + System.err.println("startQ : bopId=" + bopId + ",running=" + runningTaskCount + ",available=" + availableChunkCount); - queryEngine.acceptChunk(chunk); - } finally { - runStateLock.unlock(); + } - } - /** - * Message provides notice that the operator has started execution and will - * consume some specific number of binding set chunks. - * - * @param bopId - * The identifier of the operator. - * @param partitionId - * The index partition identifier against which the operator is - * executing. - * @param serviceId - * The identifier of the service on which the operator is - * executing. - * @param fanIn - * The #of chunks that will be consumed by the operator - * execution. - * - * @throws UnsupportedOperationException - * If this node is not the query coordinator. - */ - public void startOp(final StartOpMessage msg) { - if (!controller) - throw new UnsupportedOperationException(); - final Integer bopId = Integer.valueOf(msg.bopId); - runStateLock.lock(); - try { + public void startOp(final StartOpMessage msg) { + + final Integer bopId = Integer.valueOf(msg.bopId); + runningTaskCount++; { AtomicLong n = runningCountMap.get(bopId); if (n == null) runningCountMap.put(bopId, n = new AtomicLong()); n.incrementAndGet(); - if(startedSet.add(bopId)) { + if (startedSet.add(bopId)) { // first evaluation pass for this operator. - lifeCycleSetUpOperator(msg.bopId); + query.lifeCycleSetUpOperator(bopId); } } + availableChunkCount -= msg.nchunks; + { AtomicLong n = availableChunkCountMap.get(bopId); if (n == null) throw new AssertionError(); n.addAndGet(-msg.nchunks); } - System.err.println("startOp: bopId=" + msg.bopId + ",running=" + + System.err.println("startOp: bopId=" + bopId + ",running=" + runningTaskCount + ",available=" + availableChunkCount + ",fanIn=" + msg.nchunks); - if (deadline.get() < System.currentTimeMillis()) { + + // check deadline. + if (query.deadline.get() < System.currentTimeMillis()) { + if (log.isTraceEnabled()) - log.trace("queryId: deadline expired."); - future.halt(new TimeoutException()); - cancel(true/* mayInterruptIfRunning */); + log.trace("expired: queryId=" + queryId + ", deadline=" + + query.deadline); + + query.future.halt(new TimeoutException()); + + query.cancel(true/* mayInterruptIfRunning */); + } - } finally { - runStateLock.unlock(); + } - } - /** - * Message provides notice that the operator has ended execution. The - * termination conditions for the query are checked. (For scale-out, the - * node node controlling the query needs to be involved for each operator - * start/stop in order to make the termination decision atomic). - * - * @throws UnsupportedOperationException - * If this node is not the query coordinator. 
- */ - public void haltOp(final HaltOpMessage msg) { - if (!controller) - throw new UnsupportedOperationException(); - runStateLock.lock(); - try { - // update per-operator statistics. - { - final BOpStats stats = statsMap.get(msg.bopId); - if (stats == null) { - statsMap.put(msg.bopId, msg.taskStats); - } else { - stats.add(msg.taskStats); - } - } - /* - * Update termination criteria counters. - */ + /** + * Update termination criteria counters. + */ + public void haltOp(final HaltOpMessage msg) { + // chunks generated by this task. final int fanOut = msg.sinkChunksOut + msg.altSinkChunksOut; availableChunkCount += fanOut; if (msg.sinkId != null) { AtomicLong n = availableChunkCountMap.get(msg.sinkId); if (n == null) - availableChunkCountMap.put(msg.sinkId, n = new AtomicLong()); + availableChunkCountMap + .put(msg.sinkId, n = new AtomicLong()); n.addAndGet(msg.sinkChunksOut); } if (msg.altSinkId != null) { AtomicLong n = availableChunkCountMap.get(msg.altSinkId); if (n == null) - availableChunkCountMap.put(msg.altSinkId, n = new AtomicLong()); + availableChunkCountMap.put(msg.altSinkId, + n = new AtomicLong()); n.addAndGet(msg.altSinkChunksOut); } // one less task is running. @@ -575,7 +580,7 @@ * No more chunks can appear for this operator so invoke its end * of life cycle hook. */ - lifeCycleTearDownOperator(msg.bopId); + query.lifeCycleTearDownOperator(msg.bopId); } System.err.println("haltOp : bopId=" + msg.bopId + ",running=" + runningTaskCount + ",available=" + availableChunkCount @@ -584,61 +589,156 @@ + runningTaskCount; assert availableChunkCount >= 0 : "availableChunkCount=" + availableChunkCount; -// final long elapsed = System.currentTimeMillis() - begin; if (log.isTraceEnabled()) - log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId - + ",serviceId=" + queryEngine.getServiceUUID() - + ", nchunks=" + fanOut + " : runningTaskCount=" - + runningTaskCount + ", availableChunkCount=" - + availableChunkCount);// + ", elapsed=" + elapsed); + log.trace("bopId=" + msg.bopId + ",partitionId=" + + msg.partitionId + ",serviceId=" + + query.queryEngine.getServiceUUID() + ", nchunks=" + + fanOut + " : runningTaskCount=" + runningTaskCount + + ", availableChunkCount=" + availableChunkCount); // test termination criteria if (msg.cause != null) { // operator failed on this chunk. log.error("Error: Canceling query: queryId=" + queryId + ",bopId=" + msg.bopId + ",partitionId=" + msg.partitionId, msg.cause); - future.halt(msg.cause); - cancel(true/* mayInterruptIfRunning */); + query.future.halt(msg.cause); + query.cancel(true/* mayInterruptIfRunning */); } else if (runningTaskCount == 0 && availableChunkCount == 0) { // success (all done). - future.halt(getStats()); - cancel(true/* mayInterruptIfRunning */); - } else if (deadline.get() < System.currentTimeMillis()) { if (log.isTraceEnabled()) - log.trace("queryId: deadline expired."); - future.halt(new TimeoutException()); - cancel(true/* mayInterruptIfRunning */); + log.trace("success: queryId=" + queryId); + query.future.halt(query.getStats()); + query.cancel(true/* mayInterruptIfRunning */); + } else if (query.deadline.get() < System.currentTimeMillis()) { + if (log.isTraceEnabled()) + log.trace("expired: queryId=" + queryId + ", deadline=" + + query.deadline); + query.future.halt(new TimeoutException()); + query.cancel(true/* mayInterruptIfRunning */); } + } + + /** + * Return <code>true</code> the specified operator can no longer be + * triggered by the query. 
The specific criteria are that no operators + * which are descendants of the specified operator are running or have + * chunks available against which they could run. Under those conditions + * it is not possible for a chunk to show up which would cause the + * operator to be executed. + * + * @param bopId + * Some operator identifier. + * + * @return <code>true</code> if the operator can not be triggered given + * the current query activity. + * + * @throws IllegalMonitorStateException + * unless the {@link #runStateLock} is held by the caller. + */ + protected boolean isOperatorDone(final int bopId) { + + return PipelineUtility.isDone(bopId, query.getQuery(), + query.bopIndex, runningCountMap, availableChunkCountMap); + + } + + } // class RunState + + /** + * Invoked once by the query controller with the initial + * {@link BindingSetChunk} which gets the query moving. + * + * @todo this should reject multiple invocations for a given query instance. + */ + public void startQuery(final IChunkMessage<IBindingSet> msg) { + + if (!controller) + throw new UnsupportedOperationException(); + + if (msg == null) + throw new IllegalArgumentException(); + + if (msg.getQueryId() != queryId) // @todo equals() if queryId is UUID. + throw new IllegalArgumentException(); + + runStateLock.lock(); + + try { + + runState.startQuery(msg); + + queryEngine.acceptChunk(msg); + } finally { + runStateLock.unlock(); + } + } /** - * Return <code>true</code> the specified operator can no longer be - * triggered by the query. The specific criteria are that no operators which - * are descendants of the specified operator are running or have chunks - * available against which they could run. Under those conditions it is not - * possible for a chunk to show up which would cause the operator to be - * executed. + * Message provides notice that the operator has started execution and will + * consume some specific number of binding set chunks. * - * @param bopId - * Some operator identifier. + * @param msg The {@link StartOpMessage}. * - * @return <code>true</code> if the operator can not be triggered given the - * current query activity. + * @throws UnsupportedOperationException + * If this node is not the query coordinator. + */ + public void startOp(final StartOpMessage msg) { + + if (!controller) + throw new UnsupportedOperationException(); + + runStateLock.lock(); + + try { + + runState.startOp(msg); + + } finally { + + runStateLock.unlock(); + + } + + } + + /** + * Message provides notice that the operator has ended execution. The + * termination conditions for the query are checked. (For scale-out, the + * node node controlling the query needs to be involved for each operator + * start/stop in order to make the termination decision atomic). * - * @throws IllegalMonitorStateException - * unless the {@link #runStateLock} is held by the caller. + * @param msg The {@link HaltOpMessage} + * + * @throws UnsupportedOperationException + * If this node is not the query coordinator. */ - protected boolean isOperatorDone(final int bopId) { + public void haltOp(final HaltOpMessage msg) { + + if (!controller) + throw new UnsupportedOperationException(); - if (!runStateLock.isHeldByCurrentThread()) - throw new IllegalMonitorStateException(); + // update per-operator statistics. 
+ final BOpStats tmp = statsMap.putIfAbsent(msg.bopId, msg.taskStats); - return PipelineUtility.isDone(bopId, query, bopIndex, runningCountMap, - availableChunkCountMap); + if (tmp != null) + tmp.add(msg.taskStats); + runStateLock.lock(); + + try { + + runState.haltOp(msg); + + } finally { + + runStateLock.unlock(); + + } + } /** @@ -703,100 +803,222 @@ * A chunk to be consumed. */ @SuppressWarnings("unchecked") - protected FutureTask<Void> newChunkTask(final IChunkMessage<IBindingSet> chunk) { - /* - * Look up the BOp in the index, create the BOpContext for that BOp, and - * return the value returned by BOp.eval(context). - */ - final int bopId = chunk.getBOpId(); - final int partitionId = chunk.getPartitionId(); - final BOp bop = bopIndex.get(bopId); - if (bop == null) { - throw new NoSuchBOpException(bopId); - } - if (!(bop instanceof BindingSetPipelineOp)) { - /* - * @todo evaluation of element[] pipelines needs to use pretty much - * the same code, but it needs to be typed for E[] rather than - * IBindingSet[]. - * - * @todo evaluation of Monet style BATs would also operate under - * different assumptions, closer to those of an element[]. - */ - throw new UnsupportedOperationException(bop.getClass().getName()); - } - // self - final BindingSetPipelineOp op = ((BindingSetPipelineOp) bop); - // parent (null if this is the root of the operator tree). - final BOp p = BOpUtility.getParent(query, op); - // sink (null unless parent is defined) - final Integer sinkId = p == null ? null : (Integer) p - .getProperty(BindingSetPipelineOp.Annotations.BOP_ID); - final IBlockingBuffer<IBindingSet[]> sink = (p == null ? queryBuffer - : op.newBuffer()); - // altSink (null when not specified). - final Integer altSinkId = (Integer) op - .getProperty(BindingSetPipelineOp.Annotations.ALT_SINK_REF); - if (altSinkId != null && !bopIndex.containsKey(altSinkId)) { - throw new NoSuchBOpException(altSinkId); - } - final IBlockingBuffer<IBindingSet[]> altSink = altSinkId == null ? null - : op.newBuffer(); - // context - final BOpContext context = new BOpContext(this, partitionId, op - .newStats(), chunk.iterator(), sink, altSink); - // FutureTask for operator execution (not running yet). - final FutureTask<Void> f = op.eval(context); - // Hook the FutureTask. - final Runnable r = new Runnable() { - public void run() { - final UUID serviceId = queryEngine.getServiceUUID(); - int fanIn = 1; - int sinkChunksOut = 0; - int altSinkChunksOut = 0; - try { - clientProxy.startOp(new StartOpMessage(queryId, - bopId, partitionId, serviceId, fanIn)); - if (log.isDebugEnabled()) - log.debug("Running chunk: queryId=" + queryId - + ", bopId=" + bopId + ", bop=" + bop); - f.run(); // run - f.get(); // verify success - if (sink != queryBuffer && !sink.isEmpty()) { - // handle output chunk. - sinkChunksOut += handleOutputChunk(sinkId, sink); - } - if (altSink != queryBuffer && altSink != null - && !altSink.isEmpty()) { - // handle alt sink output chunk. 
- altSinkChunksOut += handleOutputChunk(altSinkId, altSink); - } - clientProxy.haltOp(new HaltOpMessage(queryId, bopId, - partitionId, serviceId, null/* cause */, - sinkId, sinkChunksOut, altSinkId, - altSinkChunksOut, context.getStats())); - } catch (Throwable t) { - try { - clientProxy.haltOp(new HaltOpMessage(queryId, - bopId, partitionId, serviceId, - t/* cause */, sinkId, sinkChunksOut, altSinkId, - altSinkChunksOut, context.getStats())); - } catch (RemoteException e) { - cancel(true/* mayInterruptIfRunning */); - log.error("queryId=" + queryId, e); - } - } - } - }; + protected FutureTask<Void> newChunkTask( + final IChunkMessage<IBindingSet> chunk) { + + // create runnable to evaluate a chunk for an operator and partition. + final Runnable r = new ChunkTask(chunk); + // wrap runnable. final FutureTask<Void> f2 = new FutureTask(r, null/* result */); + // add to list of active futures for this query. - operatorFutures.put(new BSBundle(bopId, partitionId), f2); + operatorFutures.put(new BSBundle(chunk.getBOpId(), chunk + .getPartitionId()), f2); + // return : caller will execute. return f2; + } /** + * Runnable evaluates an operator for some chunk of inputs. In scale-out, + * the operator may be evaluated against some partition of a scale-out + * index. + */ + private class ChunkTask implements Runnable { + + /** Alias for the {@link ChunkTask}'s logger. */ + private final Logger log = chunkTaskLog; + + /** The index of the bop which is being evaluated. */ + private final int bopId; + + /** + * The index partition against which the operator is being evaluated and + * <code>-1</code> if the operator is not being evaluated against a + * shard. + */ + private final int partitionId; + + /** The operator which is being evaluated. */ + private final BOp bop; + + /** + * The index of the operator which is the default sink for outputs + * generated by this evaluation. This is the + * {@link BOp.Annotations#BOP_ID} of the parent of this operator. This + * will be <code>null</code> if the operator does not have a parent and + * is not a query since no outputs will be generated in that case. + */ + private final Integer sinkId; + + /** + * The index of the operator which is the alternative sink for outputs + * generated by this evaluation. This is <code>null</code> unless the + * operator explicitly specifies an alternative sink using + * {@link BindingSetPipelineOp.Annotations#ALT_SINK_REF}. + */ + private final Integer altSinkId; + + /** + * The sink on which outputs destined for the {@link #sinkId} operator + * will be written and <code>null</code> if {@link #sinkId} is + * <code>null</code>. + */ + private final IBlockingBuffer<IBindingSet[]> sink; + + /** + * The sink on which outputs destined for the {@link #altSinkId} + * operator will be written and <code>null</code> if {@link #altSinkId} + * is <code>null</code>. + */ + private final IBlockingBuffer<IBindingSet[]> altSink; + + /** + * The evaluation context for this operator. + */ + private final BOpContext<IBindingSet> context; + + /** + * {@link FutureTask} which evaluates the operator (evaluation is + * delegated to this {@link FutureTask}). + */ + private final FutureTask<Void> ft; + + /** + * Create a task to consume a chunk. 
This looks up the {@link BOp} which + * is the target for the message in the {@link RunningQuery#bopIndex}, + * creates the sink(s) for the {@link BOp}, creates the + * {@link BOpContext} for that {@link BOp}, and wraps the value returned + * by {@link PipelineOp#eval(BOpContext)} in order to handle the outputs + * written on those sinks. + * + * @param chunk + * A message containing the materialized chunk and metadata + * about the operator which will consume that chunk. + */ + public ChunkTask(final IChunkMessage<IBindingSet> chunk) { + bopId = chunk.getBOpId(); + partitionId = chunk.getPartitionId(); + bop = bopIndex.get(bopId); + if (bop == null) { + throw new NoSuchBOpException(bopId); + } + if (!(bop instanceof BindingSetPipelineOp)) { + /* + * @todo evaluation of element[] pipelines needs to use pretty + * much the same code, but it needs to be typed for E[] rather + * than IBindingSet[]. + * + * @todo evaluation of Monet style BATs would also operate under + * different assumptions, closer to those of an element[]. + */ + throw new UnsupportedOperationException(bop.getClass() + .getName()); + } + + // self + final BindingSetPipelineOp op = ((BindingSetPipelineOp) bop); + + // parent (null if this is the root of the operator tree). + final BOp p = BOpUtility.getParent(query, op); + + // sink (null unless parent is defined) + sinkId = p == null ? null : (Integer) p + .getProperty(BindingSetPipelineOp.Annotations.BOP_ID); + + // altSink (null when not specified). + altSinkId = (Integer) op + .getProperty(BindingSetPipelineOp.Annotations.ALT_SINK_REF); + + if (altSinkId != null && !bopIndex.containsKey(altSinkId)) + throw new NoSuchBOpException(altSinkId); + + if (altSinkId != null && sinkId == null) { + throw new RuntimeException( + "The primary sink must be defined if the altSink is defined: " + + bop); + } + + if (sinkId != null && altSinkId != null + && sinkId.intValue() == altSinkId.intValue()) { + throw new RuntimeException( + "The primary and alternative sink may not be the same operator: " + + bop); + } + + sink = (p == null ? queryBuffer : op.newBuffer()); + + altSink = altSinkId == null ? null : op.newBuffer(); + + // context + context = new BOpContext<IBindingSet>(RunningQuery.this, + partitionId, op.newStats(), chunk.getChunkAccessor() + .iterator(), sink, altSink); + + // FutureTask for operator execution (not running yet). + ft = op.eval(context); + + } + + /** + * Evaluate the {@link IChunkMessage}. + */ + public void run() { + final UUID serviceId = queryEngine.getServiceUUID(); + int fanIn = 1; + int sinkChunksOut = 0; + int altSinkChunksOut = 0; + try { + clientProxy.startOp(new StartOpMessage(queryId, + bopId, partitionId, serviceId, fanIn)); + if (log.isDebugEnabled()) + log.debug("Running chunk: queryId=" + queryId + ", bopId=" + + bopId + ", bop=" + bop); + ft.run(); // run + ft.get(); // verify success + if (sink != null && sink != queryBuffer && !sink.isEmpty()) { + /* + * Handle sink output, sending appropriate chunk + * message(s). + * + * Note: This maps output over shards/nodes in s/o. + ... [truncated message content] |
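The refactoring above moves the termination bookkeeping behind a single invariant: the query has run to completion when no operator tasks are running and no chunks remain available for consumption. A minimal, self-contained sketch of that accounting (hypothetical class and method shapes, not the actual RunState/RunningQuery sources):

import java.util.concurrent.locks.ReentrantLock;

/**
 * Sketch of the run-state accounting described above. startQuery() makes
 * the initial chunk available, startOp() registers a task which consumes
 * some chunks, and haltOp() credits the chunks a task produced. The query
 * is done when both counters reach zero. All counters are guarded by one
 * lock, mirroring runStateLock.
 */
class RunStateSketch {

    private final ReentrantLock lock = new ReentrantLock();

    /** #of tasks which have started but not yet halted. */
    private long runningTaskCount = 0;

    /** #of chunks generated but not yet consumed by some task. */
    private long availableChunkCount = 0;

    public void startQuery() {
        lock.lock();
        try {
            availableChunkCount++; // the initial chunk gets the query moving
        } finally {
            lock.unlock();
        }
    }

    public void startOp(final int fanIn) {
        lock.lock();
        try {
            runningTaskCount++;
            availableChunkCount -= fanIn; // chunks consumed by this task
        } finally {
            lock.unlock();
        }
    }

    /** @return true iff the query has run to completion. */
    public boolean haltOp(final int fanOut) {
        lock.lock();
        try {
            runningTaskCount--;
            availableChunkCount += fanOut; // chunks produced by this task
            assert runningTaskCount >= 0 && availableChunkCount >= 0;
            return runningTaskCount == 0 && availableChunkCount == 0;
        } finally {
            lock.unlock();
        }
    }

}

In the revision itself the same counters are additionally broken down per bopId (availableChunkCountMap, runningCountMap) so that the per-operator life cycle hooks can fire as soon as isOperatorDone() reports that an operator can no longer be triggered.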
From: <dm...@us...> - 2010-09-14 13:50:38
Revision: 3542
http://bigdata.svn.sourceforge.net/bigdata/?rev=3542&view=rev
Author: dmacgbr
Date: 2010-09-14 13:50:31 +0000 (Tue, 14 Sep 2010)

Log Message:
-----------
See trac #146. Allow specification of a default graph when running a bulk load of RDF triple data into a quad store. This is achieved by setting com.bigdata.rdf.load.MappedRDFDataLoadMaster.defaultGraph to the desired value, e.g. "http://xyz.com/data/defaultGraph", in the bigdata configuration file. This parameter has no effect when loading a triple store. Further, if it is not specified when loading a quad store, the system's behaviour is unaffected by this change, i.e. the graph/context co-ordinate in each quad remains null. Several of the unit tests touched by this change have been modified, effectively assuming that the default graph has not been specified.

Modified Paths:
--------------
trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java
trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java
trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java

Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java
===================================================================
--- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java	2010-09-14 10:57:21 UTC (rev 3541)
+++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFDataLoadMaster.java	2010-09-14 13:50:31 UTC (rev 3542)
@@ -272,7 +272,18 @@
//
// /** {@value #DEFAULT_MAX_TRIES} */
// int DEFAULT_MAX_TRIES = 3;
-
+
+ /**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ String DEFAULT_GRAPH = "defaultGraph" ;
+
+ /**
+ * TODO Should we always enforce a real value? i.e. provide a real default
+ * or abort the load.
+ */
+ String DEFAULT_DEFAULT_GRAPH = null ;
}

/**
@@ -402,6 +413,12 @@
private transient RDFFormat rdfFormat;

/**
+ * The value that will be used for the graph/context co-ordinate when
+ * loading data represented in a triple format into a quad store.
+ */
+ public final String defaultGraph ;
+
+ /**
* Force the load of the NxParser integration class and its registration
* of the NQuadsParser#nquads RDFFormat.
* @@ -496,6 +513,8 @@ sb.append(", " + ConfigurationOptions.RDF_FORMAT + "=" + rdfFormat); + sb.append(", " + ConfigurationOptions.DEFAULT_GRAPH + "=" + defaultGraph) ; + sb.append(", " + ConfigurationOptions.FORCE_OVERFLOW_BEFORE_CLOSURE + "=" + forceOverflowBeforeClosure); @@ -601,6 +620,10 @@ } + defaultGraph = (String) config.getEntry(component, + ConfigurationOptions.DEFAULT_GRAPH, String.class, + ConfigurationOptions.DEFAULT_DEFAULT_GRAPH); + rejectedExecutionDelay = (Long) config.getEntry( component, ConfigurationOptions.REJECTED_EXECUTION_DELAY, Long.TYPE, @@ -979,6 +1002,7 @@ jobState.ontology,//file jobState.ontology.getPath(),//baseURI jobState.getRDFFormat(),// + jobState.defaultGraph, jobState.ontologyFileFilter // ); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/MappedRDFFileLoadTask.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -223,6 +223,7 @@ jobState.valuesInitialCapacity,// jobState.bnodesInitialCapacity,// jobState.getRDFFormat(), // + jobState.defaultGraph, parserOptions,// false, // deleteAfter is handled by the master! jobState.parserPoolSize, // Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/load/SingleResourceReaderTask.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -186,7 +186,7 @@ // run the parser. // @todo reuse the same underlying parser instance? - loader.loadRdf(reader, baseURL, rdfFormat, parserOptions); + loader.loadRdf(reader, baseURL, rdfFormat, null, parserOptions); success = true; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/AsynchronousStatementBufferFactory.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -356,8 +356,14 @@ * The default {@link RDFFormat}. */ private final RDFFormat defaultFormat; - + /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private final String defaultGraph; + + /** * Options for the {@link RDFParser}. */ private final RDFParserOptions parserOptions; @@ -1423,7 +1429,7 @@ try { // run the parser. new PresortRioLoader(buffer).loadRdf(reader, baseURL, - rdfFormat, parserOptions); + rdfFormat, defaultGraph, parserOptions); } finally { reader.close(); } @@ -1490,6 +1496,9 @@ * {@link BNode}s parsed from a single document. * @param defaultFormat * The default {@link RDFFormat} which will be assumed. + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param parserOptions * Options for the {@link RDFParser}. 
* @param deleteAfter @@ -1529,6 +1538,7 @@ final int valuesInitialCapacity,// final int bnodesInitialCapacity, // final RDFFormat defaultFormat,// + final String defaultGraph,// final RDFParserOptions parserOptions,// final boolean deleteAfter,// final int parserPoolSize,// @@ -1566,6 +1576,8 @@ this.defaultFormat = defaultFormat; + this.defaultGraph = defaultGraph; + this.parserOptions = parserOptions; this.deleteAfter = deleteAfter; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/BasicRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -37,6 +37,8 @@ import org.openrdf.rio.RDFParser; import org.openrdf.rio.Rio; +import com.bigdata.rdf.model.BigdataURI; + /** * Parses data but does not load it into the indices. * @@ -74,6 +76,8 @@ private final ValueFactory valueFactory; + protected String defaultGraph; + public BasicRioLoader(final ValueFactory valueFactory) { if (valueFactory == null) @@ -153,18 +157,20 @@ } final public void loadRdf(final InputStream is, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(is, baseURI, rdfFormat, options); + loadRdf2(is, baseURI, rdfFormat, defaultGraph, options); } final public void loadRdf(final Reader reader, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, + final RDFParserOptions options) throws Exception { - loadRdf2(reader, baseURI, rdfFormat, options); + loadRdf2(reader, baseURI, rdfFormat, defaultGraph, options); } @@ -180,7 +186,7 @@ * @throws Exception */ protected void loadRdf2(final Object source, final String baseURI, - final RDFFormat rdfFormat, final RDFParserOptions options) + final RDFFormat rdfFormat, final String defaultGraph, final RDFParserOptions options) throws Exception { if (source == null) @@ -198,6 +204,8 @@ if (log.isInfoEnabled()) log.info("format=" + rdfFormat + ", options=" + options); + this.defaultGraph = defaultGraph ; + final RDFParser parser = getParser(rdfFormat); // apply options to the parser @@ -212,7 +220,7 @@ // Note: reset so that rates are correct for each source loaded. stmtsAdded = 0; - + try { before(); Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/IRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -72,12 +72,14 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. * @throws Exception */ public void loadRdf(Reader reader, String baseURL, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; /** * Parse RDF data. @@ -88,11 +90,13 @@ * The base URL for those data. * @param rdfFormat * The interchange format. + * @param defaultGraph + * The default graph. * @param options * Options to be applied to the {@link RDFParser}. 
* @throws Exception */ public void loadRdf(InputStream is, String baseURI, RDFFormat rdfFormat, - RDFParserOptions options) throws Exception; + String defaultGraph, RDFParserOptions options) throws Exception; } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/rio/PresortRioLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -23,11 +23,14 @@ */ package com.bigdata.rdf.rio; +import org.openrdf.model.Resource; import org.openrdf.model.Statement; import org.openrdf.model.Value; import org.openrdf.rio.RDFHandler; import org.openrdf.rio.RDFHandlerException; +import com.bigdata.rdf.model.BigdataURI; + /** * Statement handler for the RIO RDF Parser that writes on a * {@link StatementBuffer}. @@ -45,6 +48,12 @@ final protected IStatementBuffer<?> buffer; /** + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. + */ + private BigdataURI defaultGraphURI = null ; + + /** * Sets up parser to load RDF. * * @param buffer @@ -58,7 +67,7 @@ this.buffer = buffer; } - + /** * bulk insert the buffered data into the store. */ @@ -87,8 +96,11 @@ public RDFHandler newRDFHandler() { + defaultGraphURI = null != defaultGraph && 4 == buffer.getDatabase ().getSPOKeyArity () + ? buffer.getDatabase ().getValueFactory ().createURI ( defaultGraph ) + : null + ; return this; - } public void handleStatement( final Statement stmt ) { @@ -98,9 +110,13 @@ log.debug(stmt); } - + + Resource graph = stmt.getContext() ; + if ( null == graph + && null != defaultGraphURI ) // only true when we know we are loading a quad store + graph = defaultGraphURI ; // buffer the write (handles overflow). 
- buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext() ); + buffer.add( stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), graph ); stmtsAdded++; Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -640,7 +640,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, reader, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, reader, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -668,7 +668,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/* endOfBatch */); + loadData3(totals, is, baseURL, rdfFormat, null, true/* endOfBatch */); return totals; @@ -704,7 +704,7 @@ final LoadStats totals = new LoadStats(); - loadData3(totals, is, baseURL, rdfFormat, true/*endOfBatch*/); + loadData3(totals, is, baseURL, rdfFormat, null, true/*endOfBatch*/); return totals; @@ -762,7 +762,7 @@ if(file.exists()) { loadFiles(totals, 0/* depth */, file, baseURL, - rdfFormat, filter, endOfBatch); + rdfFormat, null, filter, endOfBatch); return; @@ -789,7 +789,7 @@ try { - loadData3(totals, reader, baseURL, rdfFormat, endOfBatch); + loadData3(totals, reader, baseURL, rdfFormat, null, endOfBatch); } catch (Exception ex) { @@ -817,6 +817,9 @@ * The format of the file (optional, when not specified the * format is deduced for each file in turn using the * {@link RDFFormat} static methods). + * @param defaultGraph + * The value that will be used for the graph/context co-ordinate when + * loading data represented in a triple format into a quad store. * @param filter * A filter selecting the file names that will be loaded * (optional). When specified, the filter MUST accept directories @@ -827,7 +830,8 @@ * @throws IOException */ public LoadStats loadFiles(final File file, final String baseURI, - final RDFFormat rdfFormat, final FilenameFilter filter) + final RDFFormat rdfFormat, final String defaultGraph, + final FilenameFilter filter) throws IOException { if (file == null) @@ -835,7 +839,7 @@ final LoadStats totals = new LoadStats(); - loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, filter, true/* endOfBatch */ + loadFiles(totals, 0/* depth */, file, baseURI, rdfFormat, defaultGraph, filter, true/* endOfBatch */ ); return totals; @@ -844,7 +848,8 @@ protected void loadFiles(final LoadStats totals, final int depth, final File file, final String baseURI, final RDFFormat rdfFormat, - final FilenameFilter filter, final boolean endOfBatch) + final String defaultGraph, final FilenameFilter filter, + final boolean endOfBatch) throws IOException { if (file.isDirectory()) { @@ -864,7 +869,7 @@ // final RDFFormat fmt = RDFFormat.forFileName(f.toString(), // rdfFormat); - loadFiles(totals, depth + 1, f, baseURI, rdfFormat, filter, + loadFiles(totals, depth + 1, f, baseURI, rdfFormat, defaultGraph, filter, (depth == 0 && i < files.length ? false : endOfBatch)); } @@ -919,7 +924,7 @@ final String s = baseURI != null ? 
baseURI : file.toURI() .toString(); - loadData3(totals, reader, s, fmt, endOfBatch); + loadData3(totals, reader, s, fmt, defaultGraph, endOfBatch); return; @@ -955,7 +960,7 @@ */ protected void loadData3(final LoadStats totals, final Object source, final String baseURL, final RDFFormat rdfFormat, - final boolean endOfBatch) throws IOException { + final String defaultGraph, final boolean endOfBatch) throws IOException { final long begin = System.currentTimeMillis(); @@ -978,11 +983,10 @@ } // Setup the loader. - final PresortRioLoader loader = new PresortRioLoader(buffer); + final PresortRioLoader loader = new PresortRioLoader ( buffer ) ; // @todo review: disable auto-flush - caller will handle flush of the buffer. // loader.setFlush(false); - // add listener to log progress. loader.addRioLoaderListener( new RioLoaderListener() { @@ -1006,12 +1010,12 @@ if(source instanceof Reader) { - loader.loadRdf((Reader) source, baseURL, rdfFormat, parserOptions); + loader.loadRdf((Reader) source, baseURL, rdfFormat, defaultGraph, parserOptions); } else if (source instanceof InputStream) { loader.loadRdf((InputStream) source, baseURL, rdfFormat, - parserOptions); + defaultGraph, parserOptions); } else throw new AssertionError(); @@ -1360,7 +1364,7 @@ // rdfFormat, filter); dataLoader.loadFiles(totals, 0/* depth */, fileOrDir, baseURI, - rdfFormat, filter, true/* endOfBatch */ + rdfFormat, null, filter, true/* endOfBatch */ ); } Modified: trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java =================================================================== --- trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 10:57:21 UTC (rev 3541) +++ trunk/bigdata-rdf/src/java/com/bigdata/rdf/util/Splitter.java 2010-09-14 13:50:31 UTC (rev 3542) @@ -714,7 +714,7 @@ try { // run the parser. 
new MyLoader(buffer).loadRdf(reader, baseURL,
- defaultRDFFormat, s.parserOptions);
+ defaultRDFFormat, null, s.parserOptions);
} finally {
reader.close();
}

Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java
===================================================================
--- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java	2010-09-14 10:57:21 UTC (rev 3541)
+++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/AbstractRIOTestCase.java	2010-09-14 13:50:31 UTC (rev 3542)
@@ -401,7 +401,7 @@
});

- loader.loadRdf((Reader) reader, baseURI, rdfFormat, options);
+ loader.loadRdf((Reader) reader, baseURI, rdfFormat, null, options);

if (log.isInfoEnabled())
log.info("Done: " + resource);
@@ -681,7 +681,7 @@

loader.loadRdf(new BufferedReader(new InputStreamReader(
new FileInputStream(resource))), baseURI, rdfFormat,
- options);
+ null, options);

if(log.isInfoEnabled())
log.info("End of reparse: nerrors=" + nerrs + ", file="

Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java
===================================================================
--- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java	2010-09-14 10:57:21 UTC (rev 3541)
+++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/EDSAsyncLoader.java	2010-09-14 13:50:31 UTC (rev 3542)
@@ -161,6 +161,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, // parserOptions
false, // deleteAfter
poolSize, // parserPoolSize,

Modified: trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java
===================================================================
--- trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java	2010-09-14 10:57:21 UTC (rev 3541)
+++ trunk/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAsynchronousStatementBufferFactory.java	2010-09-14 13:50:31 UTC (rev 3542)
@@ -400,6 +400,7 @@
valuesInitialCapacity,//
bnodesInitialCapacity,//
RDFFormat.RDFXML, // defaultFormat
+ null, // defaultGraph
parserOptions, //
false, // deleteAfter
parallel?5:1, // parserPoolSize,

Modified: trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java
===================================================================
--- trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java	2010-09-14 10:57:21 UTC (rev 3541)
+++ trunk/bigdata-sails/src/test/com/bigdata/rdf/stress/LoadClosureAndQueryTest.java	2010-09-14 13:50:31 UTC (rev 3542)
@@ -1204,7 +1204,7 @@
try {

dataLoader.loadFiles(dataDir, null/* baseURI */,
- null/* rdfFormat */, filter);
+ null/* rdfFormat */, null, /* defaultGraph */filter);

} catch (IOException ex) {
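The widened method signatures above simply thread the default graph from the public DataLoader API down to the RIO statement handler, where PresortRioLoader substitutes it for a null context when the target store is a quad store. For illustration, a hypothetical caller of the new loadFiles() signature (the file name and graph URI are made up; the DataLoader is assumed to already be configured against a quad store):

import java.io.File;
import java.io.IOException;

import org.openrdf.rio.RDFFormat;

import com.bigdata.rdf.rio.LoadStats;
import com.bigdata.rdf.store.DataLoader;

/**
 * Sketch: bulk load triple data into a quad store, placing every parsed
 * triple into the given default graph. Passing null for the default graph
 * preserves the old behaviour (the context coordinate remains null).
 */
class DefaultGraphLoadSketch {

    static LoadStats loadWithDefaultGraph(final DataLoader dataLoader)
            throws IOException {

        return dataLoader.loadFiles(//
                new File("data.nt"), // file (or directory) to load
                null, // baseURI : null => derived from each file
                RDFFormat.NTRIPLES, // interchange format
                "http://xyz.com/data/defaultGraph", // default graph
                null // FilenameFilter : null => load everything
                );

    }

}

For the scale-out bulk loader the same value is picked up from the bigdata configuration file via the MappedRDFDataLoadMaster.defaultGraph entry described in the log message; when that entry is absent, the default of null leaves the behaviour unchanged.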
From: <tho...@us...> - 2010-09-14 10:57:28
Revision: 3541 http://bigdata.svn.sourceforge.net/bigdata/?rev=3541&view=rev Author: thompsonbry Date: 2010-09-14 10:57:21 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Reorganized to synchronize on the buffer before testing remaining() since that depends on position(). This addresses the following stack trace for the RWStore. Caused by: java.nio.BufferOverflowException > [java] at java.nio.Buffer.nextPutIndex(Buffer.java:501) > [java] at java.nio.DirectByteBuffer.putLong(DirectByteBuffer.java:736) > [java] at com.bigdata.io.writecache.WriteCache.clearAddrMap(WriteCache.java:1761) > [java] at com.bigdata.io.writecache.WriteCacheService.clearWrite(WriteCacheService.java:1966) > [java] at com.bigdata.rwstore.RWStore.immediateFree(RWStore.java:1154) > [java] at com.bigdata.rwstore.RWStore.free(RWStore.java:1126) > [java] at com.bigdata.journal.RWStrategy.delete(RWStrategy.java:321) > [java] at com.bigdata.journal.RWStrategy.delete(RWStrategy.java:309) > [java] at com.bigdata.journal.AbstractJournal.delete(AbstractJournal.java:2625) > [java] at com.bigdata.btree.Node.replaceChildRef(Node.java:870) > [java] at com.bigdata.btree.AbstractNode.copyOnWrite(AbstractNode.java:546) > [java] at com.bigdata.btree.AbstractNode.copyOnWrite(AbstractNode.java:417) > [java] at com.bigdata.btree.Leaf.insert(Leaf.java:490) > [java] at com.bigdata.btree.Node.insert(Node.java:913) > [java] at com.bigdata.btree.Node.insert(Node.java:913) > [java] at com.bigdata.btree.Node.insert(Node.java:913) > [java] at com.bigdata.btree.AbstractBTree.insert(AbstractBTree.java:2046) > [java] at com.bigdata.btree.AbstractBTree.insert(AbstractBTree.java:1990) > [java] at com.bigdata.rdf.spo.SPOIndexWriteProc.apply(SPOIndexWriteProc.java:247) > [java] at com.bigdata.btree.UnisolatedReadWriteIndex.submit(UnisolatedReadWriteIndex.java:796) > [java] at com.bigdata.rdf.spo.SPOIndexWriter.call(SPOIndexWriter.java:332) > [java] at com.bigdata.rdf.spo.SPOIndexWriter.call(SPOIndexWriter.java:69) > [java] at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) > [java] at java.util.concurrent.FutureTask.run(FutureTask.java:138) > [java] at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) > [java] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) > [java] at java.lang.Thread.run(Thread.java:619) Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2010-09-13 21:00:14 UTC (rev 3540) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2010-09-14 10:57:21 UTC (rev 3541) @@ -1749,26 +1749,27 @@ // } final ByteBuffer tmp = acquire(); try { - if (tmp.remaining() >= 12) { - /* - * Note: We must synchronize before having a side effect on - * position. Also see write(...) which is synchronized on - * the buffer during critical sections which have a side - * effect on the buffer position. 
- */
- synchronized (tmp) {
- final int spos = tmp.position();
- tmp.putLong(addr);
- tmp.putInt(0);
- if (checker != null) {
- // update the checksum (no side-effects on [data])
- final ByteBuffer chkBuf = tmp.asReadOnlyBuffer();
- chkBuf.position(spos);
- chkBuf.limit(tmp.position());
- checker.update(chkBuf);
- }
- } // synchronized(tmp)
- }
+ /*
+ * Note: We must synchronize before having a side effect on
+ * position (which includes depending on remaining()). Also see
+ * write(...) which is synchronized on the buffer during
+ * critical sections which have a side effect on the buffer
+ * position.
+ */
+ synchronized (tmp) {
+ if (tmp.remaining() >= 12) {
+ final int spos = tmp.position();
+ tmp.putLong(addr);
+ tmp.putInt(0);
+ if (checker != null) {
+ // update the checksum (no side-effects on [data])
+ final ByteBuffer chkBuf = tmp.asReadOnlyBuffer();
+ chkBuf.position(spos);
+ chkBuf.limit(tmp.position());
+ checker.update(chkBuf);
+ }
+ }
+ } // synchronized(tmp)
} finally {
release();
}
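The failure follows the classic check-then-act pattern: remaining() is derived from position(), so testing it outside the synchronized region races against concurrent writers which advance the position between the test and the puts. A stripped-down illustration of the before/after shapes (hypothetical class, not the WriteCache code itself):

import java.nio.ByteBuffer;

/**
 * Illustrates the race fixed above: the capacity check must happen inside
 * the same synchronized region as the writes which advance position().
 */
class BufferAppendSketch {

    private final ByteBuffer buf = ByteBuffer.allocateDirect(1024);

    // BROKEN: another thread may advance position() between the
    // remaining() test and the puts, raising BufferOverflowException.
    void appendBroken(final long addr) {
        if (buf.remaining() >= 12) { // 8 (long) + 4 (int)
            synchronized (buf) {
                buf.putLong(addr); // position may have moved by now
                buf.putInt(0);
            }
        }
    }

    // FIXED: the test and the writes form one atomic region.
    void appendFixed(final long addr) {
        synchronized (buf) {
            if (buf.remaining() >= 12) {
                buf.putLong(addr);
                buf.putInt(0);
            }
        }
    }

}

With the test moved inside the synchronized block, the capacity check and the writes which consume that capacity are atomic with respect to every other thread that synchronizes on the same buffer, which is exactly the reorganization applied to clearAddrMap() above.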
From: <sgo...@us...> - 2010-09-13 21:00:21
Revision: 3540 http://bigdata.svn.sourceforge.net/bigdata/?rev=3540&view=rev Author: sgossard Date: 2010-09-13 21:00:14 +0000 (Mon, 13 Sep 2010) Log Message: ----------- [merge dev-btm --> maven_scaleout] : Making certain merge of ^/branches/dev-btm reflects proper mergeinfo for revisions 3468:3470. Property Changed: ---------------- branches/maven_scaleout/ branches/maven_scaleout/bigdata-core/src/main/deploy/bin/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/bigdataStandaloneTesting.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config branches/maven_scaleout/bigdata-core/src/test/java/ Property changes on: branches/maven_scaleout ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463,3469-3470 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/bin ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config ___________________________________________________________________ Modified: 
svn:mergeinfo, updated to append the newly merged /branches/dev-btm revisions 3469-3470 to the /branches/dev-btm/bigdata-core entry for each path; the ranges recorded for the other merge sources (BTREE_BUFFER_BRANCH, DEV_BRANCH_27_OCT_2009, LEXICON_REFACTOR_BRANCH, bugfix-btm, fko, trunk) are unchanged. The identical property change was applied to each of the following paths:

branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties
branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/bigdataStandaloneTesting.config
branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config
branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config
branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config
branches/maven_scaleout/bigdata-core/src/test/java
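These merge-bookkeeping dumps are easier to audit from a working copy than from the email. A minimal sketch using standard Subversion 1.5+ subcommands (the working-copy paths are illustrative):

    # Show the raw merge bookkeeping recorded on one of the affected paths.
    svn propget svn:mergeinfo bigdata-core/src/test/java

    # Ask Subversion which dev-btm revisions are already merged into this tree.
    svn mergeinfo --show-revs merged ^/branches/dev-btm .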
From: <sgo...@us...> - 2010-09-13 20:28:14
|
Revision: 3539
http://bigdata.svn.sourceforge.net/bigdata/?rev=3539&view=rev
Author: sgossard
Date: 2010-09-13 20:28:07 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
[merge dev-btm --> maven_scaleout] : Merge of ^/branches/dev-btm, revision 3468:3470. Small patches related to the merge of revision 3463.

Revision Links:
--------------
http://bigdata.svn.sourceforge.net/bigdata/?rev=3468&view=rev
http://bigdata.svn.sourceforge.net/bigdata/?rev=3463&view=rev

Modified Paths:
--------------
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/boot-processes.xml
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java

Property Changed:
----------------
branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/
branches/maven_scaleout/bigdata-core/src/main/java/
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/attr/
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/

Property changes: svn:mergeinfo was added on var/config/jini/boot (recording the merge history inherited from its parents plus /branches/dev-btm revisions 3469-3470, including the bigdata-jini boot/config source path), and was modified on src/main/java, com/bigdata/attr and com/bigdata/disco to append revisions 3469-3470 to their /branches/dev-btm source entries.

Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/boot-processes.xml
===================================================================
--- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/boot-processes.xml	2010-09-13 19:57:08 UTC (rev 3538)
+++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot/boot-processes.xml	2010-09-13 20:28:07 UTC (rev 3539)
@@ -77,7 +77,7 @@
         <javaprop name="java.util.logging.config.file" value="${bigdata.configDir}/logging/logging.properties"/>

-        <property name="java.classpath" value="${bootLauncherClasspath}"/>
+        <property name="java.classpath" value="${bootLauncherClasspath}${:}lib/fastutil.jar${:}lib/dsiutils.jar${:}lib/cweb-extser.jar"/>
         <property name="java.app.mainclass" value="com.bigdata.boot.starter.SingleNonActivatableServiceStarter"/>

         <arg value="${bigdata.configDir}/policy/service.policy"/>
@@ -95,7 +95,7 @@
         <javaprop name="java.util.logging.config.file" value="${bigdata.configDir}/logging/logging.properties"/>

-        <property name="java.classpath" value="${bootLauncherClasspath}"/>
+        <property name="java.classpath" value="${bootLauncherClasspath}${:}lib/fastutil.jar${:}lib/dsiutils.jar${:}lib/ctc_utils.jar"/>
         <property name="java.app.mainclass" value="com.bigdata.boot.starter.SingleNonActivatableServiceStarter"/>

         <arg value="${bigdata.configDir}/policy/service.policy"/>

Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java
===================================================================
--- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java	2010-09-13 19:57:08 UTC (rev 3538)
+++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java	2010-09-13 20:28:07 UTC (rev 3539)
@@ -108,7 +108,7 @@
         if ( (MetadataServer.class.getName()).equals(className) ) {
             System.out.println("*** MetadataConfiguration.MetadataServiceStarter: getDataDir [MetadataServer.Options.DATA_DIR="+MetadataServer.Options.DATA_DIR+", serviceDir="+serviceDir.toString()+"] ***");
             return new NV(MetadataServer.Options.DATA_DIR, serviceDir.toString());
-        } else if ( (com.bigdata.loadbalancer.ServiceImpl.class.getName()).equals(className) ) {
+        } else if ( (com.bigdata.metadata.ServiceImpl.class.getName()).equals(className) ) {
             System.out.println("*** MetadataConfiguration.MetadataServiceStarter: getDataDir [EmbeddedShardLocator.Options.DATA_DIR="+EmbeddedShardLocator.Options.DATA_DIR+", serviceDir="+serviceDir.toString()+"] ***");
             return new NV(EmbeddedShardLocator.Options.DATA_DIR, serviceDir.toString());
         } else {
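The one-line Java change above is easy to miss in the merge noise: the metadata service starter compared the launched class name against the load balancer's ServiceImpl, so the shard-locator branch could never match. A condensed sketch of the corrected dispatch, reusing the types named in the diff; the method shape and the throwing else-branch are assumptions for illustration, since the surrounding class is not shown in the email:

    // Condensed sketch (not the verbatim method): select the DATA_DIR option
    // for whichever implementation class is actually being started.
    private NV getDataDir(final String className, final File serviceDir) {
        if (MetadataServer.class.getName().equals(className)) {
            return new NV(MetadataServer.Options.DATA_DIR, serviceDir.toString());
        } else if (com.bigdata.metadata.ServiceImpl.class.getName().equals(className)) {
            // Before rev 3539 this arm tested com.bigdata.loadbalancer.ServiceImpl,
            // i.e. the wrong service, and was therefore dead code here.
            return new NV(EmbeddedShardLocator.Options.DATA_DIR, serviceDir.toString());
        } else {
            // The original else-branch is not shown in the email; failing fast is assumed.
            throw new IllegalArgumentException("Unexpected service class: " + className);
        }
    }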
From: <ble...@us...> - 2010-09-13 19:57:14
|
Revision: 3538
http://bigdata.svn.sourceforge.net/bigdata/?rev=3538&view=rev
Author: blevine218
Date: 2010-09-13 19:57:08 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
Really enable TestBigdataClientRemote this time!

Modified Paths:
--------------
branches/maven_scaleout/bigdata-integ/pom.xml

Modified: branches/maven_scaleout/bigdata-integ/pom.xml
===================================================================
--- branches/maven_scaleout/bigdata-integ/pom.xml	2010-09-13 19:56:24 UTC (rev 3537)
+++ branches/maven_scaleout/bigdata-integ/pom.xml	2010-09-13 19:57:08 UTC (rev 3538)
@@ -105,9 +105,7 @@
             <include>**/Test*.java</include>
             <include>**/*Test.java</include>
           </includes>
-          <excludes>
-            <exclude>**/TestBigdataClientremove.java</exclude>
-          </excludes>
+
           <systemPropertyVariables>
             <java.security.policy>${java.security.policy}</java.security.policy>
             <java.net.preferIPv4Stack>{java.net.preferIPv4Stack}"</java.net.preferIPv4Stack>
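For readers not fluent in Surefire: <includes> and <excludes> are both glob filters over test classes, and an exclude always wins over an include. A hypothetical minimal fragment showing the state after this commit, where nothing filters TestBigdataClientRemote back out (note the removed exclude pattern was also misspelled as "TestBigdataClientremove.java", so it matched no class in any case):

    <!-- Hypothetical minimal surefire configuration after rev 3538. -->
    <configuration>
      <includes>
        <!-- TestBigdataClientRemote.java matches this pattern and so is run. -->
        <include>**/Test*.java</include>
        <include>**/*Test.java</include>
      </includes>
      <!-- No <excludes> element: nothing matched by the includes is filtered out. -->
    </configuration>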
From: <ble...@us...> - 2010-09-13 19:56:31
|
Revision: 3537
http://bigdata.svn.sourceforge.net/bigdata/?rev=3537&view=rev
Author: blevine218
Date: 2010-09-13 19:56:24 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
Remove dependencies on cweb-junit.
Move to JUnit 4.
Fix up the deploy directories in the POM.
Enable TestBigdataClientRemote.

Modified Paths:
--------------
branches/maven_scaleout/bigdata-integ/pom.xml
branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java
branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java
branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java

Property Changed:
----------------
branches/maven_scaleout/bigdata-integ/

Property changes on: branches/maven_scaleout/bigdata-integ
___________________________________________________________________
Modified: svn:ignore
   - target
   + target
     .classpath
     .project
     .settings

Modified: branches/maven_scaleout/bigdata-integ/pom.xml
===================================================================
--- branches/maven_scaleout/bigdata-integ/pom.xml	2010-09-13 15:17:50 UTC (rev 3536)
+++ branches/maven_scaleout/bigdata-integ/pom.xml	2010-09-13 19:56:24 UTC (rev 3537)
@@ -43,10 +43,10 @@
     <!-- In the ANT script, hostname is obtained by an exec of the
-         'hostname' command.  Hard-coding to localhost for now.
+         'hostname' command.  Hard-coding for now.
     -->
-    <hostname>blevine-desktop</hostname>
-    <test.codebase>http://${hostname}:${test.codebase.port}/jsk-dl.jar</test.codebase>
+    <hostname>blevine-laptop</hostname>
+    <test.codebase>http://${hostname}:${test.codebase.port}/jsk-dl.jar</test.codebase>  <!-- Not used??? -->
     <federation.name>bigdata.test.group-${hostname}</federation.name>
   </properties>
@@ -69,7 +69,7 @@
             <artifactId>bigdata-core</artifactId>
             <classifier>deploy</classifier>
             <type>tar.gz</type>
-            <outputDirectory>${deploy.directory}</outputDirectory>
+            <outputDirectory>${deploy.root.dir}</outputDirectory>
           </artifactItem>
         </artifactItems>
         <useSubdirPerArtifact>true</useSubdirPerArtifact>
@@ -106,10 +106,9 @@
           <include>**/*Test.java</include>
         </includes>
         <excludes>
-          <exclude>**/TestBigdataClientRemote.java</exclude>
+          <exclude>**/TestBigdataClientremove.java</exclude>
         </excludes>
         <systemPropertyVariables>
-          <foo.bar.prop>hello</foo.bar.prop>
           <java.security.policy>${java.security.policy}</java.security.policy>
           <java.net.preferIPv4Stack>{java.net.preferIPv4Stack}"</java.net.preferIPv4Stack>
           <log4j.configuration>${log4j.configuration}</log4j.configuration>
@@ -217,14 +216,6 @@
       <version>4.7</version>
       <scope>test</scope>
     </dependency>
-
-    <dependency>
-      <groupId>com.bigdata.thirdparty</groupId>
-      <artifactId>cweb-junit-ext</artifactId>
-      <version>1.1.0-b3-dev</version>
-      <scope>test</scope>
-    </dependency>
-
   </dependencies>
 </project>

Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java
===================================================================
--- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java	2010-09-13 15:17:50 UTC (rev 3536)
+++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java	2010-09-13 19:56:24 UTC (rev 3537)
@@ -20,7 +20,7 @@  (whitespace-only change to the close of the license header)
@@ -28,10 +28,10 @@
 import java.io.IOException;
-import java.net.InetAddress;

+import junit.framework.Assert;
 import junit.framework.AssertionFailedError;
-import junit.framework.TestCase2;
+
 import net.jini.core.discovery.LookupLocator;
 import net.jini.core.lookup.ServiceID;
 import net.jini.core.lookup.ServiceRegistrar;
@@ -45,7 +45,6 @@
 import com.bigdata.service.IDataService;
 import com.bigdata.service.MetadataService;
 import com.sun.jini.tool.ClassServer;
-import com.bigdata.util.config.ConfigDeployUtil;
 import com.bigdata.util.config.NicUtil;
@@ -84,6 +83,7 @@  (javadoc reflow only)
@@ -91,210 +91,22 @@
-public abstract class AbstractServerTestCase extends TestCase2 {
+public abstract class AbstractServerTestCase
+{
     /**
      * Equal to {@link ITx#UNISOLATED}.
      */
     protected final long UNISOLATED = ITx.UNISOLATED;

-    /**
-     * @param arg0
-     */
-    public AbstractServerTestCase(String arg0) {
-        super(arg0);
-    }
-
 [roughly 185 further deleted lines: the commented-out getPort(int) and
  assertOpenPort(int) helpers, the commented-out ClassServer field,
  PORT_OPTION / DEFAULT_PORT constants and startClassServer() scaffolding,
  and the JUnit-3 setUp() / tearDown() overrides that logged getName()]
@@ -305,45 +117,40 @@  (getServiceID(): braces restyled; assertions now go through junit.framework.Assert)
-        assertNotNull("serviceID",serviceID);
+        Assert.assertNotNull("serviceID", serviceID);
-        assertEquals("serviceID", server.getServiceID(), serviceID);
+        Assert.assertEquals("serviceID", server.getServiceID(), serviceID);
@@ -360,10 +167,9 @@  (lookupDataService(): signature joined onto one line; formatting only)
@@ -371,10 +177,9 @@
         String hostname = NicUtil.getIpAddress("default.nic", "default", true);

         // Find the service registrar (unicast protocol).
-        final int timeout = 4*1000; // seconds.
-        System.err.println("hostname: "+hostname);
-        LookupLocator lookupLocator = new LookupLocator("jini://"+hostname);
-        ServiceRegistrar serviceRegistrar = lookupLocator.getRegistrar( timeout );
+        final int timeout = 4 * 1000; // seconds.
+        LookupLocator lookupLocator = new LookupLocator("jini://" + hostname);
+        ServiceRegistrar serviceRegistrar = lookupLocator.getRegistrar(timeout);
 [the remaining hunks of this file (@@ -385,19 +190,20 @@ through @@ -461,43 +257,30 @@)
  are mechanical: comment and indentation reflow in the ServiceTemplate setup and the
  lookup-retry loop, and conversion of the inherited assertEquals/assertTrue calls in
  assertEquals(PartitionLocator, ...) and assertEquals(LocalPartitionMetadata, ...) to
  the static Assert.assertEquals / Assert.assertTrue forms]

Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java
===================================================================
--- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java	2010-09-13 15:17:50 UTC (rev 3536)
+++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java	2010-09-13 19:56:24 UTC (rev 3537)
@@ -31,6 +31,12 @@
 import java.util.Random;
 import java.util.UUID;

+import junit.framework.Assert;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
 import com.bigdata.btree.IIndex;
 import com.bigdata.btree.ITuple;
 import com.bigdata.btree.ITupleIterator;
@@ -65,27 +71,18 @@
         this.serviceImplRemote = false;
     }

-    public TestBigdataClient(String name) {
-        super(name);
-        this.serviceImplRemote = false;
-    }
-
     public TestBigdataClient(boolean serviceImplRemote) {
         this.serviceImplRemote = serviceImplRemote;
     }

-    public TestBigdataClient(String name, boolean serviceImplRemote) {
-        super(name);
-        this.serviceImplRemote = serviceImplRemote;
-    }
-
     /**
      * Starts a {@link DataServer} ({@link #dataServer1}) and then a
      * {@link MetadataServer} ({@link #metadataServer0}). Each runs in its own
      * thread.
      */
+    @Before
     public void setUp() throws Exception {
-        super.setUp();
         helper = new JiniServicesHelper(serviceImplRemote);
         helper.start();
     }
@@ -95,12 +92,11 @@
     /**
      * Destroy the test services.
      */
+    @After
     public void tearDown() throws Exception {
         if (helper != null) {
             helper.destroy();
         }
-
-        super.tearDown();
     }
@@ -109,6 +105,7 @@
+    @Test
     public void test_registerIndex1() throws Exception {
         final IBigdataFederation<?> fed = helper.client.connect();
         final String name = "testIndex";
@@ -118,8 +115,7 @@
         fed.registerIndex(metadata);

         final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED);
-        assertEquals("indexUUID", metadata.getIndexUUID(), ndx
-                .getIndexMetadata().getIndexUUID());
+        Assert.assertEquals("indexUUID", metadata.getIndexUUID(), ndx.getIndexMetadata().getIndexUUID());

         doBasicIndexTests(ndx);
     }
@@ -130,6 +126,7 @@
+    @Test
     public void test_registerIndex2() throws Exception {
 [remaining hunks are the same conversions: the inherited assertEquals /
  assertNotNull calls in test_registerIndex2 and doBasicIndexTests are replaced
  by Assert.assertEquals / Assert.assertNotNull, e.g.:]
-        assertEquals(limit, ndx.rangeCount(null, null));
+        Assert.assertEquals(limit, ndx.rangeCount(null, null));

Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java
===================================================================
--- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java	2010-09-13 15:17:50 UTC (rev 3536)
+++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java	2010-09-13 19:56:24 UTC (rev 3537)
@@ -33,8 +33,4 @@
     public TestBigdataClientRemote() {
         super(true);
     }
-
-    public TestBigdataClientRemote(String name) {
-        super(name, true);
-    }
 }
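The pattern applied to all three test classes above is the standard JUnit 3 to JUnit 4 migration: drop the TestCase/TestCase2 base class and its String-name constructors, annotate fixtures with @Before/@After and tests with @Test, and call assertions statically. A self-contained before/after sketch (Widget is a hypothetical class under test, not part of bigdata):

    // JUnit 3 (before): tests discovered by the test* naming convention on a
    // TestCase subclass; assertions and setUp/tearDown inherited.
    //
    //   public class TestWidget extends TestCase {
    //       public TestWidget(String name) { super(name); }
    //       protected void setUp() { widget = new Widget(); }
    //       public void test_size() { assertEquals(42, widget.size()); }
    //   }
    //
    // JUnit 4 (after), mirroring the conversion in the diff above:
    import junit.framework.Assert; // kept for its static assert methods, as in the diff
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestWidget {

        static class Widget { int size() { return 42; } } // hypothetical subject

        private Widget widget;

        @Before
        public void setUp() { widget = new Widget(); }

        @After
        public void tearDown() { widget = null; }

        @Test
        public void test_size() {
            Assert.assertEquals("size", 42, widget.size());
        }
    }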
From: <tho...@us...> - 2010-09-13 15:17:56
|
Revision: 3536
http://bigdata.svn.sourceforge.net/bigdata/?rev=3536&view=rev
Author: thompsonbry
Date: 2010-09-13 15:17:50 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
Modified log4j.properties to log the write cache buffer flushes.

Modified Paths:
--------------
branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties	2010-09-13 15:16:56 UTC (rev 3535)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/src/resources/logging/log4j.properties	2010-09-13 15:17:50 UTC (rev 3536)
@@ -30,6 +30,8 @@
 #log4j.logger.com.bigdata.samples=INFO
 #log4j.logger.com.bigdata.rdf.sail.tck=INFO

+log4j.logger.com.bigdata.io.writecache.WriteCacheService=INFO
+
 #log4j.logger.com.bigdata.io.DataOutputBuffer=INFO
 #log4j.logger.com.bigdata.io.FileChannelUtility=INFO
 #log4j.logger.com.bigdata.util.concurrent=INFO
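The single added line is sufficient because log4j resolves levels per logger name, with the most specific configured prefix winning. A minimal illustration (the root-logger line is assumed here; it is not part of the diff):

    # Hypothetical surrounding configuration: the root default stays quiet
    # while one class is opted in to INFO.
    log4j.rootLogger=WARN, console
    log4j.logger.com.bigdata.io.writecache.WriteCacheService=INFO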
From: <tho...@us...> - 2010-09-13 15:17:03
|
Revision: 3535
http://bigdata.svn.sourceforge.net/bigdata/?rev=3535&view=rev
Author: thompsonbry
Date: 2010-09-13 15:16:56 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
Modified the WriteCacheService to log cache evictions @ INFO.
Modified the bsbm ant script and properties to locate the correct log4j configuration file.
Fixed reporting for nclean and perhaps hitRate for the write cache / write cache service.

Modified Paths:
--------------
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties
branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties
branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java	2010-09-13 15:16:56 UTC (rev 3535)
@@ -48,6 +48,7 @@
 import org.apache.log4j.Logger;

 import com.bigdata.btree.IndexSegmentBuilder;
+import com.bigdata.counters.CAT;
 import com.bigdata.counters.CounterSet;
 import com.bigdata.counters.Instrument;
 import com.bigdata.io.DirectBufferPool;
@@ -792,7 +793,7 @@
         if ((md = recordMap.get(offset)) == null) {
             // The record is not in this write cache.
-            counters.nmiss.incrementAndGet();
+            counters.nmiss.increment();
             return null;
         }
@@ -843,7 +844,7 @@
         }
-        counters.nhit.incrementAndGet();
+        counters.nhit.increment();
         if (log.isTraceEnabled()) {
             log.trace(show(dst, "read bytes"));
         }
@@ -1329,12 +1330,12 @@
         /**
          * #of read requests that are satisfied by the write cache.
          */
-        public final AtomicLong nhit = new AtomicLong();
+        public final CAT nhit = new CAT();

         /**
          * The #of read requests that are not satisfied by the write cache.
          */
-        public final AtomicLong nmiss = new AtomicLong();
+        public final CAT nmiss = new CAT();

         /*
          * write on the cache.

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java	2010-09-13 15:16:56 UTC (rev 3535)
@@ -628,10 +628,23 @@
                 try {
                     cleanList.add(cache);
                     cleanListNotEmpty.signalAll();
-                    counters.get().nclean = dirtyList.size();
+                    counters.get().nclean = cleanList.size();
                 } finally {
                     cleanListLock.unlock();
                 }
+                if(log.isInfoEnabled()) {
+                    final WriteCacheServiceCounters tmp = counters.get();
+                    final long nhit = tmp.nhit.get();
+                    final long ntests = nhit + tmp.nmiss.get();
+                    final double hitRate = (ntests == 0L ? 0d : (double) nhit / ntests);
+                    log.info("WriteCacheService: bufferSize="
+                            + buffers[0].capacity() + ",nbuffers="
+                            + tmp.nbuffers + ",nclean=" + tmp.nclean
+                            + ",ndirty=" + tmp.ndirty + ",maxDirty="
+                            + tmp.maxdirty + ",nflush=" + tmp.nflush
+                            + ",nwrite=" + tmp.nwrite + ",hitRate="
+                            + hitRate);
+                }
             } catch (InterruptedException t) {
                 /*
@@ -1394,8 +1407,8 @@
     public boolean write(final long offset, final ByteBuffer data, final int chk, final boolean useChecksum)
             throws InterruptedException, IllegalStateException {
-        if (log.isInfoEnabled()) {
-            log.info("offset: " + offset + ", length: " + data.limit()
+        if (log.isTraceEnabled()) {
+            log.trace("offset: " + offset + ", length: " + data.limit()
                     + ", chk=" + chk + ", useChecksum=" + useChecksum);
         }
@@ -1675,8 +1688,8 @@
     protected boolean writeLargeRecord(final long offset, final ByteBuffer data, final int chk, final boolean useChecksum)
             throws InterruptedException, IllegalStateException {
-        if (log.isInfoEnabled()) {
-            log.info("offset: " + offset + ", length: " + data.limit() + ", chk=" + chk + ", useChecksum="
+        if (log.isTraceEnabled()) {
+            log.trace("offset: " + offset + ", length: " + data.limit() + ", chk=" + chk + ", useChecksum="
                     + useChecksum);
         }
@@ -1905,6 +1918,9 @@
         if (cache == null) {
             // No match.
+
+            counters.get().nmiss.increment();
+
             return null;
         }

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/RWStore.properties	2010-09-13 15:16:56 UTC (rev 3535)
@@ -13,6 +13,21 @@
 com.bigdata.btree.writeRetentionQueue.capacity=4000
 com.bigdata.btree.BTree.branchingFactor=128

+# Reduce the branching factor for the lexicon since BSBM uses a lot of long
+# literals.  Note that you have to edit this override to specify the namespace
+# into which the BSBM data will be loaded.
+com.bigdata.namespace.BSBM_284826.lex.TERM2ID.com.bigdata.btree.BTree.branchingFactor=32
+com.bigdata.namespace.BSBM_284826.lex.ID2TERM.com.bigdata.btree.BTree.branchingFactor=32
+
+# Override the #of write cache buffers.
+com.bigdata.journal.AbstractJournal.writeCacheBufferCount=12
+
+# Note: You must override the buffer capacity in build.xml on the
+# "run-load" target, but this would give you 10M write cache buffers
+# if you placed that override there.
+#
+# -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760
+
 # 200M initial extent.
 com.bigdata.journal.AbstractJournal.initialExtent=209715200
 com.bigdata.journal.AbstractJournal.maximumExtent=209715200

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.properties	2010-09-13 15:16:56 UTC (rev 3535)
@@ -57,9 +57,9 @@
 # Laptop
 #bsbm.baseDir=d:/bigdata-perf-analysis/bsbm/bsbm_${bsbm.pc}
 # Server
-#bsbm.baseDir=/nas/data/bsbm/bsbm_${bsbm.pc}
+bsbm.baseDir=/nas/data/bsbm/bsbm_${bsbm.pc}
 # Windows 2008 Server
-bsbm.baseDir=c:/usr/local/data/bsbm/bsbm_${bsbm.pc}
+#bsbm.baseDir=c:/usr/local/data/bsbm/bsbm_${bsbm.pc}

 # Where to put the XML results files.
 bsbm.resultsDir=${bsbm.baseDir}/..
@@ -71,12 +71,12 @@
 bsbm.outputType=nt

 # Specify ".gz" or ".zip" if pre-generated files have been compressed.
-bsbm.compressType=
-#bsbm.compressType=".gz"
+#bsbm.compressType=
+bsbm.compressType=".gz"

 # Which mode to use for the Journal. (DiskRW or DiskWORM)
-#journalMode=RW
-journalMode=WORM
+journalMode=RW
+#journalMode=WORM

 # The name of the file containing the generated RDF data without the filename extension.
 bsbm.outputFile=${bsbm.baseDir}/dataset
@@ -89,11 +89,11 @@
 #bsbm.journalFile=${bsbm.baseDir}/bigdata-bsbm.worm
 #bsbm.journalFile=${bsbm.baseDir}/bigdata-bsbm.jnl
 # Note: This is on the large volume.
-#bsbm.journalFile=/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
+bsbm.journalFile=/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
 # Windows 2008 Server: SSD.
 #bsbm.journalFile=e:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
 # Windows 2008 Server: SAS.
-bsbm.journalFile=f:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl
+#bsbm.journalFile=f:/data/bsbm/bsbm_${bsbm.pc}/bigdata-bsbm.${journalMode}.jnl

 #
 # Qualification of the system under test.
@@ -144,7 +144,7 @@
 # Use a specific seed (hot disk cache run with only JVM tuning effects).
 #bsbm.seed=1273687925860
-bsbm.seed=1273687925861
+bsbm.seed=919191

 #
 # Profiler parameters.
@@ -167,7 +167,7 @@
 profiler=${profilerAgent} ${profilerAgentOptions}

 # Configure GC.
-gcopts=
+#gcopts=
 #gcopts=-verbose:gc
 #gcopts=-XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode
 gcopts=-XX:+UseParallelOldGC
@@ -191,4 +191,5 @@
 ## -Dcom.bigdata.LRUNexus.percentHeap=.1

 # all jvm args for query.
-queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties
+queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:log4j.properties
+# -Dlog4j.debug

Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml	2010-09-13 14:53:57 UTC (rev 3534)
+++ branches/JOURNAL_HA_BRANCH/bigdata-perf/bsbm/build.xml	2010-09-13 15:16:56 UTC (rev 3535)
@@ -50,14 +50,14 @@
             <exclude name="**/*.java" />
             <exclude name="**/package.html" />
         </fileset>
-        <!-- copy log4j configuration file. -->
-        <fileset dir="${bsbm.dir}/src/resources/logging" />
     </copy>
     <copy toDir="${build.dir}/bin">
         <!-- copy benchmark data and queries. -->
         <fileset dir="${bsbm.dir}/src/resources/bsbm-data" />
         <!-- copy the journal configuration file. -->
         <fileset file="${bsbm.dir}/*.properties" />
+        <!-- copy log4j configuration file. -->
+        <fileset dir="${bsbm.dir}/src/resources/logging" />
     </copy>
 </target>
@@ -144,7 +144,10 @@
     <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true" dir="${build.dir}/bin">
         <arg line="-namespace ${bsbm.namespace} ${bsbm.journalPropertyFile} ${bsbm.outputFile}.${bsbm.outputType}${bsbm.compressType}" />
         <!-- specify/override the journal file name. -->
-        <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${bsbm.journalFile}" />
+        <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${bsbm.journalFile}
+                      -Dcom.bigdata.rdf.store.DataLoader.bufferCapacity=1000000
+                      -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760
+                      " />
         <classpath>
             <path refid="runtime.classpath" />
         </classpath>
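Two details of this diff are worth pulling out. First, the nhit/nmiss counters move from AtomicLong to CAT, bigdata's striped concurrent counter, which trades exact intermediate reads for cheaper increments on the read hot path. Second, the flush-time log line computes the hit rate with a zero guard, and nclean is now sampled from the clean list rather than (incorrectly) the dirty list. The hit-rate arithmetic, reduced to a standalone helper with plain longs standing in for the CAT reads:

    /**
     * Hit rate as logged by the WriteCacheService after rev 3535 (simplified:
     * the real code reads nhit/nmiss from CAT counters at logging time).
     */
    static double hitRate(final long nhit, final long nmiss) {
        final long ntests = nhit + nmiss;
        // Guard the cold-start case where no reads have consulted the cache yet.
        return ntests == 0L ? 0d : (double) nhit / ntests;
    }

For example, hitRate(9900, 100) reports 0.99. Since CAT reads are not atomic snapshots, the logged figure is an estimate under concurrency, which is adequate for monitoring.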
From: <sgo...@us...> - 2010-09-13 14:54:07
|
Revision: 3534
http://bigdata.svn.sourceforge.net/bigdata/?rev=3534&view=rev
Author: sgossard
Date: 2010-09-13 14:53:57 +0000 (Mon, 13 Sep 2010)

Log Message:
-----------
[merge dev-btm --> maven_scaleout] : Merge of ^/branches/dev-btm, revision 3463. The merged changes from the dev-btm branch included trunk revisions 3432:3460.

Revision Links:
--------------
http://bigdata.svn.sourceforge.net/bigdata/?rev=3463&view=rev

Modified Paths (all relative to branches/maven_scaleout/bigdata-core/):
--------------
src/main/deploy/bin/config/browser.config
src/main/deploy/bin/pstart
src/main/deploy/var/config/jini/bigdataCluster.config
src/main/deploy/var/config/jini/bigdataStandalone.config
src/main/deploy/var/config/jini/shardlocator.config
src/main/deploy/var/config/logging/log4j.properties
src/main/deploy/var/config/logging/shardlocator-logging.properties
src/main/deploy/var/config/logging/transaction-logging.properties
src/main/java/com/bigdata/DataFinder.java
src/main/java/com/bigdata/cache/RingBuffer.java
src/main/java/com/bigdata/executor/ServiceImpl.java
src/main/java/com/bigdata/jini/start/BigdataZooDefs.java
src/main/java/com/bigdata/jini/start/config/BigdataServiceConfiguration.java
src/main/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java
src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java
src/main/java/com/bigdata/jini/start/config/ServicesManagerConfiguration.java
src/main/java/com/bigdata/jini/start/config/TransactionServerConfiguration.java
src/main/java/com/bigdata/jini/start/process/JiniServiceProcessHelper.java
src/main/java/com/bigdata/journal/AbstractLocalTransactionManager.java
src/main/java/com/bigdata/journal/AbstractTask.java
src/main/java/com/bigdata/journal/BufferMode.java
src/main/java/com/bigdata/journal/IConcurrencyManager.java
src/main/java/com/bigdata/journal/ILocalTransactionManager.java
src/main/java/com/bigdata/journal/ITransactionService.java
src/main/java/com/bigdata/journal/JournalTransactionService.java
src/main/java/com/bigdata/journal/TransactionService.java
src/main/java/com/bigdata/journal/WriteExecutorService.java
src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java
src/main/java/com/bigdata/loadbalancer/ServiceImpl.java
src/main/java/com/bigdata/mdi/PartitionLocator.java
src/main/java/com/bigdata/metadata/AdminProxy.java
src/main/java/com/bigdata/metadata/Constants.java
src/main/java/com/bigdata/metadata/PrivateInterface.java
src/main/java/com/bigdata/metadata/ServiceImpl.java
src/main/java/com/bigdata/metadata/ServiceProxy.java
src/main/java/com/bigdata/process/ServiceImpl.java
src/main/java/com/bigdata/rdf/sail/BigdataSail.java
src/main/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java
src/main/java/com/bigdata/rdf/spo/SPO.java
src/main/java/com/bigdata/resources/AsynchronousOverflowTask.java
src/main/java/com/bigdata/resources/CompactingMergeTask.java
src/main/java/com/bigdata/resources/ResourceManager.java
src/main/java/com/bigdata/resources/StoreManager.java
src/main/java/com/bigdata/service/AbstractFederation.java
src/main/java/com/bigdata/service/AbstractIndexCache.java
src/main/java/com/bigdata/service/AbstractScaleOutFederation.java
src/main/java/com/bigdata/service/AbstractTransactionService.java
src/main/java/com/bigdata/service/CommitTimeIndex.java
src/main/java/com/bigdata/service/DataService.java
src/main/java/com/bigdata/service/DefaultClientDelegate.java
src/main/java/com/bigdata/service/DefaultServiceFederationDelegate.java
src/main/java/com/bigdata/service/DistributedTransactionService.java
src/main/java/com/bigdata/service/EmbeddedFederation.java
src/main/java/com/bigdata/service/IBigdataFederation.java
src/main/java/com/bigdata/service/IEventReceivingService.java
src/main/java/com/bigdata/service/IFederationDelegate.java
src/main/java/com/bigdata/service/IMetadataService.java
src/main/java/com/bigdata/service/IServiceShutdown.java
src/main/java/com/bigdata/service/IndexCache.java
src/main/java/com/bigdata/service/ListIndicesTask.java
src/main/java/com/bigdata/service/MetadataIndexCache.java
src/main/java/com/bigdata/service/NoCacheMetadataIndexView.java
src/main/java/com/bigdata/service/ShardLocator.java
src/main/java/com/bigdata/service/jini/JiniFederation.java
src/main/java/com/bigdata/service/jini/LoadBalancerServer.java
src/main/java/com/bigdata/service/jini/MetadataServer.java
src/main/java/com/bigdata/service/jini/lookup/LoadBalancerClient.java
src/main/java/com/bigdata/service/jini/lookup/TransactionServiceClient.java
src/main/java/com/bigdata/service/jini/master/ServiceMap.java
src/main/java/com/bigdata/service/jini/util/DumpFederation.java
src/main/java/com/bigdata/service/jini/util/JiniServicesHelper.java
src/main/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java
src/main/java/com/bigdata/service/ndx/ClientIndexView.java
src/main/java/com/bigdata/service/ndx/RawDataServiceTupleIterator.java
src/main/java/com/bigdata/shard/ServiceImpl.java
src/main/java/com/bigdata/transaction/AdminProxy.java
src/main/java/com/bigdata/transaction/Constants.java
src/main/java/com/bigdata/transaction/PrivateInterface.java
src/main/java/com/bigdata/transaction/ServiceImpl.java
src/main/java/com/bigdata/transaction/ServiceProxy.java
src/main/java/com/bigdata/util/Util.java
src/test/deploy/testing/conf/bigdataStandaloneTesting.config
src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config
src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config
src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config
src/test/java/com/bigdata/cache/TestRingBuffer.java
src/test/java/com/bigdata/jini/start/DestroyTransactionService.java
src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java
src/test/java/com/bigdata/jini/start/TestServiceStarter.java
src/test/java/com/bigdata/jini/start/config/TestServiceConfiguration.java
src/test/java/com/bigdata/journal/TestAll.java
src/test/java/com/bigdata/journal/TestTransactionService.java
src/test/java/com/bigdata/rdf/rules/TestOptionals.java
src/test/java/com/bigdata/rdf/store/TestLocalTripleStoreTransactionSemantics.java
src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java
src/test/java/com/bigdata/resources/MockLocalTransactionManager.java
src/test/java/com/bigdata/resources/MockTransactionService.java
src/test/java/com/bigdata/service/AbstractEmbeddedFederationTestCase.java
src/test/java/com/bigdata/service/TestEventReceiver.java
src/test/java/com/bigdata/service/TestMove.java
src/test/java/com/bigdata/service/TestRestartSafe.java
src/test/java/com/bigdata/service/ndx/pipeline/TestMasterTask.java

Added Paths:
-----------
src/main/java/com/bigdata/metadata/EmbeddedIndexStore.java
src/main/java/com/bigdata/metadata/EmbeddedShardLocator.java
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ShardManagement.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/TxState.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/lookup/ShardLocatorClient.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/transaction/EmbeddedTransactionService.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoringRemote.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/jini/start/TestServiceStarterRemote.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/jini/start/config/TestServiceConfigurationRemote.java Property Changed: ---------------- branches/maven_scaleout/ branches/maven_scaleout/bigdata-core/src/main/deploy/bin/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties branches/maven_scaleout/bigdata-core/src/main/java/ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/attr/ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/sail/bench/ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/util/ branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/bigdataStandaloneTesting.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config branches/maven_scaleout/bigdata-core/src/test/java/ Property changes on: branches/maven_scaleout ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440 /branches/fko:3150-3194 /trunk:3379-3430 + /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/bin ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460 Modified: 
branches/maven_scaleout/bigdata-core/src/main/deploy/bin/config/browser.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/bin/config/browser.config 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/bin/config/browser.config 2010-09-13 14:53:57 UTC (rev 3534) @@ -35,6 +35,7 @@ "net.jini.core.constraint.RemoteMethodControl", "net.jini.id.ReferentUuid", "com.bigdata.service.Service", + "com.bigdata.service.ShardManagement", "com.bigdata.service.EventReceivingService" }; } Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/bin/pstart =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/bin/pstart 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/bin/pstart 2010-09-13 14:53:57 UTC (rev 3534) @@ -218,6 +218,12 @@ os.path.join(self.appHome, "lib", "start.jar"), os.path.join(self.appHome, "lib", "log4j.jar"), os.path.join(self.appHome, "lib", "bigdata-core.jar"), + os.path.join(self.appHome, "lib", "fastutil.jar"), + os.path.join(self.appHome, "lib", "dsiutils.jar"), + os.path.join(self.appHome, "lib", "cweb-extser.jar"), + os.path.join(self.appHome, "lib", "icu4j.jar"), + os.path.join(self.appHome, "lib", "ctc_utils.jar"), + os.path.join(self.appHome, "lib", "lgplutils.jar"), os.path.join(self.appHome, "lib", "zookeeper.jar"), os.path.join(self.appHome, "lib", "jsk-lib.jar") ] Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-13 14:53:57 UTC (rev 3534) @@ -629,12 +629,15 @@ "jini", "org.apache.zookeeper.server.quorum.QuorumPeerMain", - "com.bigdata.service.jini.TransactionServer", - "com.bigdata.service.jini.MetadataServer", +//BTM "com.bigdata.service.jini.TransactionServer", +//BTM "com.bigdata.service.jini.MetadataServer", "com.bigdata.service.jini.DataServer", - "com.bigdata.service.jini.LoadBalancerServer", -//BTM "com.bigdata.loadbalancer.ServiceImpl", - "com.bigdata.service.jini.ClientServer" +//BTM "com.bigdata.service.jini.LoadBalancerServer", + "com.bigdata.service.jini.ClientServer", + +"com.bigdata.transaction.ServiceImpl", +"com.bigdata.metadata.ServiceImpl", +"com.bigdata.loadbalancer.ServiceImpl" }; @@ -1191,6 +1194,52 @@ } //BTM - BEGIN +com.bigdata.transaction.ServiceImpl { + constraints = new IServiceConstraint[] { + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + new HostAllowConstraint(bigdata.txs) + }; + + properties = new NV[] { + new NV(TransactionServer.Options.MIN_RELEASE_AGE, "0"), + }; + + args = new String[]{ + "-Xmx200m", + "-Djava.util.logging.config.file=@NAS@/dist/bigdata/var/config/logging/transaction-logging.properties", + "-Dlog4j.configuration=@NAS@/dist/bigdata/var/config/logging/transaction-logging.properties", + "-Dlog4j.primary.configuration=@NAS@/dist/bigdata/var/config/logging/transaction-logging.properties", + "-DusingServiceConfiguration=true", + "-Dbigdata.logDir=@NAS@/dist/bigdata/var/log", + "-DappHome=@APP_HOME@", + "-Dconfig=@NAS@/dist/bigdata/var/config/jini/transaction.config" + }; +} + +com.bigdata.metadata.ServiceImpl { + constraints = 
new IServiceConstraint[] { + new JiniRunningConstraint(), + new ZookeeperRunningConstraint(), + new HostAllowConstraint(bigdata.mds) + }; + + properties = new NV[] { + new NV(MetadataServer.Options.OVERFLOW_ENABLED,"false") + }; + + args = new String[]{ + "-Xmx200m", + "-Djava.util.logging.config.file=@NAS@/dist/bigdata/var/config/logging/shardlocator-logging.properties", + "-Dlog4j.configuration=@NAS@/dist/bigdata/var/config/logging/shardlocator-logging.properties", + "-Dlog4j.primary.configuration=@NAS@/dist/bigdata/var/config/logging/shardlocator-logging.properties", + "-DusingServiceConfiguration=true", + "-Dbigdata.logDir=@NAS@/dist/bigdata/var/log", + "-DappHome=@APP_HOME@", + "-Dconfig=@NAS@/dist/bigdata/var/config/jini/shardlocator.config" + }; +} + com.bigdata.loadbalancer.ServiceImpl { constraints = new IServiceConstraint[] { new JiniRunningConstraint(), @@ -1727,34 +1776,17 @@ // When true, a pre-existing job with the same name is deleted first. deleteJob = true; - // ALTERNATIVE 1: Start http servers for the directories containing - // the ontology and the data files: + // Scanner identifies resources to be loaded. + resourceScannerFactory = com.bigdata.service.jini.master.FileSystemScanner.newFactory( + new File("@NAS@/lubm/U10"), // dataDir + //new File("/nas/metrics/lehigh/U10-compressed"), // dataDir + new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. + ); - ontologyDir = new File("/tmp/lubm"); - dataDir = new File("/tmp/lubm/datafiles"); - static dataServer = new com.bigdata.service.jini.master.FileServer( - dataDir, 20, 8702, 20); - static ontologyServer = new com.bigdata.service.jini.master.FileServer( - ontologyDir, 5, 8703, 5); - resourceScannerFactory = - com.bigdata.service.jini.master.FileSystemScannerServer.newFactory( - dataDir, - new com.bigdata.rdf.load.RDFFilenameFilter(), dataServer); - ontology = com.bigdata.service.jini.master.FileServer.getURL( - ontologyServer, "/univ-bench.owl"); + // The ontology to load (file or directory) when the KB is created. + ontology = new File("@install.lubm.config.dir@/univ-bench.owl"); + //ontology = new File("/nas/metrics/lehigh/univ-bench.owl"); - // ALTERNATIVE 2: Supply the data files and ontology from an - // external web server. - -// ontology = new URL("http://stub/lubm/univ-bench.owl"); -// resourceScannerFactory = -// com.bigdata.service.jini.master.URLListScanner.newFactory( -// new URL[] { -// new URL("http://stub/lubm/datafiles/University0_0.owl"), -// new URL("http://stub/lubm/datafiles/University0_1.owl"), -// ... -// }); - // The maximum thread pool size for RDF parser tasks. //parserPoolSize = 5; @@ -1805,7 +1837,7 @@ forceOverflow = false; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates]. + * that you specify for [servicesTemplates] and [clientsTemplate]. */ awaitServicesTimeout = 10000; @@ -1849,8 +1881,25 @@ }; - // Minimum number of client services for distributed execution. - clientServiceCount = bigdata.clientServiceCount; + /* Template for matching the services to which the clients will be + * distributed for execution. Normally you will specify + * IClientService as the interface to be discovered. While it is + * possible to run tasks on an IDataService or even an + * IMetadataService since they both implement IRemoteExecutor, it + * is generally discouraged unless the tasks require explicit + * access to the local index partitions for their execution. 
+ */ + clientsTemplate = new ServicesTemplate( + bigdata.clientServiceCount, // minMatches + new ServiceTemplate( + null, //serviceID + new Class[]{ + com.bigdata.service.IClientService.class + }, + null // attributes + ), + null // filter + ); /* * RDF distributed data loader options. @@ -1891,7 +1940,7 @@ forceOverflow = true; /* How long the master will wait in milliseconds to discover the services - * that you specify for [servicesTemplates]. + * that you specify for [servicesTemplates] and [clientsTemplate]. */ awaitServicesTimeout = 10000; @@ -1935,8 +1984,25 @@ }; - // Minimum number of client services for distributed execution. - clientServiceCount = bigdata.clientServiceCount; + /* Template for matching the services to which the clients will be + * distributed for execution. Normally you will specify + * IClientService as the interface to be discovered. While it is + * possible to run tasks on an IDataService or even an + * IMetadataService since they both implement IRemoteExecutor, it + * is generally discouraged unless the tasks require explicit + * access to the local index partitions for their execution. + */ + clientsTemplate = new ServicesTemplate( + bigdata.clientServiceCount, // minMatches + new ServiceTemplate( + null, //serviceID + new Class[]{ + com.bigdata.service.IClientService.class + }, + null // attributes + ), + null // filter + ); /* * RDF distributed data loader options. @@ -2047,7 +2113,7 @@ } -com.bigdata.service.jini.util.BroadcastSighup { +com.bigdata.service.jini.BroadcastSighup { pushConfig = false; @@ -2111,8 +2177,24 @@ }; - // Minimum number of client services for distributed execution. - clientServiceCount = bigdata.clientServiceCount; + /* Template for matching the services to which the clients will be + * distributed for execution. Normally you will specify + * IClientService as the interface to be discovered. While it is + * possible to run tasks on an IDataService or even an + * IMetadataService since they both implement IRemoteExecutor, it + * is generally discouraged unless the tasks require explicit + * access to the local index partitions for their execution. + */ + clientsTemplate = new ServicesTemplate( + bigdata.clientServiceCount, // minMatches + new ServiceTemplate( + null, //serviceID + new Class[]{ + com.bigdata.service.IClientService.class + }, + null), // attributes + null // filter + ); /* The initial #of index partitions for the scale-out index * (computed as #partitions per data service). 
Choose at least Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2574-3440,3443,3463 /branches/dev-btm/src/resources/config/bigdataCluster.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3379-3430,3432-3460 Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataStandalone.config 2010-09-13 14:53:57 UTC (rev 3534) @@ -37,6 +37,7 @@ import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.lexicon.LexiconKeyOrder; import com.bigdata.rawstore.Bytes; +import com.bigdata.DataFinder; import java.net.URL; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit.*; @@ -1743,13 +1744,13 @@ // Scanner identifies resources to be loaded. resourceScannerFactory = com.bigdata.service.jini.master.URLListScanner.newFactory( - new File("/nas/data/lubm/U1000").toURI().toURL(), // dataDir + DataFinder.bestURL("/nas/data/lubm/U1000")// dataDir new com.bigdata.rdf.load.RDFFilenameFilter() // optional filename filter. ); // The ontology to load (file or directory) when the KB is created. //ontology = new File("@install.lubm.config.dir@/univ-bench.owl"); - ontology = new File("/nas/data/lubm/univ-bench.owl").toURI().toURL(); + ontology = DataFinder.bestURL("/nas/data/lubm/univ-bench.owl"); // The maximum thread pool size for RDF parser tasks. 
//parserPoolSize = 5; Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config 2010-09-13 14:53:57 UTC (rev 3534) @@ -16,16 +16,29 @@ com.bigdata.metadata { - private static exportIpAddr = + // for exporting the service + + private static serverExporterIpAddr = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); - private static exportPort = + private static serverExporterPort = Integer.parseInt( System.getProperty("exportPort", "0") ); + private static serverEnableDgc = false; + private static serverKeepAlive = true; + private static serverExporterTcpServerEndpoint = + TcpServerEndpoint.getInstance(serverExporterIpAddr, serverExporterPort); - groupsToJoin = ConfigDeployUtil.getGroupsToDiscover(); - locatorsToJoin = ConfigDeployUtil.getLocatorsToDiscover(); + // for exporting remote futures - private static exporterTcpServerEndpoint = - TcpServerEndpoint.getInstance(exportIpAddr, exportPort); + private static futureExporterIpAddr = + NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.dataNetwork"), false); + private static futureExporterPort = 0; + private static futureEnableDgc = true; + private static futureKeepAlive = false; + private static futureExporterTcpServerEndpoint = + TcpServerEndpoint.getInstance(futureExporterIpAddr, futureExporterPort); + + //shared by both exporters + private static serverILFactory = new BasicILFactory( new BasicMethodConstraints( @@ -38,11 +51,20 @@ serverExporter = new BasicJeriExporter - (TcpServerEndpoint.getInstance(exportIpAddr,exportPort), - serverILFactory, - false, - true); + (serverExporterTcpServerEndpoint, + serverILFactory, + serverEnableDgc, + serverKeepAlive); + futureExporter = + new BasicJeriExporter + (futureExporterTcpServerEndpoint, + futureILFactory, + futureEnableDgc, + futureKeepAlive); + groupsToJoin = ConfigDeployUtil.getGroupsToDiscover(); + locatorsToJoin = ConfigDeployUtil.getLocatorsToDiscover(); + static discoveryManager = new LookupDiscoveryManager(groupsToJoin, locatorsToJoin, null, @@ -55,15 +77,15 @@ } net.jini.discovery.LookupDiscovery { - multicastRequestHost = com.bigdata.metadata.exportIpAddr; + multicastRequestHost = com.bigdata.metadata.serverExporterIpAddr; multicastInterfaces = new NetworkInterface[] { - NicUtil.getNetworkInterface(com.bigdata.metadata.exportIpAddr) + NicUtil.getNetworkInterface(com.bigdata.metadata.serverExporterIpAddr) }; } net.jini.lookup.ServiceDiscoveryManager { eventListenerExporter = new BasicJeriExporter - (com.bigdata.metadata.exporterTcpServerEndpoint, + (com.bigdata.metadata.serverExporterTcpServerEndpoint, com.bigdata.metadata.serverILFactory, false, false); } Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2633-3304 
/branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2574-3440,3443,3463 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/shardlocator.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3379-3430,3432-3460 Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging:2594-3237 /branches/dev-btm/bigdata/src/resources/logging:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging:2574-3440,3443,3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging:3379-3430,3432-3460 Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/log4j.properties =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/log4j.properties 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/log4j.properties 2010-09-13 14:53:57 UTC (rev 3534) @@ -210,3 +210,5 @@ #log4j.logger.com.bigdata.boot.BootComponentTest=DEBUG #log4j.logger.com.bigdata.boot.launcher.ConfigReaderUnitTest=DEBUG #log4j.logger.com.bigdata.process.ProcessConfigXmlHandlerTest=DEBUG + +log4j.logger.com.bigdata.transaction=DEBUG Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties 2010-09-13 14:53:57 UTC (rev 3534) @@ -36,3 +36,6 @@ ############################################################ #log4j.logger.com.bigdata.metadata=DEBUG +#log4j.logger.com.bigdata.metadata.EmbeddedShardLocator=DEBUG +#log4j.logger.com.bigdata.metadata.EmbeddedIndexStore=DEBUG + Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/shardlocator-logging.properties:3463 
/branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2574-3440,3443,3463 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3379-3430,3432-3460 Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties 2010-09-13 14:53:57 UTC (rev 3534) @@ -36,4 +36,5 @@ ############################################################ #log4j.logger.com.bigdata.transaction=DEBUG +#log4j.logger.com.bigdata.transaction.EmbeddedTransactionService=DEBUG Property changes on: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/transaction-logging.properties:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2574-3440,3443,3463 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/transaction/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3379-3430,3432-3460 Property changes on: branches/maven_scaleout/bigdata-core/src/main/java ___________________________________________________________________ Added: svn:mergeinfo + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/java:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/java:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/java:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/java:2594-3237 /branches/dev-btm/bigdata/src/java:3463 /branches/dev-btm/bigdata-core/src/main/java:2574-3440,3443,3463 /branches/dev-btm/bigdata-jini/src/java:3463 /branches/dev-btm/bigdata-rdf/src/java:3463 /branches/dev-btm/bigdata-sails/src/java:3463 /branches/fko/bigdata-core/src/main/java:3150-3194 /trunk/bigdata-core/src/main/java:3379-3430,3432-3460 Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/DataFinder.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/DataFinder.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/DataFinder.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -10,6 +10,7 @@ import org.slf4j.LoggerFactory; import java.io.*; +import 
java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.util.ArrayList; @@ -61,6 +62,10 @@ return DEFAULT_SEARCH.getBestURI(path); } + public static URL bestURL(String path) throws MalformedURLException { + return DEFAULT_SEARCH.getBestURI(path).toURL(); + } + public static DataFinder defaultFinder(){ return DEFAULT_SEARCH; } Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/attr:3379-3430 + /branches/dev-btm/bigdata-jini/src/java/com/bigdata/attr:3463 /trunk/bigdata-jini/src/java/com/bigdata/attr:3379-3430 Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/RingBuffer.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/RingBuffer.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/RingBuffer.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -154,7 +154,7 @@ public boolean add(final T ref) throws IllegalStateException { if (ref == null) - throw new IllegalArgumentException(); + throw new NullPointerException(); beforeOffer( ref ); @@ -178,7 +178,7 @@ public boolean offer(final T ref) { if (ref == null) - throw new IllegalArgumentException(); + throw new NullPointerException(); beforeOffer( ref ); @@ -387,12 +387,12 @@ if (index < 0 || index >= size) throw new IllegalArgumentException(); - if (index + 1 == size) { - - // remove the LRU position. - return remove(); - - } +// if (index + 1 == size) { +// +// // remove the LRU position. +// return remove(); +// +// } /* * Otherwise we are removing some non-LRU element. @@ -409,7 +409,7 @@ for (;;) { - int nexti = (i + 1) % capacity; // update index. + final int nexti = (i + 1) % capacity; // update index. if (nexti != head) { @@ -491,10 +491,9 @@ */ final public boolean scanHead(final int nscan, final T ref) { - assert nscan > 0; -// if (nscan <= 0) -// throw new IllegalArgumentException(); -// + if (nscan <= 0) + throw new IllegalArgumentException(); + if (ref == null) throw new IllegalArgumentException(); @@ -581,6 +580,9 @@ public boolean contains(final Object ref) { + if (ref == null) + throw new NullPointerException(); + // MRU to LRU scan. 
for (int n = 0, i = tail; n < size; n++) { @@ -601,7 +603,8 @@ throw new NullPointerException(); if (c == this) - throw new IllegalArgumentException(); + return true; +// throw new IllegalArgumentException(); for( Object e : c ) { Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-jini/src/java/com/bigdata/disco:3379-3430 + /branches/dev-btm/bigdata-jini/src/java/com/bigdata/disco:3463 /trunk/bigdata-jini/src/java/com/bigdata/disco:3379-3430 Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -300,7 +300,7 @@ private long EXECUTOR_TERMINATION_TIMEOUT = 1L*60L*1000L; public ShutdownThread() { - super("Build Server Request Service Shutdown thread"); + super("callable executor service shutdown thread"); setDaemon(false); } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/BigdataZooDefs.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/BigdataZooDefs.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/BigdataZooDefs.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -28,12 +28,16 @@ * serviceConfigMonitor * com.bigdata.service.jini.DataServer * lock0000000000 (Ephemeral) - * com.bigdata.service.jini.LoadBalancerServer or com.bigdata.loadbalancer.ServiceImpl + * com.bigdata.service.jini.LoadBalancerServer * lock0000000000 (Ephemeral) * com.bigdata.service.jini.MetadataServer * lock0000000000 (Ephemeral) * com.bigdata.service.jini.TransactionServer * lock0000000000 (Ephemeral) + * com.bigdata.loadbalancer.ServiceImpl + * lock0000000000 (Ephemeral) + * com.bigdata.metadata.ServiceImpl + * lock0000000000 (Ephemeral) * createPhysicalService * config * com.bigdata.service.jini.DataServer {DataServiceConfiguration} @@ -43,7 +47,7 @@ * logicalService0000000000 * election * physicalServiceb2bf8b98-da0c-42f5-ac65-027bf3304429 (Ephemeral) {UUID} - * com.bigdata.service.jini.LoadBalancerServer or com.bigdata.loadbalancer.ServiceImpl {LoadBalancerConfiguration} + * com.bigdata.service.jini.LoadBalancerServer * logicalService0000000000 * election * physicalService911a9b28-7396-4932-ab80-77078119e7e2 (Ephemeral) {UUID} @@ -55,15 +59,27 @@ * logicalService0000000000 * election * physicalService87522080-2da6-42be-84a8-4a863b420042 (Ephemeral) {UUID} + * com.bigdata.loadbalancer.ServiceImpl {LoadBalancerConfiguration} + * logicalService0000000000 + * election + * physicalService911a9b28-7396-4932-ab80-77078119e7e3 (Ephemeral) {UUID} + * com.bigdata.metadata.ServiceImpl {MetadataServiceConfiguration} + * logicalService0000000000 + * election + * physicalServicec0f35d2e-0a20-40c4-bb76-c97e7cb72eb4 (Ephemeral) {UUID} * services * com.bigdata.service.jini.TransactionServer * instances (persistent znodes) - * com.bigdata.service.jini.LoadBalancerServer or com.bigdata.loadbalancer.ServiceImpl + * com.bigdata.service.jini.LoadBalancerServer * instances (persistent znodes) * com.bigdata.service.jini.MetadataServer * 
instances (persistent znodes) * com.bigdata.service.jini.DataServerServer * instances (persistent znodes) + * com.bigdata.loadbalancer.ServiceImpl + * instances (persistent znodes) + * com.bigdata.metadata.ServiceImpl + * instances (persistent znodes) * </pre> * * Each {@link ServiceConfiguration} znode defines the service type, the target Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/BigdataServiceConfiguration.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/BigdataServiceConfiguration.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/BigdataServiceConfiguration.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -81,6 +81,14 @@ boolean classCastOk = false; if( AbstractServer.class.isAssignableFrom(cls) ) { classCastOk = true; + } else if( com.bigdata.service.jini.MetadataServer.class.isAssignableFrom(cls) ) { + classCastOk = true; + } else if( com.bigdata.metadata.ServiceImpl.class.isAssignableFrom(cls) ) { + classCastOk = true; + } else if( com.bigdata.service.jini.TransactionServer.class.isAssignableFrom(cls) ) { + classCastOk = true; + } else if( com.bigdata.transaction.ServiceImpl.class.isAssignableFrom(cls) ) { + classCastOk = true; } else if( com.bigdata.service.jini.LoadBalancerServer.class.isAssignableFrom(cls) ) { classCastOk = true; } else if( com.bigdata.loadbalancer.ServiceImpl.class.isAssignableFrom(cls) ) { Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/JiniServiceConfiguration.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -34,7 +34,6 @@ import java.io.OutputStreamWriter; import java.io.StringWriter; import java.io.Writer; -import java.net.InetAddress; import java.util.Arrays; import java.util.Date; import java.util.Enumeration; @@ -132,8 +131,6 @@ public final Properties properties; public final String[] jiniOptions; - private final String serviceIpAddr; - protected void toString(StringBuilder sb) { super.toString(sb); @@ -181,12 +178,6 @@ } else { log.warn("groups = " + Arrays.toString(this.groups)); } - - try { - this.serviceIpAddr = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); - } catch(IOException e) { - throw new ConfigurationException(e.getMessage(), e); - } } /** @@ -262,6 +253,9 @@ "net.jini.jeri.tcp.TcpServerEndpoint", "net.jini.discovery.LookupDiscovery", +//BTM +"net.jini.discovery.LookupDiscoveryManager", + "net.jini.core.discovery.LookupLocator", "net.jini.core.entry.Entry", "net.jini.lookup.entry.Name", @@ -434,7 +428,6 @@ } out.write("}\n"); - out.write("\n\n" + className + " {\n"); writeServiceDescription(out); out.write("}\n"); @@ -483,6 +476,9 @@ final ServiceDir serviceDir = new ServiceDir(this.serviceDir); + String serviceIpAddr = NicUtil.getIpAddress ( "default.nic", "default", false ) ; + if ( null == serviceIpAddr ) + throw new IOException ( "Can't get a host ip address" ) ; final Hostname hostName = new Hostname(serviceIpAddr); final ServiceUUID serviceUUID = new ServiceUUID(this.serviceUUID); @@ -831,6 +827,7 @@ 
final ServiceID serviceID = JiniUtil .uuid2ServiceID(serviceUUID); +System.out.println("\n**** JiniServiceConfiguration.awaitServiceDiscoveryOrDeath: serviceID = "+serviceID); final ServiceItem[] items = serviceDiscoveryManager.lookup( new ServiceTemplate(// Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/MetadataServerConfiguration.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -37,6 +37,9 @@ import com.bigdata.service.jini.MetadataServer; import com.bigdata.util.NV; +//BTM +import com.bigdata.metadata.EmbeddedShardLocator; + /** * Configuration for the {@link MetadataServer}. * @@ -54,17 +57,27 @@ /** * @param config */ - public MetadataServerConfiguration(Configuration config) - throws ConfigurationException { - - super(MetadataServer.class, config); - +//BTM - BEGIN +//BTM public MetadataServerConfiguration(Configuration config) +//BTM throws ConfigurationException { +//BTM +//BTM super(MetadataServer.class, config); +//BTM +//BTM } + public MetadataServerConfiguration(Class classType, + Configuration config) + throws ConfigurationException + { + super(classType, config); +System.out.println("*** MetadataServerConfiguration: constructor ***"); } +//BTM - END public MetadataServiceStarter newServiceStarter(JiniFederation fed, IServiceListener listener, String zpath, Entry[] attributes) throws Exception { +System.out.println("*** MetadataServerConfiguration ---> newServiceStarter ***"); return new MetadataServiceStarter(fed, listener, zpath, attributes); } @@ -81,14 +94,27 @@ IServiceListener listener, String zpath, Entry[] attributes) { super(fed, listener, zpath, attributes); +System.out.println("*** MetadataServerConfiguration.MetadataServiceStarter: constructor ***"); } @Override protected NV getDataDir() { - return new NV(MetadataServer.Options.DATA_DIR, serviceDir - .toString()); +//BTM return new NV(MetadataServer.Options.DATA_DIR, serviceDir +//BTM .toString()); +//BTM - BEGIN + // className field defined/set in ServiceConfiguration parent + if ( (MetadataServer.class.getName()).equals(className) ) { +System.out.println("*** MetadataConfiguration.MetadataServiceStarter: getDataDir [MetadataServer.Options.DATA_DIR="+MetadataServer.Options.DATA_DIR+", serviceDir="+serviceDir.toString()+"] ***"); + return new NV(MetadataServer.Options.DATA_DIR, serviceDir.toString()); + } else if ( (com.bigdata.loadbalancer.ServiceImpl.class.getName()).equals(className) ) { +System.out.println("*** MetadataConfiguration.MetadataServiceStarter: getDataDir [EmbeddedShardLocator.Options.DATA_DIR="+EmbeddedShardLocator.Options.DATA_DIR+", serviceDir="+serviceDir.toString()+"] ***"); + return new NV(EmbeddedShardLocator.Options.DATA_DIR, serviceDir.toString()); + } else { + return null; + } +//BTM - END } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/ServicesManagerConfiguration.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/ServicesManagerConfiguration.java 2010-09-12 19:57:56 UTC (rev 3533) +++ 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/ServicesManagerConfiguration.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -223,45 +223,64 @@ // .getName(), config)); } else if (a.equals(QuorumPeerMain.class.getName())) { + System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: QuorumPeerMain BEGIN"); - v.add(new ZookeeperServerConfiguration(config)); +System.out.println("*** ServicesManagerConfiguration.getConfigurations: QuorumPeerMain END\n"); -System.out.println("*** ServicesManagerConfiguration.getConfigurations: QuorumPeerMain END\n"); } else if (a.equals(TransactionServer.class.getName())) { + System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: TransactionServer BEGIN"); + v.add(new TransactionServerConfiguration(TransactionServer.class, config)); +System.out.println("*** ServicesManagerConfiguration.getConfigurations: TransactionServer END\n"); - v.add(new TransactionServerConfiguration(config)); + } else if (a.equals(MetadataServer.class.getName())) { -System.out.println("*** ServicesManagerConfiguration.getConfigurations: TransactionServer END\n"); - } else if (a.equals(MetadataServer.class.getName())) { System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: MetaDataServer BEGIN"); + v.add(new MetadataServerConfiguration(MetadataServer.class, config)); +System.out.println("*** ServicesManagerConfiguration.getConfigurations: MetaDataServer END\n"); - v.add(new MetadataServerConfiguration(config)); + } else if (a.equals(DataServer.class.getName())) { -System.out.println("*** ServicesManagerConfiguration.getConfigurations: MetaDataServer END\n"); - } else if (a.equals(DataServer.class.getName())) { System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: DataServer BEGIN"); - v.add(new DataServerConfiguration(config)); - System.out.println("*** ServicesManagerConfiguration.getConfigurations: DataServer END\n"); } else if (a.equals(LoadBalancerServer.class.getName())) { + System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: LoadBalancerServer BEGIN"); v.add(new LoadBalancerConfiguration(LoadBalancerServer.class, config)); System.out.println("*** ServicesManagerConfiguration.getConfigurations: LoadBalancerServer END\n"); - } else if (a.equals(com.bigdata.loadbalancer.ServiceImpl.class.getName())) { + + } else if (a.equals(ClientServer.class.getName())) { + +System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: ClientServer BEGIN"); + v.add(new ClientServerConfiguration(config)); +System.out.println("*** ServicesManagerConfiguration.getConfigurations: ClientServer END\n"); + + +//BTM - BEGIN: smart proxy impls ------------------------------------------------------------ + + } else if (a.equals(com.bigdata.transaction.ServiceImpl.class.getName())) {//transaction service + +System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: com.bigdata.transaction.ServiceImpl BEGIN"); + v.add(new TransactionServerConfiguration(com.bigdata.transaction.ServiceImpl.class, config)); +System.out.println("*** ServicesManagerConfiguration.getConfigurations: com.bigdata.transaction.ServiceImpl END\n"); + + } else if (a.equals(com.bigdata.metadata.ServiceImpl.class.getName())) {//shard locator (metadata) service + +System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: com.bigdata.metadata.ServiceImpl BEGIN"); + v.add(new MetadataServerConfiguration(com.bigdata.metadata.ServiceImpl.class, config)); +System.out.println("*** 
ServicesManagerConfiguration.getConfigurations: com.bigdata.metadata.ServiceImpl END\n"); + + } else if (a.equals(com.bigdata.loadbalancer.ServiceImpl.class.getName())) {//load balancer service + System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: com.bigdata.loadbalancer.ServiceImpl BEGIN"); v.add(new LoadBalancerConfiguration(com.bigdata.loadbalancer.ServiceImpl.class, config)); System.out.println("*** ServicesManagerConfiguration.getConfigurations: com.bigdata.loadbalancer.ServiceImpl END\n"); - } else if (a.equals(ClientServer.class.getName())) { -System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: ClientServer BEGIN"); +//BTM - END: smart proxy impls ------------------------------------------------------------ - v.add(new ClientServerConfiguration(config)); - -System.out.println("*** ServicesManagerConfiguration.getConfigurations: ClientServer END\n"); } else { System.out.println("\n*** ServicesManagerConfiguration.getConfigurations: "+a+" BEGIN"); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/TransactionServerConfiguration.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/TransactionServerConfiguration.java 2010-09-12 19:57:56 UTC (rev 3533) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/start/config/TransactionServerConfiguration.java 2010-09-13 14:53:57 UTC (rev 3534) @@ -37,6 +37,9 @@ import com.bigdata.service.jini.TransactionServer; import com.bigdata.util.NV; +//BTM +import com.bigdata.transaction.EmbeddedTransactionService; + /** * Configuration for the {@link TransactionServer}. * @@ -54,17 +57,27 @@ /** * @param config */ - public TransactionServerConfiguration(Configuration config) - throws ConfigurationException { - - super(TransactionServer.class, config); - +//BTM - BEGIN +//BTM public TransactionServerConfiguration(Configuration config) +//BTM throws ConfigurationException { +//BTM +//BTM super(TransactionServer.class, config); +//BTM +//BTM } + public TransactionServerConfiguration(Class classType, + Configuration config) + throws ConfigurationException + { + super(classType, config); +Sy... [truncated message content] |
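The clientsTemplate blocks added above replace the old scalar clientServiceCount with a Jini discovery template: the master now waits (up to awaitServicesTimeout) until at least minMatches services advertising com.bigdata.service.IClientService have been discovered. The following is a minimal sketch of that discovery step, assuming the standard Jini ServiceDiscoveryManager API; the group name "bigdata" and the sketch class itself are placeholders, and bigdata's own ServicesTemplate wrapper is not reproduced here.

import net.jini.core.lookup.ServiceItem;
import net.jini.core.lookup.ServiceTemplate;
import net.jini.discovery.LookupDiscoveryManager;
import net.jini.lease.LeaseRenewalManager;
import net.jini.lookup.ServiceDiscoveryManager;

import com.bigdata.service.IClientService;

/**
 * Placeholder illustration of the discovery semantics behind the
 * [clientsTemplate] configuration entries: block until at least
 * [minMatches] IClientService instances are discovered or the timeout
 * expires.
 */
public class ClientServiceDiscoverySketch {

    public static ServiceItem[] discoverClientServices(final int minMatches,
            final long awaitServicesTimeout) throws Exception {

        // Discover lookup services in the (placeholder) "bigdata" group.
        final LookupDiscoveryManager ldm = new LookupDiscoveryManager(
                new String[] { "bigdata" }, null/* locators */, null/* listener */);

        final ServiceDiscoveryManager sdm = new ServiceDiscoveryManager(ldm,
                new LeaseRenewalManager());

        // Match any service advertising the IClientService interface,
        // regardless of its ServiceID or attributes (null wildcards), just
        // as the ServiceTemplate in the config files above does.
        final ServiceTemplate tmpl = new ServiceTemplate(null/* serviceID */,
                new Class[] { IClientService.class }, null/* attributes */);

        // Blocking lookup: returns once at least minMatches are found, or
        // returns whatever was found when awaitServicesTimeout (ms) expires.
        return sdm.lookup(tmpl, minMatches, Integer.MAX_VALUE/* maxMatches */,
                null/* filter */, awaitServicesTimeout);
    }
}

Discovering clients by interface rather than by a fixed server count appears to be the point of the change: the same template can match either the original remote servers or the new smart-proxy ServiceImpl variants, so long as they advertise IClientService.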
From: <ble...@us...> - 2010-09-12 19:58:03
Revision: 3533 http://bigdata.svn.sourceforge.net/bigdata/?rev=3533&view=rev Author: blevine218 Date: 2010-09-12 19:57:56 +0000 (Sun, 12 Sep 2010) Log Message: ----------- removed duplicate dependency declaration on apache river tools Modified Paths: -------------- branches/maven_scaleout/bigdata-core/pom.xml Modified: branches/maven_scaleout/bigdata-core/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-core/pom.xml 2010-09-11 22:53:40 UTC (rev 3532) +++ branches/maven_scaleout/bigdata-core/pom.xml 2010-09-12 19:57:56 UTC (rev 3533) @@ -281,11 +281,6 @@ </dependency> <dependency> <groupId>org.apache.river</groupId> - <artifactId>tools</artifactId> - <version>2.1</version> - </dependency> - <dependency> - <groupId>org.apache.river</groupId> <artifactId>jsk-lib</artifactId> <version>2.1</version> </dependency>
From: <tho...@us...> - 2010-09-11 22:53:49
Revision: 3532 http://bigdata.svn.sourceforge.net/bigdata/?rev=3532&view=rev Author: thompsonbry Date: 2010-09-11 22:53:40 +0000 (Sat, 11 Sep 2010) Log Message: ----------- Further reorganization of the federated query engine and its use of buffers. There are now thick (payload included with the RMI message) and thin (RMI message with payload via NIO over the resource service) messages for moving chunks around during distributed query processing. There are bare-bones unit tests for these as well. (The ResourceServer does not actually use NIO yet, but it can be optimized later. Also, we are not yet applying compression suitable for binding sets, but again that can be an optimization.) Now that RMI messages and payload transfers are more or less in place, I am going to work through some unit tests of distributed query evaluation. To do that I still need to reconcile the concept of a "query buffer" (where the final solutions are written) with the new model for moving data around. I think that the query buffer will no longer be privileged (it used to be a proxy object for a buffer on the client). Instead, scale-out will require an operator at the top of the query plan whose evaluation context is the query controller. The mere presence of an operator which copies its inputs to its outputs and whose evaluation context is the query controller is sufficient to do the trick. For practical purposes, this can be a SliceOp, since that already must run in the query controller context. If the offset/limit are not specified, they can be set to 0L and MAX_LONG on the SliceOp, which has the effect of turning it into a NOP (unless you are visiting an unbelievable #of results). (A sketch of this degenerate slice configuration appears after the diffs below.) Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/io/DirectBufferPoolAllocator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/ManagedResourceService.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/Dechunkerator.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/mutation/TestDelete.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/mutation/TestInsert.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java =================================================================== ---
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -12,7 +12,7 @@ * be consumed by some {@link BOp} in a specific query (this is only used in * query evaluation for the standalone database). */ -public class BindingSetChunk implements IChunkMessage { +public class BindingSetChunk<E> implements IChunkMessage<E> { /** The query controller. */ private final IQueryClient queryController; @@ -35,7 +35,7 @@ /** * The binding sets to be consumed by that {@link BOp}. */ - private IAsynchronousIterator<IBindingSet[]> source; + private IAsynchronousIterator<E[]> source; public IQueryClient getQueryController() { return queryController; @@ -59,7 +59,7 @@ public BindingSetChunk(final IQueryClient queryController, final long queryId, final int bopId, final int partitionId, - final IAsynchronousIterator<IBindingSet[]> source) { + final IAsynchronousIterator<E[]> source) { if (queryController == null) throw new IllegalArgumentException(); @@ -89,8 +89,12 @@ public void materialize(FederatedRunningQuery runningQuery) { // NOP } + + public void release() { + // NOP + } - public IAsynchronousIterator<IBindingSet[]> iterator() { + public IAsynchronousIterator<E[]> iterator() { return source; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -15,8 +15,18 @@ * processing. There are several implementations of this interface supporting * same-JVM messages, thick RMI messages, and RMI messages where the payload is * materialized using NIO transfers from the {@link ResourceService}. + * + * @param <E> + * The generic type of the elements in the chunk (binding sets, + * elements from a relation, etc). + * + * @todo Compressed representations of binding sets with the ability to read + * them in place or materialize them onto the java heap. The + * representation should be amenable to processing in C since we want to + * use them on GPUs as well. See {@link IChunkMessage} and perhaps + * {@link IRaba}. */ -public interface IChunkMessage { +public interface IChunkMessage<E> { /** The proxy for the query controller. */ IQueryClient getQueryController(); @@ -44,6 +54,11 @@ void materialize(FederatedRunningQuery runningQuery); /** + * Discard the materialized data. + */ + void release(); + + /** * Visit the binding sets in the chunk. * * @todo we do not need to use {@link IAsynchronousIterator} any more. This @@ -67,7 +82,10 @@ * source for processing. For selective operators, those chunks can be * combined before we execute the operator. For unselective operators, * we are going to run over all the data anyway. + * + * @throws IllegalStateException + * if the payload is not materialized. 
*/ - IAsynchronousIterator<IBindingSet[]> iterator(); + IAsynchronousIterator<E[]> iterator(); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -5,6 +5,7 @@ import java.util.UUID; import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; import com.bigdata.service.IService; /** @@ -46,6 +47,6 @@ * @throws UnsupportedOperationException * unless running in scale-out. */ - void bufferReady(IChunkMessage msg) throws RemoteException; + void bufferReady(IChunkMessage<IBindingSet> msg) throws RemoteException; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -45,6 +45,7 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IPredicate; import com.bigdata.bop.bset.Union; +import com.bigdata.bop.fed.FederatedQueryEngine; import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; @@ -411,11 +412,6 @@ /** * The currently executing queries. - * - * @todo DEADLINE: There should be a data structure representing - * {@link RunningQuery} having deadlines so we can - * {@link RunningQuery#cancel(boolean)} queries when their deadline - * expires. */ final ConcurrentHashMap<Long/* queryId */, RunningQuery> runningQueries = new ConcurrentHashMap<Long, RunningQuery>(); @@ -514,54 +510,39 @@ * if the sink has not been taken, e.g., by combining the chunk into * the same target ByteBuffer, or when we add the chunk to the * RunningQuery.] - * - * @todo SCALEOUT: High volume query operators must demand that their inputs - * are materialized before they can begin evaluation. Scaleout - * therefore requires a separate queue which looks at the metadata - * concerning chunks available on remote nodes for an operator which - * will run on this node and then demands the data either when the - * predecessors in the pipeline are done (operator at once evaluation) - * or when sufficient data are available to run the operator (mega - * chunk pipelining). Once the data are locally materialized, the - * operator may be queued for evaluation. */ private class QueryEngineTask implements Runnable { public void run() { - try { - System.err.println("QueryEngine running: "+this); - while (true) { + System.err.println("QueryEngine running: " + this); + while (true) { + try { final RunningQuery q = priorityQueue.take(); final long queryId = q.getQueryId(); if (q.isCancelled()) continue; - final IChunkMessage chunk = q.chunksIn.poll(); - if (chunk == null) { - // not expected, but can't do anything without a chunk. - if (log.isDebugEnabled()) - log.debug("Dropping chunk: queryId=" + queryId); - continue; - } + final IChunkMessage<IBindingSet> chunk = q.chunksIn.poll(); if (log.isTraceEnabled()) log.trace("Accepted chunk: queryId=" + queryId + ", bopId=" + chunk.getBOpId()); + // create task. try { - // create task. 
final FutureTask<?> ft = q.newChunkTask(chunk); // execute task. localIndexManager.getExecutorService().execute(ft); } catch (RejectedExecutionException ex) { - // shutdown of the pool (should be an unbounded pool). + // shutdown of the pool (should be an unbounded + // pool). log.warn("Dropping chunk: queryId=" + queryId); continue; - } catch (Throwable ex) { - // log and continue - log.error(ex, ex); - continue; } + } catch (InterruptedException e) { + log.warn("Interrupted."); + return; + } catch (Throwable ex) { + // log and continue + log.error(ex, ex); + continue; } - } catch (InterruptedException e) { - log.warn("Interrupted."); - return; } } } // QueryEngineTask @@ -579,7 +560,7 @@ * @throws IllegalStateException * if the chunk is not materialized. */ - void acceptChunk(final IChunkMessage chunk) { + void acceptChunk(final IChunkMessage<IBindingSet> chunk) { if (chunk == null) throw new IllegalArgumentException(); @@ -625,9 +606,25 @@ } + // hook for subclasses. + didShutdown(); + + // stop the query engine. + final Future<?> f = engineFuture.get(); + if (f != null) + f.cancel(true/* mayInterruptIfRunning */); + } /** + * Hook is notified by {@link #shutdown()} when all running queries have + * terminated. + */ + protected void didShutdown() { + + } + + /** * Do not accept new queries and halt any running binding set chunk tasks. */ public void shutdownNow() { @@ -686,7 +683,7 @@ } - public void bufferReady(IChunkMessage msg) { + public void bufferReady(IChunkMessage<IBindingSet> msg) { throw new UnsupportedOperationException(); @@ -844,6 +841,10 @@ * @todo Could return a data structure which encapsulates the query results * and could allow multiple results from a query, e.g., one per step * in a program. + * + * @deprecated This is going away. + * + * @see FederatedQueryEngine#newQueryBuffer(BindingSetPipelineOp) */ protected IBlockingBuffer<IBindingSet[]> newQueryBuffer( final BindingSetPipelineOp query) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -48,13 +48,15 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.BOpContext; +import com.bigdata.bop.BOpEvaluationContext; import com.bigdata.bop.BOpUtility; import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.NoSuchBOpException; +import com.bigdata.bop.bset.CopyBindingSetOp; +import com.bigdata.bop.solutions.SliceOp; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITx; -import com.bigdata.journal.TimestampUtility; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.service.IBigdataFederation; @@ -90,24 +92,15 @@ final private long queryId; // /** -// * The timestamp or transaction identifier against which the query is -// * reading. -// */ -// final private long readTimestamp; -// -// /** -// * The timestamp or transaction identifier against which the query is -// * writing. -// */ -// final private long writeTimestamp; - -// /** // * The timestamp when the query was accepted by this node (ms). // */ // final private long begin; + /** * The query deadline. 
The value is the system clock time in milliseconds * when the query is due and {@link Long#MAX_VALUE} if there is no deadline. + * In order to have a guarantee of a consistent clock, the deadline is + * interpreted by the query controller. */ final private AtomicLong deadline = new AtomicLong(Long.MAX_VALUE); @@ -132,8 +125,6 @@ */ final private IQueryClient clientProxy; -// /** The query iff materialized on this node. */ -// final private AtomicReference<BOp> queryRef; /** The query. */ final private BOp query; @@ -141,7 +132,12 @@ * The buffer used for the overall output of the query pipeline. * * FIXME SCALEOUT: This should only exist on the query controller. Other - * nodes will send {@link IChunkMessage}s to the query controller. + * nodes will send {@link IChunkMessage}s to the query controller. s/o will + * use an operator with {@link BOpEvaluationContext#CONTROLLER} in order to + * ensure that the results are transferred to the query controller. When a + * {@link SliceOp} is used, this is redundant. The operator in other cases + * can be a {@link CopyBindingSetOp} whose {@link BOpEvaluationContext} has + * been overridden. */ final private IBlockingBuffer<IBindingSet[]> queryBuffer; @@ -218,7 +214,7 @@ * Note: This is package private so it will be visible to the * {@link QueryEngine}. */ - final/* private */BlockingQueue<IChunkMessage> chunksIn = new LinkedBlockingDeque<IChunkMessage>(); + final/* private */BlockingQueue<IChunkMessage<IBindingSet>> chunksIn = new LinkedBlockingDeque<IChunkMessage<IBindingSet>>(); /** * Set the query deadline. The query will be cancelled when the deadline is @@ -286,45 +282,13 @@ } + /** + * Return the operator tree for this query. + */ public BOp getQuery() { return query; } -// /** -// * Return the operator tree for this query. If query processing is -// * distributed and the query has not been materialized on this node, then it -// * is materialized now. -// * -// * @return The query. -// */ -// public BOp getQuery() { -// -// if (queryRef.get() == null) { -// -// synchronized (queryRef) { -// -// if (queryRef.get() == null) { -// -// try { -// -// queryRef.set(clientProxy.getQuery(queryId)); -// -// } catch (RemoteException e) { -// -// throw new RuntimeException(e); -// -// } -// -// } -// -// } -// -// } -// -// return queryRef.get(); -// -// } - /** * Return <code>true</code> iff this is the query controller. */ @@ -361,8 +325,6 @@ * if the <i>writeTimestamp</i> is neither * {@link ITx#UNISOLATED} nor a read-write transaction * identifier. - * - * @todo is queryBuffer required? should it be allocated from the top bop? */ public RunningQuery(final QueryEngine queryEngine, final long queryId, // final long begin, @@ -390,42 +352,6 @@ this.statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() : null; -// /* -// * @todo when making a per-bop annotation, queries must obtain a tx for -// * each timestamp up front on the controller and rewrite the bop to hold -// * the tx until it is done. -// * -// * @todo This is related to how we handle sequences of steps, parallel -// * steps, closure of steps, and join graphs. Those operations need to be -// * evaluated on the controller. We will have to model the relationship -// * between the subquery and the query in order to terminate the subquery -// * when the query halts and to terminate the query if the subquery -// * fails. -// * -// * @todo Closure operations must rewrite the query to update the -// * annotations. 
Each pass in a closure needs to be its own "subquery" -// * and will need to have a distinct queryId. -// */ -// final Long timestamp = query -// .getProperty(BOp.Annotations.TIMESTAMP); -// -// // @todo remove default when elevating to per-writable bop annotation. -// final long writeTimestamp = query.getProperty( -// BOp.Annotations.WRITE_TIMESTAMP, ITx.UNISOLATED); -// -// if (readTimestamp == null) -// throw new IllegalArgumentException(); -// -// if (readTimestamp.longValue() == ITx.UNISOLATED) -// throw new IllegalArgumentException(); -// -// if (TimestampUtility.isReadOnly(writeTimestamp)) -// throw new IllegalArgumentException(); -// -// this.readTimestamp = readTimestamp; -// -// this.writeTimestamp = writeTimestamp; - this.timeout = query.getProperty(BOp.Annotations.TIMEOUT, BOp.Annotations.DEFAULT_TIMEOUT); @@ -463,8 +389,9 @@ /* * Note: The partitionId will always be -1 in scale-up. */ - final BindingSetChunk chunk = new BindingSetChunk(clientProxy, queryId, - sinkId, -1/* partitionId */, sink.iterator()); + final BindingSetChunk<IBindingSet> chunk = new BindingSetChunk<IBindingSet>( + clientProxy, queryId, sinkId, -1/* partitionId */, sink + .iterator()); queryEngine.acceptChunk(chunk); @@ -475,12 +402,12 @@ /** * Make a chunk of binding sets available for consumption by the query. * <p> - * Note: this is invoked by {@link QueryEngine#add(BindingSetChunk)}. + * Note: this is invoked by {@link QueryEngine#acceptChunk(IChunkMessage)} * * @param msg * The chunk. */ - protected void acceptChunk(final IChunkMessage msg) { + protected void acceptChunk(final IChunkMessage<IBindingSet> msg) { if (msg == null) throw new IllegalArgumentException(); @@ -506,7 +433,7 @@ * * @todo this should reject multiple invocations for a given query instance. */ - public void startQuery(final IChunkMessage chunk) { + public void startQuery(final IChunkMessage<IBindingSet> chunk) { if (!controller) throw new UnsupportedOperationException(); if (chunk == null) @@ -776,7 +703,7 @@ * A chunk to be consumed. */ @SuppressWarnings("unchecked") - protected FutureTask<Void> newChunkTask(final IChunkMessage chunk) { + protected FutureTask<Void> newChunkTask(final IChunkMessage<IBindingSet> chunk) { /* * Look up the BOp in the index, create the BOpContext for that BOp, and * return the value returned by BOp.eval(context). @@ -792,6 +719,9 @@ * @todo evaluation of element[] pipelines needs to use pretty much * the same code, but it needs to be typed for E[] rather than * IBindingSet[]. + * + * @todo evaluation of Monet style BATs would also operate under + * different assumptions, closer to those of an element[]. */ throw new UnsupportedOperationException(bop.getClass().getName()); } @@ -961,16 +891,4 @@ } -// public long getReadTimestamp() { -// -// return readTimestamp; -// -// } -// -// public long getWriteTimestamp() { -// -// return writeTimestamp; -// -// } - } Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -1,244 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Sep 10, 2010 - */ - -package com.bigdata.bop.fed; - -import java.io.Serializable; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.UUID; - -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.engine.IChunkMessage; -import com.bigdata.bop.engine.IQueryClient; -import com.bigdata.io.DirectBufferPoolAllocator.IAllocation; -import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; -import com.bigdata.relation.accesspath.IAsynchronousIterator; -import com.bigdata.service.ResourceService; - -/** - * An {@link IChunkMessage} where the payload is made available to the receiving - * service using an NIO transfer against the sender's {@link ResourceService}. - * This is suitable for moving large blocks of data during query evaluation. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class ChunkMessageWithNIOPayload implements IChunkMessage, Serializable { - - /** - * - */ - private static final long serialVersionUID = 1L; - - /** - * Metadata about an allocation to be retrieved from the sender's - * {@link ResourceService}. - */ - private final class A implements Serializable { - - /** - * - */ - private static final long serialVersionUID = 1L; - - /** - * The identifier of the resource on the sender's - * {@link ResourceService}. - */ - private final UUID bufferId; - - /** - * The size of that resource in bytes. - */ - private final int nbytes; - - /** - * - * @param bufferId - * The identifier of the resource on the sender's - * {@link ResourceService}. - * @param nbytes - * The size of that resource in bytes. - */ - public A(final UUID bufferId, final int nbytes) { - this.bufferId = bufferId; - this.nbytes = nbytes; - } - } - - final private IQueryClient queryController; - - final private long queryId; - - final private int bopId; - - final private int partitionId; - - final private int nbytes; - - /** - * Note: Even when we send one message per chunk, we can still have a list - * of {@link IAllocation}s if the chunk did not get formatted onto a single - * {@link IAllocation}. - */ - final private A[] allocations; - - /** - * The Internet address and port where the receiver can fetch the payload - * using the sender's {@link ResourceService}. - */ - final private InetSocketAddress addr; - - public IQueryClient getQueryController() { - return queryController; - } - - public long getQueryId() { - return queryId; - } - - public int getBOpId() { - return bopId; - } - - public int getPartitionId() { - return partitionId; - } - - /** The #of bytes of data which are available for that operator. */ - public int getBytesAvailable() { - return nbytes; - } - - /** - * The Internet address and port of a {@link ResourceService} from which the - * receiver may demand the data. 
- */ - public InetSocketAddress getServiceAddr() { - return addr; - } - - /** - * - * @param queryController - * @param queryId - * @param sinkId - * @param partitionId - * @param allocations - * The ordered list of {@link IAllocation}s comprising the chunk. - * @param addr - * The Internet address and port where the receiver can fetch the - * payload using the sender's {@link ResourceService}. - */ - public ChunkMessageWithNIOPayload(final IQueryClient queryController, - final long queryId, final int sinkId, final int partitionId, - final List<IAllocation> allocations, final InetSocketAddress addr) { - - if (queryController == null) - throw new IllegalArgumentException(); - - if (allocations == null) - throw new IllegalArgumentException(); - - if (addr == null) - throw new IllegalArgumentException(); - - this.queryController = queryController; - this.queryId = queryId; - this.bopId = sinkId; - this.partitionId = partitionId; - final int n = allocations.size(); - this.allocations = new A[n]; - int i = 0; - int nbytes = 0; - final Iterator<IAllocation> itr = allocations.iterator(); - while (itr.hasNext()) { - final IAllocation alloc = itr.next(); - final int len = alloc.getSlice().capacity(); - this.allocations[i++] = new A(alloc.getId(), len); - nbytes += len; - } - this.nbytes = nbytes; - this.addr = addr; - - } - - public boolean isMaterialized() { - return materialized; - } - private volatile boolean materialized = false; - - /** - * - * FIXME unit tests for materializing and visiting the chunk. - */ - synchronized public void materialize(FederatedRunningQuery runningQuery) { - - if (materialized) - return; - - final AllocationContextKey key = new ShardContext(queryId, bopId, - partitionId); - - final IAllocationContext allocationContext = runningQuery - .getAllocationContext(key); - - final ResourceService resourceService = runningQuery.getQueryEngine() - .getResourceService(); - -// for (A a : allocations) { -// -// /* -// * FIXME harmonize an IAllocation[] with a ByteBuffer for the {@link -// * ResourceService}. The problem is that an object to be sent across -// * the wire may span multiple ByteBuffers. 
-// */ -// final ByteBuffer tmp = allocationContext.alloc(a.nbytes); -// -// new ResourceService.ReadBufferTask(addr, a.bufferId, tmp); -// -// } - - throw new UnsupportedOperationException(); - - } - - public IAsynchronousIterator<IBindingSet[]> iterator() { - - if (!isMaterialized()) - throw new UnsupportedOperationException(); - - // TODO Auto-generated method stub - throw new UnsupportedOperationException(); - - } - -} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -31,7 +31,13 @@ import java.nio.ByteBuffer; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.log4j.Logger; + import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.engine.IChunkMessage; @@ -41,7 +47,6 @@ import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.RunningQuery; import com.bigdata.bop.solutions.SliceOp; -import com.bigdata.btree.raba.IRaba; import com.bigdata.journal.IIndexManager; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; @@ -51,6 +56,7 @@ import com.bigdata.service.IDataService; import com.bigdata.service.ManagedResourceService; import com.bigdata.service.ResourceService; +import com.bigdata.util.InnerCause; /** * An {@link IBigdataFederation} aware {@link QueryEngine}. @@ -59,18 +65,13 @@ * @version $Id: FederatedQueryEngine.java 3508 2010-09-05 17:02:34Z thompsonbry * $ * - * @todo buffer management for s/o bindingSet[] movement - * - * @todo buffer management for s/o DHT element[] movement - * - * @todo Compressed representations of binding sets with the ability to read - * them in place or materialize them onto the java heap. The - * representation should be amenable to processing in C since we want to - * use them on GPUs as well. See {@link IChunkMessage} and perhaps - * {@link IRaba}. + * @todo DEFAULT_GRAPH_QUERY: buffer management for s/o DHT element[] movement */ public class FederatedQueryEngine extends QueryEngine { + private final static transient Logger log = Logger + .getLogger(FederatedQueryEngine.class); + /** * The {@link IBigdataFederation} iff running in scale-out. * <p> @@ -88,6 +89,17 @@ private final ManagedResourceService resourceService; /** + * A priority queue of {@link IChunkMessage}s which needs to have their data + * materialized so an operator can consume those data on this node. + */ + final private PriorityBlockingQueue<IChunkMessage<?>> chunkMaterializationQueue = new PriorityBlockingQueue<IChunkMessage<?>>(); + + /** + * The {@link Future} for the task draining the {@link #chunkMaterializationQueue}. + */ + private final AtomicReference<FutureTask<Void>> materializeChunksFuture = new AtomicReference<FutureTask<Void>>(); + + /** * Constructor used on a {@link DataService} (a query engine peer). 
* * @param dataService @@ -101,7 +113,44 @@ } + @Override + public UUID getServiceUUID() { + + return fed.getServiceUUID(); + + } + + @Override + public IBigdataFederation<?> getFederation() { + + return fed; + + } + /** + * The service used to expose {@link ByteBuffer}s and managed index + * resources for transfer to remote services in support of distributed query + * evaluation. + */ + public ManagedResourceService getResourceService() { + + return resourceService; + + } + + /** + * Overridden to strengthen the return type. + * <p> + * {@inheritDoc} + */ + @Override + protected FederatedRunningQuery getRunningQuery(final long queryId) { + + return (FederatedRunningQuery) super.getRunningQuery(queryId); + + } + + /** * Constructor used on a non-{@link DataService} node to expose a query * controller. Since the query controller is not embedded within a data * service it needs to provide its own {@link ResourceService} and local @@ -131,43 +180,121 @@ } + /** + * {@inheritDoc} + * <p> + * Extended to also initialize a thread which will materialize + * {@link IChunkMessage} for consumption by this node. + * + * @todo ANALYTIC_QUERY: {@link IChunkMessage} are dropped onto a queue and + * materialized in order of arrival. This works fine for low latency + * pipelined query evaluation. + * <p> + * For analytic query, we (a) manage the #of high volume operators + * which run concurrently, presumably based on their demands on + * memory; and (b) model the chunks available before they are + * materialized locally such that (c) they can be materialized on + * demand (flow control); and (d) we can run the operator when there + * are sufficient chunks available without taking on too much data. + * <p> + * This requires a separate queue for executing high volume operators + * and also separate consideration of when chunks available on remote + * nodes should be materialized. + */ @Override - public UUID getServiceUUID() { + public void init() { - return fed.getServiceUUID(); + final FutureTask<Void> ft = new FutureTask<Void>( + new MaterializeChunksTask(), (Void) null); + + if (materializeChunksFuture.compareAndSet(null/* expect */, ft)) { + + getIndexManager().getExecutorService().execute(ft); + + } else { + + throw new IllegalStateException("Already running"); + + } } + /** + * {@inheritDoc} + * <p> + * Extended to stop materializing chunks once all running queries are done. + */ @Override - public IBigdataFederation<?> getFederation() { + protected void didShutdown() { + + // stop materializing chunks. + final Future<?> f = materializeChunksFuture.get(); + if (f != null) + f.cancel(true/* mayInterruptIfRunning */); - return fed; - } - - /** - * The service used to expose {@link ByteBuffer}s and managed index - * resources for transfer to remote services in support of distributed query - * evaluation. - */ - public ManagedResourceService getResourceService() { - return resourceService; - - } - /** - * Overridden to strengthen the return type. + * {@inheritDoc} * <p> - * {@inheritDoc} + * Extended to stop materializing chunks. */ @Override - protected FederatedRunningQuery getRunningQuery(final long queryId) { + public void shutdownNow() { + + // stop materializing chunks. + final Future<?> f = materializeChunksFuture.get(); + if (f != null) + f.cancel(true/* mayInterruptIfRunning */); - return (FederatedRunningQuery) super.getRunningQuery(queryId); + super.shutdownNow(); } + /** + * Runnable materializes chunks and makes them available for further + * processing. 
+ */ + private class MaterializeChunksTask implements Runnable { + public void run() { + while (true) { + try { + final IChunkMessage<?> c = chunkMaterializationQueue.take(); + final long queryId = c.getQueryId(); + final FederatedRunningQuery q = getRunningQuery(queryId); + if (q.isCancelled()) + continue; + final IChunkMessage<?> msg = chunkMaterializationQueue + .poll(); + try { + msg.materialize(q); + /* + * @todo The type warning here is because the rest of + * the API does not know what to do with messages for + * chunks other than IBindingSet[], e.g., IElement[], + * etc. + */ + FederatedQueryEngine.this + .bufferReady((IChunkMessage) msg); + } catch(Throwable t) { + if(InnerCause.isInnerCause(t, InterruptedException.class)) { + log.warn("Interrupted."); + return; + } + throw new RuntimeException(t); + } + } catch (InterruptedException e) { + log.warn("Interrupted."); + return; + } catch (Throwable ex) { + // log and continue + log.error(ex, ex); + continue; + } + } + } + } // MaterializeChunksTask + public void declareQuery(final IQueryDecl queryDecl) { final long queryId = queryDecl.getQueryId(); @@ -179,7 +306,7 @@ } @Override - public void bufferReady(final IChunkMessage msg) { + public void bufferReady(final IChunkMessage<IBindingSet> msg) { if (msg == null) throw new IllegalArgumentException(); @@ -200,12 +327,6 @@ } else { /* - * FIXME SCALEOUT: We need to model the chunks available before they - * are materialized locally such that (a) they can be materialized - * on demand (flow control); and (b) we can run the operator when - * there are sufficient chunks available without taking on too much - * data. [For the sort term, they can be dropped onto a queue and - * materialized in order of arrival.] */ throw new UnsupportedOperationException("FIXME"); @@ -248,6 +369,8 @@ * normally. Also pay attention when the client closes the * {@link IAsynchronousIterator} from which it is draining solutions * early. + * + * @deprecated This is going away. 
*/ @Override protected IBlockingBuffer<IBindingSet[]> newQueryBuffer( Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-10 20:47:27 UTC (rev 3531) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -29,10 +29,7 @@ import java.nio.ByteBuffer; import java.rmi.RemoteException; -import java.util.Arrays; import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -47,9 +44,7 @@ import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryPeer; import com.bigdata.bop.engine.RunningQuery; -import com.bigdata.io.DirectBufferPoolAllocator; -import com.bigdata.io.SerializerUtil; -import com.bigdata.io.DirectBufferPoolAllocator.IAllocation; +import com.bigdata.io.DirectBufferPool; import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; import com.bigdata.mdi.PartitionLocator; import com.bigdata.relation.accesspath.BlockingBuffer; @@ -58,7 +53,6 @@ import com.bigdata.relation.accesspath.IBuffer; import com.bigdata.resources.ResourceManager; import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.ManagedResourceService; import com.bigdata.service.ResourceService; import com.bigdata.striterator.IKeyOrder; @@ -245,7 +239,7 @@ * {@inheritDoc} */ @Override - protected void acceptChunk(final IChunkMessage msg) { + protected void acceptChunk(final IChunkMessage<IBindingSet> msg) { super.acceptChunk(msg); @@ -475,11 +469,11 @@ * The identifier of the target {@link BOp}. * @param allocationContext * The allocation context within which the {@link ByteBuffer}s - * will be managed for this {@link ChunkMessageWithNIOPayload}. + * will be managed for this {@link NIOChunkMessage}. * @param source * The binding sets to be formatted onto a buffer. * - * @return The {@link ChunkMessageWithNIOPayload}. + * @return The {@link NIOChunkMessage}. * * @todo This is basically a factory for creating {@link IChunkMessage}s. * That factory pattern in combined with the logic to send the message @@ -489,10 +483,6 @@ * could help to cut latency when an operator has a large fan out (in * scale-out when mapping over shards or nodes). * - * @todo We probably need to use the {@link DirectBufferPoolAllocator} to - * receive the chunks within the {@link ManagedResourceService} as - * well. - * * @todo Release the allocations associated with each output chunk once it * is received by the remote service. * <p> @@ -506,10 +496,32 @@ * closed, then the output chunks for the query controller should be * immediately dropped. * - * @todo There are a few things where the resource must be made available to - * more than one operator evaluation phase. The best examples are - * temporary graphs for parallel closure and large collections of - * graphIds for SPARQL "NAMED FROM DATA SET" extensions. + * @todo There are a few things for which the resource must be made + * available to more than one operator evaluation phase. The best + * examples are temporary graphs for parallel closure and large + * collections of graphIds for SPARQL "NAMED FROM DATA SET" + * extensions. 
+ * + * @todo Rethink the multiplicity relationship between chunks output from an + * operator, chunks output from mapping the operator over shards or + * nodes, RMI messages concerning buffers available for the sink + * operator on the various nodes, and the #of allocations per RMI + * message on both the sender and the receiver. + * <p> + * I am pretty sure that none of these are strongly coupled, e.g., + * they are not 1:1. Some stages can combine chunks. Multiple + * allocations could be required on either the sender or the receiver + * purely due to where the slices fall on the backing direct + * {@link ByteBuffer}s in the {@link DirectBufferPool} and the sender + * and receiver do not need to use the same allocation context or have + * the same projection of slices onto the backing buffers. + * <p> + * The one thing which is critical is that the query controller is + * properly informed of the #of chunks made available to an operator + * and consumed by that operator, that those reports must be in the + * same units, and that the reports must be delivered back to the + * query controller in a manner which does not transiently violate the + * termination conditions of the query. */ protected void sendChunkMessage( final UUID serviceUUID, @@ -540,13 +552,15 @@ final boolean thisService = peerProxy == getQueryEngine(); if(thisService) { + /* * Leave the chunk as Java objects and drop it directly onto the * query engine. */ - final IChunkMessage msg = new BindingSetChunk(getQueryController(), - getQueryId(), sinkId, partitionId, source.iterator()); + final IChunkMessage<IBindingSet> msg = new BindingSetChunk<IBindingSet>( + getQueryController(), getQueryId(), sinkId, partitionId, + source.iterator()); getQueryEngine().bufferReady(msg); @@ -561,37 +575,23 @@ * RMI message or out of band using NIO. This decision effects how we * serialize the chunk. */ - final IChunkMessage msg; + final IChunkMessage<IBindingSet> msg; if (source.size() < 100) { - /* - * FIXME Send payload inline with the RMI message. - */ + msg = new ThickChunkMessage<IBindingSet>(getQueryController(), + getQueryId(), sinkId, partitionId, source); -// final byte[] data = SerializerUtil.serialize(obj); -// -// // @todo harmonize serialization and compression and ctors. -// msg = new ThickChunkMessage(getQueryController(), getQueryId(), -// sinkId, partitionId, data); - throw new UnsupportedOperationException(); + } else { - } else - { - /* * Marshall the data onto direct ByteBuffer(s) and send a thin * message by RMI. The receiver will retrieve the data using NIO * against the ResourceService. - * - * @todo harmonize serialization and compression and ctors. */ - final List<IAllocation> allocations = moveToNIOBuffers( - allocationContext, source); + msg = new NIOChunkMessage<IBindingSet>(getQueryController(), + getQueryId(), sinkId, partitionId, allocationContext, + source, getQueryEngine().getResourceService().getAddr()); - msg = new ChunkMessageWithNIOPayload(getQueryController(), - getQueryId(), sinkId, partitionId, allocations, - getQueryEngine().getResourceService().getAddr()); - } try { @@ -606,61 +606,4 @@ } - /** - * Chunk-wise serialization of the data onto allocations. - * @param allocationContext - * @param source - * @return - * - * @todo should be on message per chunk, right? 
- */ - private List<IAllocation> moveToNIOBuffers( - final IAllocationContext allocationContext, - final IBlockingBuffer<IBindingSet[]> source) { - - int nbytes = 0; - - final List<IAllocation> allocations = new LinkedList<IAllocation>(); - - final IAsynchronousIterator<IBindingSet[]> itr = source.iterator(); - - try { - - while (itr.hasNext()) { - - // Next chunk to be serialized. - final IBindingSet[] chunk = itr.next(); - - // serialize the chunk of binding sets. - final byte[] data = SerializerUtil.serialize(chunk); - - // track size of the allocations. - nbytes += data.length; - - // allocate enough space for those data. - final IAllocation[] tmp; - try { - tmp = allocationContext.alloc(data.length); - } catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - - // copy the data into the allocations. - DirectBufferPoolAllocator.put(data, tmp); - - // append the new allocations. - allocations.addAll(Arrays.asList(tmp)); - - } - - return allocations; - - } finally { - - itr.close(); - - } - - } - } Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java (from rev 3531, branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java 2010-09-11 22:53:40 UTC (rev 3532) @@ -0,0 +1,502 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 10, 2010 + */ + +package com.bigdata.bop.fed; + +import java.io.Serializable; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import com.bigdata.bop.engine.IChunkMessage; +import com.bigdata.bop.engine.IQueryClient; +import com.bigdata.io.DirectBufferPoolAllocator; +import com.bigdata.io.SerializerUtil; +import com.bigdata.io.DirectBufferPoolAllocator.IAllocation; +import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.relation.accesspath.IBlockingBuffer; +import com.bigdata.service.ManagedResourceService; +import com.bigdata.service.ResourceService; + +/** + * An {@link IChunkMessage} where the payload is made available to the receiving + * service using an NIO transfer against the sender's {@link ResourceService}. + * This is suitable for moving large blocks of data during query evaluation. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class NIOChunkMessage<E> implements IChunkMessage<E>, Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + final private IQueryClient queryController; + + final private long queryId; + + final private int bopId; + + final private int partitionId; + + final private int solutionCount; + + final private int nbytes; + + /** + * Note: Even when we send one message per chunk, we can still have a list + * of {@link IAllocation}s if the chunk did not get formatted onto a single + * {@link IAllocation}. + */ + final private A[] allocations; + + /** + * The Internet address and port where the receiver can fetch the payload + * using the sender's {@link ResourceService}. + */ + final private InetSocketAddress addr; + + public IQueryClient getQueryController() { + return queryController; + } + + public long getQueryId() { + return queryId; + } + + public int getBOpId() { + return bopId; + } + + public int getPartitionId() { + return partitionId; + } + + /** + * The #of elements in this chunk. + * + * @todo we could track this in total and in {@link A} on a per-slice basis. + */ + public int getSolutionCount() { + return solutionCount; + } + + /** The #of bytes of data which are available for that operator. */ + public int getBytesAvailable() { + return nbytes; + } + + /** + * The Internet address and port of a {@link ResourceService} from which the + * receiver may demand the data. + */ + public InetSocketAddress getServiceAddr() { + return addr; + } + + public String toString() { + + return getClass().getName() + "{queryId=" + queryId + ",bopId=" + bopId + + ",partitionId=" + partitionId + ", solutionCount=" + + solutionCount + ", bytesAvailable=" + nbytes + ", nslices=" + + allocations.length + ", serviceAddr=" + addr + "}"; + + } + + /** + * + * @param queryController + * @param queryId + * @param sinkId + * @param partitionId + * @param allocations + * The ordered list of {@link IAllocation}s comprising the chunk. + * @param addr + * The Internet address and port where the receiver can fetch the + * payload using the sender's {@link ResourceService}. + */ + public NIOChunkMessage(final IQueryClient queryController, + final long queryId, final int sinkId, final int partitionId, + final IAllocationContext allocationContext, + final IBlockingBuffer<E[]> source, + final InetSocketAddress addr) { + + if (queryController == null) + throw new IllegalArgumentException(); + + if (allocationContext == null) + throw new IllegalArgumentException(); + + if (source == null) + throw new IllegalArgumentException(); + + if (addr == null) + throw new IllegalArgumentException(); + + // format onto NIO buffers. + final AtomicInteger nsolutions = new AtomicInteger(); + final List<IAllocation> allocations = moveToNIOBuffers( + allocationContext, source, nsolutions); + + this.queryController = queryController; + this.queryId = queryId; + this.bopId = sinkId; + this.partitionId = partitionId; + final int n = allocations.size(); + this.allocations = new A[n]; + int i = 0; + int nbytes = 0; + final Iterator<IAllocation> itr = allocations.iterator(); + while (itr.hasNext()) { + final IAllocation alloc = itr.next(); + final int len = alloc.getSlice().capacity(); + this.allocations[i++] = new A(alloc.getId(), len); + nbytes += len; + } + this.solutionCount = nsolutions.get(); + this.nbytes = nbytes; + this.addr = addr; + + } + + /** + * Chunk-wise serialization of the data onto allocations. 
+ * + * @param allocationContext + * @param source + * @return + */ + static private <E> List<IAllocation> moveToNIOBuffers( + final IAllocationContext allocationContext, + final IBlockingBuffer<E[]> source, + final AtomicInteger nsolutions) { + + int nbytes = 0; + + int n = 0; + + final List<IAllocation> allocations = new LinkedList<IAllocation>(); + + final IAsynchronousIterator<E[]> itr = source.iterator(); + + try { + + while (itr.hasNext()) { + + // Next chunk to be serialized. + final E[] chunk = itr.next(); + + // track #of solutions. + n += chunk.length; + + // serialize the chunk of binding sets. + final byte[] data = SerializerUtil.serialize(chunk); + + // track size of the allocations. + nbytes += data.length; + + // allocate enough space for those data. + final IAllocation[] tmp; + try { + tmp = allocationContext.alloc(data.length); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + + // copy the data into the allocations. + DirectBufferPoolAllocator.put(data, tmp); + + for(IAllocation a : tmp) { + + // prepare for reading. + a.getSlice().flip(); + + // append the allocation. + allocations.add(a); + + } + + } + + nsolutions.addAndGet(n); + + return allocations; + + } finally { + + itr.close(); + + } + + } + + /** + * Metadata about an allocation to be retrieved from the sender's + * {@link ResourceService}. + */ + private static final class A implements Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** + * The identifier of the resource on the sender's + * {@link ResourceService}. + */ + private final UUID bufferId; + + /** + * The size of that resource in bytes. + */ + private final int nbytes; + + /** + ... [truncated message content] |
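To make the NOP point from the log message above concrete: with offset=0L and limit=Long.MAX_VALUE a slice copies every input solution to its output, so its only remaining effect is that its evaluation context forces the solutions onto the query controller. A minimal self-contained sketch follows; this is not the bigdata SliceOp API, and the class and member names below are illustrative assumptions only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Illustrative slice over chunks of solutions. Not the bigdata SliceOp:
 * this just shows why offset=0L and limit=Long.MAX_VALUE turn a slice
 * into a pass-through (a NOP) over the solution stream.
 */
public class SliceSketch<E> {

    private final long offset; // #of solutions to skip before emitting.
    private final long limit;  // maximum #of solutions to emit.

    private long nseen = 0;    // #of solutions observed so far.
    private long nemitted = 0; // #of solutions emitted so far.

    public SliceSketch(final long offset, final long limit) {
        this.offset = offset;
        this.limit = limit;
    }

    /** Apply the slice to one chunk, returning the solutions to pass on. */
    public List<E> accept(final List<E> chunk) {
        final List<E> out = new ArrayList<E>();
        for (E solution : chunk) {
            if (nseen++ < offset)
                continue; // still inside the offset: skip.
            if (nemitted >= limit)
                break; // past the limit: the query would be halted here.
            out.add(solution);
            nemitted++;
        }
        return out;
    }

    public static void main(final String[] args) {
        // offset=0L, limit=MAX_LONG: every input is copied to the output.
        final SliceSketch<String> nop = new SliceSketch<String>(0L, Long.MAX_VALUE);
        System.out.println(nop.accept(Arrays.asList("a", "b", "c"))); // [a, b, c]
    }
}

In scale-out the same trick would work with any operator which copies its inputs to its outputs and whose BOpEvaluationContext is overridden to the query controller (cf. the CopyBindingSetOp remark in the RunningQuery javadoc above).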
From: <tho...@us...> - 2010-09-10 20:47:35
|
Revision: 3531 http://bigdata.svn.sourceforge.net/bigdata/?rev=3531&view=rev Author: thompsonbry Date: 2010-09-10 20:47:27 +0000 (Fri, 10 Sep 2010) Log Message: ----------- Move the timestamp and mutation onto the BOp. More work on moving binding sets around. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ndx/SampleLocalShard.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestDistinctBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -140,9 +140,25 @@ * shards, mapped against nodes, or evaluated on the query controller must * override this method. */ - public BOpEvaluationContext getEvaluationContext(); + BOpEvaluationContext getEvaluationContext(); + + /** + * Return <code>true</code> iff this operator is an access path which writes + * on the database. + * + * @see Annotations#MUTATION + */ + boolean isMutation(); /** + * The timestamp or transaction identifier on which the operator will read + * or write. + * + * @see Annotations#TIMESTAMP + */ + long getTimestamp(); + + /** * Interface declaring well known annotations. */ public interface Annotations { @@ -176,20 +192,31 @@ long DEFAULT_TIMEOUT = Long.MAX_VALUE; /** - * The timestamp (or transaction identifier) associated with a read from - * the database. + * Boolean property whose value is <code>true</code> iff this operator + * writes on a database. + * <p> + * Most operators operate solely on streams of elements or binding sets. 
+ * Some operators read or write on the database using an access path, + * which is typically described by an {@link IPredicate}. This property + * MUST be <code>true</code> when access path is used to write on the + * database. + * <p> + * Operators which read or write on the database must declare the + * {@link Annotations#TIMESTAMP} associated with that operation. * - * @todo Combine the read and write timestamps as a single - * <code>TX</code> value and require this on any operator which - * reads or writes on the database. + * @see #TIMESTAMP */ - String READ_TIMESTAMP = BOp.class.getName() + ".readTimestamp"; - + String MUTATION = BOp.class.getName() + ".mutation"; + + boolean DEFAULT_MUTATION = false; + /** - * The timestamp (or transaction identifier) associated with a write on - * the database. + * The timestamp (or transaction identifier) used by this operator if it + * reads or writes on the database. + * + * @see #MUTATION */ - String WRITE_TIMESTAMP = BOp.class.getName() + ".writeTimestamp"; + String TIMESTAMP = BOp.class.getName() + ".timestamp"; /** * For hash partitioned operators, this is the set of the member nodes @@ -202,7 +229,7 @@ * @todo Move onto an interface parallel to {@link IShardwisePipelineOp} */ String MEMBER_SERVICES = "memberServices"; - + } - + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -338,10 +338,28 @@ } + /** + * The default implementation returns {@link BOpEvaluationContext#ANY} and + * must be overridden by operators which have a different {@link BOpEvaluationContext}. + * <p> + * {@inheritDoc} + */ public BOpEvaluationContext getEvaluationContext() { return BOpEvaluationContext.ANY; } - + + public final boolean isMutation() { + + return getProperty(Annotations.MUTATION, Annotations.DEFAULT_MUTATION); + + } + + public final long getTimestamp() { + + return getRequiredProperty(Annotations.TIMESTAMP); + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -115,23 +115,27 @@ return runningQuery.getIndexManager(); } - /** - * The timestamp or transaction identifier against which the query is - * reading. - */ - public final long getReadTimestamp() { - return runningQuery.getReadTimestamp(); - } +// /** +// * The timestamp or transaction identifier against which the query is +// * reading. +// * +// * @deprecated by {@link BOp.Annotations#TIMESTAMP} +// */ +// public final long getReadTimestamp() { +// return runningQuery.getReadTimestamp(); +// } +// +// /** +// * The timestamp or transaction identifier against which the query is +// * writing. +// * +// * @deprecated by {@link BOp.Annotations#TIMESTAMP} +// */ +// public final long getWriteTimestamp() { +// return runningQuery.getWriteTimestamp(); +// } /** - * The timestamp or transaction identifier against which the query is - * writing. 
- */ - public final long getWriteTimestamp() { - return runningQuery.getWriteTimestamp(); - } - - /** * The index partition identifier -or- <code>-1</code> if the index is not * sharded. */ @@ -305,105 +309,49 @@ * order to support mutation operator we will also have to pass in the * {@link #writeTimestamp} or differentiate this in the method name. */ - public IRelation getReadRelation(final IPredicate<?> pred) { + public IRelation getRelation(final IPredicate<?> pred) { /* - * @todo Cache the resource locator? - * - * @todo This should be using the federation as the index manager when - * locating a resource for scale-out, right? But s/o reads must use the - * local index manager when actually obtaining the index view for the - * relation. + * Note: This uses the federation as the index manager when locating a + * resource for scale-out. However, s/o reads must use the local index + * manager when actually obtaining the index view for the relation. */ - return (IRelation) getIndexManager().getResourceLocator().locate( - pred.getOnlyRelationName(), getReadTimestamp()); + final IIndexManager tmp = getFederation() == null ? getIndexManager() + : getFederation(); + + final long timestamp = pred + .getRequiredProperty(BOp.Annotations.TIMESTAMP); - } + return (IRelation<?>) tmp.getResourceLocator().locate( + pred.getOnlyRelationName(), timestamp); - /** - * Return a writable view of the relation. - * - * @param namespace - * The namespace of the relation. - * - * @return A writable view of the relation. - */ - public IRelation getWriteRelation(final String namespace) { - - /* - * @todo Cache the resource locator? - * - * @todo This should be using the federation as the index manager when - * locating a resource for scale-out, right? But s/o writes must use - * the local index manager when actually obtaining the index view for - * the relation. - */ - return (IRelation) getIndexManager().getResourceLocator().locate( - namespace, getWriteTimestamp()); - } - /** - * Return an mutable view of the specified index. - * - * @param <T> - * The generic type of the elements in the relation. - * @param relation - * The relation. - * @param keyOrder - * The key order for that index. - * @param partitionId - * The partition identifier and <code>-1</code> unless running - * against an {@link IBigdataFederation}. - * - * @return The mutable view of the index. - * - * @throws UnsupportedOperationException - * if there is an attempt to read on an index partition when the - * database is not an {@link IBigdataFederation} or when the - * database is an {@link IBigdataFederation} unless the index - * partition was specified. - */ - public <T> ILocalBTreeView getMutableLocalIndexView( - final IRelation<T> relation, final IKeyOrder<T> keyOrder, - final int partitionId) { +// /** +// * Return a writable view of the relation. +// * +// * @param namespace +// * The namespace of the relation. +// * +// * @return A writable view of the relation. +// * +// * @deprecated by getRelation() +// */ +// public IRelation getWriteRelation(final String namespace) { +// +// /* +// * @todo Cache the resource locator? +// * +// * @todo This should be using the federation as the index manager when +// * locating a resource for scale-out, right? But s/o writes must use +// * the local index manager when actually obtaining the index view for +// * the relation. 
+// */ +// return (IRelation) getIndexManager().getResourceLocator().locate( +// namespace, getWriteTimestamp()); +// +// } - final String namespace = relation.getNamespace(); - - final ILocalBTreeView ndx; - - if (partitionId == -1) { - - if (getFederation() != null) { - // This is scale-out so the partition identifier is required. - throw new UnsupportedOperationException(); - } - - // The index is not partitioned. - ndx = (ILocalBTreeView) getIndexManager().getIndex(namespace + "." - + keyOrder.getIndexName(), getWriteTimestamp()); - - } else { - - if (getFederation() == null) { - // This is not scale-out so index partitions are not supported. - throw new UnsupportedOperationException(); - } - - // The name of the desired index partition. - final String name = DataService.getIndexPartitionName(namespace - + "." + keyOrder.getIndexName(), partitionId); - - // MUST be a local index view. - ndx = (ILocalBTreeView) getIndexManager().getIndex(name, - getWriteTimestamp()); - - } - - return ndx; - - } - /** * Obtain an access path reading from relation for the specified predicate * (from the tail of some rule). @@ -443,10 +391,13 @@ final int partitionId = predicate.getPartitionId(); + final long timestamp = predicate + .getRequiredProperty(BOp.Annotations.TIMESTAMP); + final int flags = predicate.getProperty( PipelineOp.Annotations.FLAGS, PipelineOp.Annotations.DEFAULT_FLAGS) - | (TimestampUtility.isReadOnly(getReadTimestamp()) ? IRangeQuery.READONLY + | (TimestampUtility.isReadOnly(timestamp) ? IRangeQuery.READONLY : 0); final int chunkOfChunksCapacity = predicate.getProperty( @@ -463,8 +414,6 @@ final IIndexManager indexManager = getIndexManager(); - final long readTimestamp = getReadTimestamp(); - if (predicate.getPartitionId() != -1) { /* @@ -497,9 +446,9 @@ // MUST be a local index view. final ILocalBTreeView ndx = (ILocalBTreeView) indexManager - .getIndex(name, readTimestamp); + .getIndex(name, timestamp); - return new AccessPath(relation, indexManager, readTimestamp, + return new AccessPath(relation, indexManager, timestamp, predicate, keyOrder, ndx, flags, chunkOfChunksCapacity, chunkCapacity, fullyBufferedReadThreshold).init(); @@ -522,13 +471,13 @@ throw new IllegalArgumentException("no index? relation=" + relation.getNamespace() + ", timestamp=" - + readTimestamp + ", keyOrder=" + keyOrder + ", pred=" + + timestamp + ", keyOrder=" + keyOrder + ", pred=" + predicate + ", indexManager=" + getIndexManager()); } accessPath = new AccessPath((IRelation) relation, indexManager, - readTimestamp, (IPredicate) predicate, + timestamp, (IPredicate) predicate, (IKeyOrder) keyOrder, ndx, flags, chunkOfChunksCapacity, chunkCapacity, fullyBufferedReadThreshold).init(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -15,7 +15,7 @@ public class BindingSetChunk implements IChunkMessage { /** The query controller. */ - private final IQueryClient clientProxy; + private final IQueryClient queryController; /** * The query identifier. 
@@ -38,7 +38,7 @@ private IAsynchronousIterator<IBindingSet[]> source; public IQueryClient getQueryController() { - return clientProxy; + return queryController; } public long getQueryId() { @@ -57,22 +57,17 @@ return true; } - /** - * - * @todo constructor to accept the BlockingBuffer instead as part of - * {@link IChunkMessage} harmonization (or an "IChunk" API). - */ - public BindingSetChunk(final IQueryClient clientProxy, final long queryId, - final int bopId, final int partitionId, + public BindingSetChunk(final IQueryClient queryController, + final long queryId, final int bopId, final int partitionId, final IAsynchronousIterator<IBindingSet[]> source) { - if (clientProxy == null) + if (queryController == null) throw new IllegalArgumentException(); if (source == null) throw new IllegalArgumentException(); - this.clientProxy = clientProxy; + this.queryController = queryController; this.queryId = queryId; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -30,15 +30,6 @@ /** The identifier for the target index partition. */ int getPartitionId(); - /* - * @todo Report the #of bytes available with this message. However, first - * figure out if that if the #of bytes in this {@link OutputChunk} or across - * all {@link OutputChunk}s available for the target service and sink. - */ - // @todo move to concrete subclass or allow ZERO if data are in memory (no RMI). -// /** The #of bytes of data which are available for that operator. */ -// int getBytesAvailable(); - /** * Return <code>true</code> if the chunk is materialized on the receiver. */ @@ -79,15 +70,4 @@ */ IAsynchronousIterator<IBindingSet[]> iterator(); - // /** - // * The Internet address and port of a {@link ResourceService} from which - // * the receiver may demand the data. - // */ - // InetSocketAddress getServiceAddr(); - // - // /** - // * The set of resources on the sender which comprise the data. - // */ - // Iterator<UUID> getChunkIds(); - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -56,25 +56,25 @@ */ IIndexManager getIndexManager(); - /** - * The timestamp or transaction identifier against which the query is - * reading. - * - * @todo may be moved into the individual operator. See - * {@link BOp.Annotations#READ_TIMESTAMP} - */ - long getReadTimestamp(); +// /** +// * The timestamp or transaction identifier against which the query is +// * reading. +// * +// * @deprecated move into the individual operator. See +// * {@link BOp.Annotations#TIMESTAMP} +// */ +// long getReadTimestamp(); +// +// /** +// * The timestamp or transaction identifier against which the query is +// * writing. +// * +// * @deprecated moved into the individual operator. 
See +// * {@link BOp.Annotations#TIMESTAMP} +// */ +// long getWriteTimestamp(); /** - * The timestamp or transaction identifier against which the query is - * writing. - * - * @todo may be moved into the individual operator. See - * {@link BOp.Annotations#WRITE_TIMESTAMP} - */ - long getWriteTimestamp(); - - /** * Terminate query evaluation */ void halt(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -89,18 +89,18 @@ /** The unique identifier for this query. */ final private long queryId; - /** - * The timestamp or transaction identifier against which the query is - * reading. - */ - final private long readTimestamp; +// /** +// * The timestamp or transaction identifier against which the query is +// * reading. +// */ +// final private long readTimestamp; +// +// /** +// * The timestamp or transaction identifier against which the query is +// * writing. +// */ +// final private long writeTimestamp; - /** - * The timestamp or transaction identifier against which the query is - * writing. - */ - final private long writeTimestamp; - // /** // * The timestamp when the query was accepted by this node (ms). // */ @@ -389,41 +389,42 @@ this.bopIndex = BOpUtility.getIndex(query); this.statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() : null; - /* - * @todo when making a per-bop annotation, queries must obtain a tx for - * each timestamp up front on the controller and rewrite the bop to hold - * the tx until it is done. - * - * @todo This is related to how we handle sequences of steps, parallel - * steps, closure of steps, and join graphs. Those operations need to be - * evaluated on the controller. We will have to model the relationship - * between the subquery and the query in order to terminate the subquery - * when the query halts and to terminate the query if the subquery - * fails. - * - * @todo Closure operations must rewrite the query to update the - * annotations. Each pass in a closure needs to be its own "subquery" - * and will need to have a distinct queryId. - */ - final Long readTimestamp = query - .getProperty(BOp.Annotations.READ_TIMESTAMP); - - // @todo remove default when elevating to per-writable bop annotation. - final long writeTimestamp = query.getProperty( - BOp.Annotations.WRITE_TIMESTAMP, ITx.UNISOLATED); - - if (readTimestamp == null) - throw new IllegalArgumentException(); - - if (readTimestamp.longValue() == ITx.UNISOLATED) - throw new IllegalArgumentException(); - - if (TimestampUtility.isReadOnly(writeTimestamp)) - throw new IllegalArgumentException(); - - this.readTimestamp = readTimestamp; - this.writeTimestamp = writeTimestamp; +// /* +// * @todo when making a per-bop annotation, queries must obtain a tx for +// * each timestamp up front on the controller and rewrite the bop to hold +// * the tx until it is done. +// * +// * @todo This is related to how we handle sequences of steps, parallel +// * steps, closure of steps, and join graphs. Those operations need to be +// * evaluated on the controller. 
We will have to model the relationship +// * between the subquery and the query in order to terminate the subquery +// * when the query halts and to terminate the query if the subquery +// * fails. +// * +// * @todo Closure operations must rewrite the query to update the +// * annotations. Each pass in a closure needs to be its own "subquery" +// * and will need to have a distinct queryId. +// */ +// final Long timestamp = query +// .getProperty(BOp.Annotations.TIMESTAMP); +// +// // @todo remove default when elevating to per-writable bop annotation. +// final long writeTimestamp = query.getProperty( +// BOp.Annotations.WRITE_TIMESTAMP, ITx.UNISOLATED); +// +// if (readTimestamp == null) +// throw new IllegalArgumentException(); +// +// if (readTimestamp.longValue() == ITx.UNISOLATED) +// throw new IllegalArgumentException(); +// +// if (TimestampUtility.isReadOnly(writeTimestamp)) +// throw new IllegalArgumentException(); +// +// this.readTimestamp = readTimestamp; +// +// this.writeTimestamp = writeTimestamp; this.timeout = query.getProperty(BOp.Annotations.TIMEOUT, BOp.Annotations.DEFAULT_TIMEOUT); @@ -960,16 +961,16 @@ } - public long getReadTimestamp() { - - return readTimestamp; - - } +// public long getReadTimestamp() { +// +// return readTimestamp; +// +// } +// +// public long getWriteTimestamp() { +// +// return writeTimestamp; +// +// } - public long getWriteTimestamp() { - - return writeTimestamp; - - } - } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -0,0 +1,244 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 10, 2010 + */ + +package com.bigdata.bop.fed; + +import java.io.Serializable; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; + +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.engine.IChunkMessage; +import com.bigdata.bop.engine.IQueryClient; +import com.bigdata.io.DirectBufferPoolAllocator.IAllocation; +import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.service.ResourceService; + +/** + * An {@link IChunkMessage} where the payload is made available to the receiving + * service using an NIO transfer against the sender's {@link ResourceService}. + * This is suitable for moving large blocks of data during query evaluation. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class ChunkMessageWithNIOPayload implements IChunkMessage, Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** + * Metadata about an allocation to be retrieved from the sender's + * {@link ResourceService}. + */ + private final class A implements Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + /** + * The identifier of the resource on the sender's + * {@link ResourceService}. + */ + private final UUID bufferId; + + /** + * The size of that resource in bytes. + */ + private final int nbytes; + + /** + * + * @param bufferId + * The identifier of the resource on the sender's + * {@link ResourceService}. + * @param nbytes + * The size of that resource in bytes. + */ + public A(final UUID bufferId, final int nbytes) { + this.bufferId = bufferId; + this.nbytes = nbytes; + } + } + + final private IQueryClient queryController; + + final private long queryId; + + final private int bopId; + + final private int partitionId; + + final private int nbytes; + + /** + * Note: Even when we send one message per chunk, we can still have a list + * of {@link IAllocation}s if the chunk did not get formatted onto a single + * {@link IAllocation}. + */ + final private A[] allocations; + + /** + * The Internet address and port where the receiver can fetch the payload + * using the sender's {@link ResourceService}. + */ + final private InetSocketAddress addr; + + public IQueryClient getQueryController() { + return queryController; + } + + public long getQueryId() { + return queryId; + } + + public int getBOpId() { + return bopId; + } + + public int getPartitionId() { + return partitionId; + } + + /** The #of bytes of data which are available for that operator. */ + public int getBytesAvailable() { + return nbytes; + } + + /** + * The Internet address and port of a {@link ResourceService} from which the + * receiver may demand the data. + */ + public InetSocketAddress getServiceAddr() { + return addr; + } + + /** + * + * @param queryController + * @param queryId + * @param sinkId + * @param partitionId + * @param allocations + * The ordered list of {@link IAllocation}s comprising the chunk. + * @param addr + * The Internet address and port where the receiver can fetch the + * payload using the sender's {@link ResourceService}. + */ + public ChunkMessageWithNIOPayload(final IQueryClient queryController, + final long queryId, final int sinkId, final int partitionId, + final List<IAllocation> allocations, final InetSocketAddress addr) { + + if (queryController == null) + throw new IllegalArgumentException(); + + if (allocations == null) + throw new IllegalArgumentException(); + + if (addr == null) + throw new IllegalArgumentException(); + + this.queryController = queryController; + this.queryId = queryId; + this.bopId = sinkId; + this.partitionId = partitionId; + final int n = allocations.size(); + this.allocations = new A[n]; + int i = 0; + int nbytes = 0; + final Iterator<IAllocation> itr = allocations.iterator(); + while (itr.hasNext()) { + final IAllocation alloc = itr.next(); + final int len = alloc.getSlice().capacity(); + this.allocations[i++] = new A(alloc.getId(), len); + nbytes += len; + } + this.nbytes = nbytes; + this.addr = addr; + + } + + public boolean isMaterialized() { + return materialized; + } + private volatile boolean materialized = false; + + /** + * + * FIXME unit tests for materializing and visiting the chunk. 
+ */ + synchronized public void materialize(FederatedRunningQuery runningQuery) { + + if (materialized) + return; + + final AllocationContextKey key = new ShardContext(queryId, bopId, + partitionId); + + final IAllocationContext allocationContext = runningQuery + .getAllocationContext(key); + + final ResourceService resourceService = runningQuery.getQueryEngine() + .getResourceService(); + +// for (A a : allocations) { +// +// /* +// * FIXME harmonize an IAllocation[] with a ByteBuffer for the {@link +// * ResourceService}. The problem is that an object to be sent across +// * the wire may span multiple ByteBuffers. +// */ +// final ByteBuffer tmp = allocationContext.alloc(a.nbytes); +// +// new ResourceService.ReadBufferTask(addr, a.bufferId, tmp); +// +// } + + throw new UnsupportedOperationException(); + + } + + public IAsynchronousIterator<IBindingSet[]> iterator() { + + if (!isMaterialized()) + throw new UnsupportedOperationException(); + + // TODO Auto-generated method stub + throw new UnsupportedOperationException(); + + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ChunkMessageWithNIOPayload.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -299,5 +299,5 @@ * Cache for {@link #getQueryPeer(UUID)}. */ private final ConcurrentHashMap<UUID, IQueryPeer> proxyMap = new ConcurrentHashMap<UUID, IQueryPeer>(); - + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -27,7 +27,6 @@ package com.bigdata.bop.fed; -import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.rmi.RemoteException; import java.util.Arrays; @@ -47,7 +46,6 @@ import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryPeer; -import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.RunningQuery; import com.bigdata.io.DirectBufferPoolAllocator; import com.bigdata.io.SerializerUtil; @@ -242,22 +240,81 @@ } /** + * Overridden to make this visible to the {@link FederatedQueryEngine}. + * <p> + * {@inheritDoc} + */ + @Override + protected void acceptChunk(final IChunkMessage msg) { + + super.acceptChunk(msg); + + } + + /** + * Resolve the proxy for an {@link IQueryPeer}. This is special cased for + * both <i>this</i> service (the actual reference is returned) and the query + * controller (we use an alternative path to discover the query controller + * since it might not be registered against a lookup service if it is not a + * data service). + * + * @param serviceUUID + * The service identifier for the peer. 
+ * + * @return The proxy for the service or <code>null</code> if the service + * could not be discovered. + */ + protected IQueryPeer getQueryPeer(final UUID serviceUUID) { + + if (serviceUUID == null) + throw new IllegalArgumentException(); + + final IQueryPeer queryPeer; + + if(serviceUUID.equals(getQueryEngine().getServiceUUID())) { + + // Return a hard reference to the query engine (NOT a proxy). + return getQueryEngine(); + + } else if (serviceUUID.equals(queryControllerUUID)) { + + // The target is the query controller. + queryPeer = getQueryController(); + + } else { + + // The target is some data service. + queryPeer = getQueryEngine().getQueryPeer(serviceUUID); + + } + + return queryPeer; + + } + + /** * Return the {@link IAllocationContext} for the given key. * * @param key * The key. * * @return The allocation context. + * + * @todo use typesafe enum for the types of allocation contexts? */ - private IAllocationContext getAllocationContext( + public IAllocationContext getAllocationContext( final AllocationContextKey key) { final IAllocationContext ctx = getQueryEngine().getResourceService() .getAllocator().getAllocationContext(key); // note the allocation contexts associated with this running query. - allocationContexts.putIfAbsent(key, ctx); + final IAllocationContext ctx2 = allocationContexts + .putIfAbsent(key, ctx); + if (ctx2 != null) + return ctx2; + return ctx; } @@ -309,10 +366,6 @@ * Note: IKeyOrder tells us which index will be used and should be * set on the predicate by the join optimizer. * - * @todo Use the read or write timestamp depending on whether the - * operator performs mutation [this must be part of the operator - * metadata.] - * * @todo Set the capacity of the the "map" buffer to the size of the * data contained in the sink (in fact, we should just process the * sink data in place). @@ -321,7 +374,7 @@ @SuppressWarnings("unchecked") final IPredicate<E> pred = ((IShardwisePipelineOp) bop).getPredicate(); final IKeyOrder<E> keyOrder = pred.getKeyOrder(); - final long timestamp = getReadTimestamp(); // @todo read vs write timestamp. + final long timestamp = pred.getTimestamp(); final int capacity = 1000;// @todo final int capacity2 = 1000;// @todo final MapBindingSetsOverShardsBuffer<IBindingSet, E> mapper = new MapBindingSetsOverShardsBuffer<IBindingSet, E>( @@ -373,11 +426,11 @@ final IBuffer<IBindingSet> shardSink = e.getValue(); - // FIXME harmonize IBuffer<IBindingSet> vs IBuffer<IBindingSet[]> -// sendOutputChunkReadyMessage(newOutputChunk(locator -// .getDataServiceUUID(), sinkId, allocationContext, -// shardSink)); +// // FIXME harmonize IBuffer<IBindingSet> vs IBuffer<IBindingSet[]> +// sendChunkMessage(locator.getDataServiceUUID(), sinkId, locator +// .getPartitionId(), allocationContext, shardSink); throw new UnsupportedOperationException(); + } return nchunksout; @@ -394,8 +447,8 @@ final IAllocationContext allocationContext = getAllocationContext(new QueryContext( getQueryId())); - sendOutputChunkReadyMessage(newOutputChunk(queryControllerUUID, - sinkId, allocationContext, sink)); + sendChunkMessage(queryControllerUUID, sinkId, -1/* partitionId */, + allocationContext, sink); /* * Chunks send to the query controller do not keep the query @@ -411,7 +464,9 @@ } /** - * Create an {@link OutputChunk} from some intermediate results. + * Create and send an {@link IChunkMessage} from some intermediate results. + * Various optimizations are employed depending on the amount of data to be + * moved and whether or not the target is this service. 
* * @param serviceUUID * The {@link UUID} of the {@link IQueryPeer} who is the @@ -420,15 +475,46 @@ * The identifier of the target {@link BOp}. * @param allocationContext * The allocation context within which the {@link ByteBuffer}s - * will be managed for this {@link OutputChunk}. + * will be managed for this {@link ChunkMessageWithNIOPayload}. * @param source * The binding sets to be formatted onto a buffer. * - * @return The {@link OutputChunk}. + * @return The {@link ChunkMessageWithNIOPayload}. + * + * @todo This is basically a factory for creating {@link IChunkMessage}s. + * That factory pattern is combined with the logic to send the message + * so that we can do within-JVM handoffs. We could break these things apart + * using {@link IChunkMessage#isMaterialized()} to detect inline + * cases. That would let us send out the messages in parallel, which + * could help to cut latency when an operator has a large fan out (in + * scale-out when mapping over shards or nodes). + * + * @todo We probably need to use the {@link DirectBufferPoolAllocator} to + * receive the chunks within the {@link ManagedResourceService} as + * well. + * + * @todo Release the allocations associated with each output chunk once it + * is received by the remote service. + * <p> + * When the query terminates all output chunks targeting any node + * EXCEPT the query controller should be immediately dropped. + * <p> + * If there is an error during query evaluation, then the output + * chunks for the query controller should be immediately dropped. + * <p> + * If the iterator draining the results on the query controller is + * closed, then the output chunks for the query controller should be + * immediately dropped. + * + * @todo There are a few things where the resource must be made available to + * more than one operator evaluation phase. The best examples are + * temporary graphs for parallel closure and large collections of + * graphIds for SPARQL "NAMED FROM DATA SET" extensions. */ - protected OutputChunk newOutputChunk( + protected void sendChunkMessage( final UUID serviceUUID, final int sinkId, + final int partitionId, final IAllocationContext allocationContext, final IBlockingBuffer<IBindingSet[]> source) { @@ -441,6 +527,97 @@ if (source == null) throw new IllegalArgumentException(); + if (source.isEmpty()) + throw new RuntimeException(); + + // The peer to be notified. + final IQueryPeer peerProxy = getQueryPeer(serviceUUID); + + if (peerProxy == null) + throw new RuntimeException("Not found: serviceId=" + serviceUUID); + + // true iff the target is this service (no proxy, no RMI). + final boolean thisService = peerProxy == getQueryEngine(); + + if(thisService) { + /* + * Leave the chunk as Java objects and drop it directly onto the + * query engine. + */ + + final IChunkMessage msg = new BindingSetChunk(getQueryController(), + getQueryId(), sinkId, partitionId, source.iterator()); + + getQueryEngine().bufferReady(msg); + + return; + + } + + /* + * We will be notifying another service (RMI) that a chunk is available. + * + * Note: Depending on how much data is involved, we may move it with the + * RMI message or out of band using NIO. This decision affects how we + * serialize the chunk. + */ + final IChunkMessage msg; + if (source.size() < 100) { + + /* + * FIXME Send payload inline with the RMI message. + */ + +// final byte[] data = SerializerUtil.serialize(obj); +// +// // @todo harmonize serialization and compression and ctors.
+// msg = new ThickChunkMessage(getQueryController(), getQueryId(), +// sinkId, partitionId, data); + throw new UnsupportedOperationException(); + + } else + { + + /* + * Marshall the data onto direct ByteBuffer(s) and send a thin + * message by RMI. The receiver will retrieve the data using NIO + * against the ResourceService. + * + * @todo harmonize serialization and compression and ctors. + */ + final List<IAllocation> allocations = moveToNIOBuffers( + allocationContext, source); + + msg = new ChunkMessageWithNIOPayload(getQueryController(), + getQueryId(), sinkId, partitionId, allocations, + getQueryEngine().getResourceService().getAddr()); + + } + + try { + + peerProxy.bufferReady(msg); + + } catch (RemoteException e) { + + throw new RuntimeException(e); + + } + + } + + /** + * Chunk-wise serialization of the data onto allocations. + * @param allocationContext + * @param source + * @return + * + * @todo should be one message per chunk, right? + */ + private List<IAllocation> moveToNIOBuffers( + final IAllocationContext allocationContext, + final IBlockingBuffer<IBindingSet[]> source) { + int nbytes = 0; final List<IAllocation> allocations = new LinkedList<IAllocation>(); @@ -476,135 +653,14 @@ } + return allocations; + } finally { itr.close(); } - return new OutputChunk(getQueryId(), serviceUUID, sinkId, nbytes, - allocations); - } - - protected IQueryPeer getQueryPeer(final UUID serviceUUID) { - if (serviceUUID == null) - throw new IllegalArgumentException(); - - final IQueryPeer queryPeer; - - if (serviceUUID.equals(queryControllerUUID)) { - - // The target is the query controller. - queryPeer = getQueryController(); - - } else { - - // The target is some data service. - queryPeer = getQueryEngine().getQueryPeer(serviceUUID); - - } - - return queryPeer; - - } - - /** - * Overridden to make this visible to the {@link FederatedQueryEngine}. - * <p> - * {@inheritDoc} - */ - @Override - protected void acceptChunk(final IChunkMessage msg) { - - super.acceptChunk(msg); - - } - - /** - * Notify a remote {@link IQueryPeer} that data is available for it. - * - * @todo If the target for the {@link OutputChunk} is this node then just - * drop it onto the {@link QueryEngine}. - * - * FIXME Fast path with inline RMI based transfer for small sets of - * data using a 'thin' {@link IChunkMessage}. - */ - protected void sendOutputChunkReadyMessage(final OutputChunk outputChunk) { - - try { - - // The peer to be notified. - final IQueryPeer peerProxy = getQueryPeer(outputChunk.serviceId); - - // The Internet address and port where the peer can read the data - // from this node. - final InetSocketAddress serviceAddr = getQueryEngine() - .getResourceService().getAddr(); - - // FIXME invoke peerProxy.bufferReady(msg) here! -// peerProxy.bufferReady(getQueryController(), serviceAddr, -// getQueryId(), outputChunk.sinkId); - peerProxy.bufferReady(null/*FIXME msg.*/); - - } catch (RemoteException e) { - - throw new RuntimeException(e); - - } - - } - - /** - * A chunk of outputs. - * - * @todo We probably need to use the {@link DirectBufferPoolAllocator} to - * receive the chunks within the {@link ManagedResourceService} as - * well. - * - * @todo Release the allocations associated with each output chunk once it - * is received by the remote service. - * <p> - * When the query terminates all output chunks targeting any node - * EXCEPT the query controller should be immediately dropped.
- * <p> - * If there is an error during query evaluation, then the output - * chunks for the query controller should be immediately dropped. - * <p> - * If the iterator draining the results on the query controller is - * closed, then the output chunks for the query controller should be - * immediately dropped. - * - * @todo There are a few things where the resource must be made available to - * more than one operator evaluation phase. The best examples are - * temporary graphs for parallel closure and large collections of - * graphIds for SPARQL "NAMED FROM DATA SET" extensions. - */ - private static class OutputChunk { - - final long queryId; - - final UUID serviceId; - - final int sinkId; - - final int nbytes; - - final List<IAllocation> allocations; - - public OutputChunk(final long queryId, final UUID serviceId, - final int sinkId, final int nbytes, - final List<IAllocation> allocations) { - - this.queryId = queryId; - this.serviceId = serviceId; - this.sinkId = sinkId; - this.nbytes = nbytes; - this.allocations = allocations; - - } - - } - } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -50,7 +50,7 @@ */ private static final long serialVersionUID = 1L; - final private IQueryClient clientProxy; + final private IQueryClient queryController; final private long queryId; @@ -61,7 +61,7 @@ final private byte[] data; public IQueryClient getQueryController() { - return clientProxy; + return queryController; } public long getQueryId() { @@ -84,11 +84,19 @@ return data.length; } - public ThickChunkMessage(final IQueryClient clientProxy, + /** + * + * @param queryController + * @param queryId + * @param bopId + * @param partitionId + * @param data + */ + public ThickChunkMessage(final IQueryClient queryController, final long queryId, final int bopId, final int partitionId, final byte[] data) { - if (clientProxy == null) + if (queryController == null) throw new IllegalArgumentException(); if (data == null) @@ -98,7 +106,7 @@ if (data.length == 0) throw new IllegalArgumentException(); - this.clientProxy = clientProxy; + this.queryController = queryController; this.queryId = queryId; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -492,7 +492,7 @@ this.optional = joinOp.isOptional(); this.variablesToKeep = joinOp.variablesToKeep(); this.context = context; - this.relation = context.getReadRelation(right); + this.relation = context.getRelation(right); this.source = context.getSource(); this.sink = context.getSink(); this.sink2 = context.getSink2(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java 2010-09-10 19:44:37 UTC (rev 3530) +++ 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -41,10 +41,15 @@ import com.bigdata.bop.engine.BOpStats; import com.bigdata.btree.ILocalBTreeView; import com.bigdata.btree.ITupleSerializer; +import com.bigdata.btree.UnisolatedReadWriteIndex; import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.ITx; import com.bigdata.relation.IRelation; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; +import com.bigdata.service.DataService; +import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.IKeyOrder; /** @@ -181,7 +186,7 @@ predicate = op.getPredicate(); - relation = context.getWriteRelation(op.getRelation()); + relation = context.getRelation(predicate); keyOrder = op.getKeyOrder(); @@ -198,14 +203,8 @@ */ public Void call() throws Exception { - /* - * @todo validate for s/o. Since this goes through a common code - * path, what we really need to test is getMutableLocalIndexView(). - * The rest of the insert operation can be tested against a local - * Journal. - */ - final ILocalBTreeView ndx = context.getMutableLocalIndexView( - relation, keyOrder, context.getPartitionId()); + final ILocalBTreeView ndx = getMutableLocalIndexView(relation, + keyOrder, context.getPartitionId()); final IKeyBuilder keyBuilder = ndx.getIndexMetadata() .getKeyBuilder(); @@ -260,6 +259,95 @@ } + /** + * Return a mutable view of the specified index. + * + * @param <T> + * The generic type of the elements in the relation. + * @param relation + * The relation. + * @param keyOrder + * The key order for that index. + * @param partitionId + * The partition identifier -or- <code>-1</code> unless + * running against an {@link IBigdataFederation}. + * + * @return The mutable view of the index. + * + * @throws UnsupportedOperationException + * if there is an attempt to read on an index partition when + * the database is not an {@link IBigdataFederation} or when + * the database is an {@link IBigdataFederation} unless the + * index partition was specified. + * + * @todo validate for standalone. probably needs to be wrapped as an + * {@link UnisolatedReadWriteIndex} which might be done by how we + * get the relation view. + * + * @todo validate for s/o. Since this goes through a common code path, + * what we really need to test is getMutableLocalIndexView(). The + * rest of the insert operation can be tested against a local + * Journal. + * + * FIXME This must obtain the appropriate lock for the mutable + * index in scale-out. + */ + public <T> ILocalBTreeView getMutableLocalIndexView( + final IRelation<T> relation, final IKeyOrder<T> keyOrder, + final int partitionId) { + + if(true) { + /* + * FIXME Concurrency control and locks. Maybe submit as an + * AbstractTask? + */ + throw new UnsupportedOperationException(); + } + + final IBigdataFederation<?> fed = context.getFederation(); + final IIndexManager indexManager = context.getIndexManager(); + final long writeTimestamp = predicate.getTimestamp(); + + final String namespace = relation.getNamespace(); + + final ILocalBTreeView ndx; + + if (partitionId == -1) { + + if (fed != null) { + // This is scale-out so the partition identifier is required. + throw new UnsupportedOperationException(); + } + + // The index is not partitioned. + ndx = (ILocalBTreeView) indexManager.getIndex(namespace + "."
+ + keyOrder.getIndexName(), writeTimestamp); + + } else { + + if (fed == null) { + /* + * This is not scale-out so index partitions are not + * supported. + */ + throw new UnsupportedOperationException(); + } + + // The name of the desired index partition. + final String name = DataService.getIndexPartitionName(namespace + + "." + keyOrder.getIndexName(), partitionId); + + // MUST be a local index view. + ndx = (ILocalBTreeView) indexManager.getIndex(name, + writeTimestamp); + + } + + return ndx; + + } + + } @@ -280,5 +368,5 @@ return BOpEvaluationContext.SHARDED; } - + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ndx/SampleLocalShard.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ndx/SampleLocalShard.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ndx/SampleLocalShard.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -64,7 +64,7 @@ final IPredicate<E> pred = pred(); - final IRelation<E> view = context.getReadRelation(pred); + final IRelation<E> view = context.getRelation(pred); final IAccessPath<E> accessPath = view.getAccessPath(pred); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java 2010-09-10 19:44:37 UTC (rev 3530) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java 2010-09-10 20:47:27 UTC (rev 3531) @@ -49,7 +49,6 @@ import com.bigdata.bop.engine.MockRunningQuery; import com.bigdata.bop.engine.TestQueryEngine; import com.bigdata.bop.solutions.DistinctBindingSetOp; -import com.bigdata.journal.ITx; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; @@ -210,11 +209,8 @@ final IBlockingBuffer<IBindingSet[]> sink2 =... [truncated message content] |
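The central pattern in the rev 3531 change set above is that the read/write timestamp now travels as a BOp.Annotations.TIMESTAMP annotation on each operator (predicate) rather than as query-wide state on IRunningQuery. A minimal sketch of the resulting lookup, using only the calls that appear in the patch; the helper name locateRelation and its explicit parameters are illustrative, not part of the committed API:

    import com.bigdata.bop.BOp;
    import com.bigdata.bop.IPredicate;
    import com.bigdata.journal.IIndexManager;
    import com.bigdata.relation.IRelation;
    import com.bigdata.service.IBigdataFederation;

    public class RelationLookupSketch {

        /**
         * Resolve the relation for a predicate using the per-operator
         * TIMESTAMP annotation, mirroring BOpContext#getRelation above.
         */
        public static IRelation<?> locateRelation(
                final IIndexManager localIndexManager,
                final IBigdataFederation<?> fed, final IPredicate<?> pred) {

            // Scale-out locates resources via the federation; standalone
            // falls back to the local index manager.
            final IIndexManager tmp = (fed == null) ? localIndexManager : fed;

            // The timestamp is a required annotation on the operator itself,
            // so a single query can read and write against different commit
            // points.
            final long timestamp = pred
                    .getRequiredProperty(BOp.Annotations.TIMESTAMP);

            return (IRelation<?>) tmp.getResourceLocator().locate(
                    pred.getOnlyRelationName(), timestamp);
        }

    }

This is what lets the separate getReadRelation()/getWriteRelation() methods collapse into the single getRelation() shown above, and why IRunningQuery drops getReadTimestamp()/getWriteTimestamp().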
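The companion change is the IChunkMessage routing in FederatedRunningQuery#sendChunkMessage. Condensed to its decision logic, with constructor arguments as they appear in the patch; here data and allocations stand in for the serialized payload and the direct-buffer allocations produced by moveToNIOBuffers, and the thick-message branch is still a FIXME in rev 3531, so this is the intended shape rather than the committed behavior:

    // Same service? No proxy, no RMI: drop the chunk, still as Java
    // objects, directly onto the local query engine.
    if (peerProxy == getQueryEngine()) {
        getQueryEngine().bufferReady(new BindingSetChunk(
                getQueryController(), getQueryId(), sinkId, partitionId,
                source.iterator()));
        return;
    }

    // Otherwise notify the remote peer by RMI. Small chunks ride inline
    // with the message; large chunks send a thin message and the receiver
    // pulls the payload via NIO from the sender's ResourceService.
    final IChunkMessage msg = (source.size() < 100)
            ? new ThickChunkMessage(getQueryController(), getQueryId(),
                    sinkId, partitionId, data)
            : new ChunkMessageWithNIOPayload(getQueryController(),
                    getQueryId(), sinkId, partitionId, allocations,
                    getQueryEngine().getResourceService().getAddr());

    peerProxy.bufferReady(msg);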
From: <ble...@us...> - 2010-09-10 19:44:46
|
Revision: 3530 http://bigdata.svn.sourceforge.net/bigdata/?rev=3530&view=rev Author: blevine218 Date: 2010-09-10 19:44:37 +0000 (Fri, 10 Sep 2010) Log Message: ----------- Get initial test to pass. Re-work setting of system properties. Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Util.java Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-10 17:12:33 UTC (rev 3529) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-10 19:44:37 UTC (rev 3530) @@ -1,5 +1,4 @@ -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> <parent> @@ -15,7 +14,40 @@ <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> - <project.build.deployDirectory>${project.build.directory}/deploy</project.build.deployDirectory> + <deploy.root.dir>${project.build.directory}/deploy</deploy.root.dir> + <bigdata.dependency>bigdata-core</bigdata.dependency> + + + <!-- + This is kinda hokey, but not sure there's a better way to + construct the path to the root of the exploded tarball --> + + <deploy.dir>${deploy.root.dir}/${bigdata.dependency}-${project.version}</deploy.dir> + <test.dir>${deploy.dir}/testing</test.dir> + <testScript>${test.dir}/test.xml</testScript> + + <basedir>${test.dir}</basedir> + <app.home>${deploy.dir}</app.home> + <deploy.conf.dir>${test.dir}/conf</deploy.conf.dir> + <deploy.lib>${deploy.dir}/lib</deploy.lib> + <deploy.lib.test>${test.dir}/lib-test</deploy.lib.test> + <deploy.lib.dl>${deploy.dir}/lib-dl</deploy.lib.dl> + <test.codebase.dir>${deploy.lib.dl}</test.codebase.dir> + <test.codebase.port>23333</test.codebase.port> + <java.security.policy>${deploy.conf.dir}/policy.all</java.security.policy> + <log4j.configuration>${deploy.dir}/var/config/logging/log4j.properties</log4j.configuration> + <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack> + <default.nic>eth0</default.nic> + <parent.artifactName>bigdata-core</parent.artifactName> + + + <!-- + In the ANT script, hostname is obtained by an exec of the + 'hostname' command. Hard-coding to localhost for now. 
+ --> + <hostname>blevine-desktop</hostname> + <test.codebase>http://${hostname}:${test.codebase.port}/jsk-dl.jar</test.codebase> + <federation.name>bigdata.test.group-${hostname}</federation.name> </properties> @@ -33,15 +65,17 @@ <configuration> <artifactItems> <artifactItem> - <groupId>com.nokia.dataos.rds.bigdata</groupId> - <artifactId>bigdata-dist</artifactId> - <version>0.83.2-SNAPSHOT</version> - <type>tgz</type> - <outputDirectory>${project.build.deployDirectory}</outputDirectory> + <groupId>com.bigdata</groupId> + <artifactId>bigdata-core</artifactId> + <classifier>deploy</classifier> + <type>tar.gz</type> + <outputDirectory>${deploy.root.dir}</outputDirectory> </artifactItem> </artifactItems> + <useSubdirPerArtifact>true</useSubdirPerArtifact> + <!-- <overWriteSnapshots>true</overWriteSnapshots> - <overWriteReleases>true</overWriteReleases> + <overWriteReleases>true</overWriteReleases> --> </configuration> </execution> </executions> @@ -64,13 +98,55 @@ </plugin> <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>failsafe-maven-plugin</artifactId> - <version>2.4.3-alpha-1</version> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-failsafe-plugin</artifactId> <configuration> <includes> - <include>**/*.java</include> + <include>**/Test*.java</include> + <include>**/*Test.java</include> </includes> + <excludes> + <exclude>**/TestBigdataClientRemote.java</exclude> + </excludes> + <systemPropertyVariables> + <foo.bar.prop>hello</foo.bar.prop> + <java.security.policy>${java.security.policy}</java.security.policy> + <java.net.preferIPv4Stack>${java.net.preferIPv4Stack}</java.net.preferIPv4Stack> + <log4j.configuration>${log4j.configuration}</log4j.configuration> + <!-- <log4j.debug>true</log4j.debug> --> + + <basedir>${basedir}</basedir> <!-- Tells the unit tests where the ant script is, so they can find resources. --> + <app.home>${app.home}</app.home> <!-- This is the deployment directory, easily accessed by the DataFinder class. --> + <log4j.path>${log4j.configuration}</log4j.path> + <default.nic>${default.nic}</default.nic> + + <!-- Jini group name --> + <federation.name>${federation.name}</federation.name> + + <!-- TODO !!!!!!
+ <property key="java.class.path" value="${junit.classpath.text}" /> + --> + + <classserver.jar>${deploy.lib}/classserver.jar</classserver.jar> + <colt.jar>${deploy.lib}/colt.jar</colt.jar> + <ctc_utils.jar>${deploy.lib}/ctc_utils.jar</ctc_utils.jar> + <cweb-commons.jar>${deploy.lib}/cweb-commons.jar</cweb-commons.jar> + <cweb-extser.jar>${deploy.lib}/cweb-extser.jar</cweb-extser.jar> + <highscalelib.jar>${deploy.lib}/highscalelib.jar</highscalelib.jar> + <dsiutils.jar>${deploy.lib}/dsiutils.jar</dsiutils.jar> + <lgplutils.jar>${deploy.lib}/lgplutils.jar</lgplutils.jar> + <fastutil.jar>${deploy.lib}/fastutil.jar</fastutil.jar> + <icu4j.jar>${deploy.lib}/icu4j.jar</icu4j.jar> + <jsk-lib.jar>${deploy.lib}/jsk-lib.jar</jsk-lib.jar> + <jsk-platform.jar>${deploy.lib}/jsk-platform.jar</jsk-platform.jar> + <log4j.jar>${deploy.lib}/log4j.jar</log4j.jar> + <iris.jar>${deploy.lib}/iris.jar</iris.jar> + <jgrapht.jar>${deploy.lib}/jgrapht.jar</jgrapht.jar> + <openrdf-sesame.jar>${deploy.lib}/openrdf-sesame.jar</openrdf-sesame.jar> + <slf4j.jar>${deploy.lib}/slf4j.jar</slf4j.jar> + <nxparser.jar>${deploy.lib}/nxparser.jar</nxparser.jar> + <zookeeper.jar>${deploy.lib}/zookeeper.jar</zookeeper.jar> + </systemPropertyVariables> </configuration> <executions> <execution> @@ -91,9 +167,8 @@ <phase>pre-integration-test</phase> <configuration> <tasks> - <ant - antfile="${project.build.testOutputDirectory}/test.xml" - target="hello" /> + <echo message="testscript = ${testScript}" /> + <ant antfile="${testScript}" target="startTestServices" useNativeBasedir="true" inheritAll="false"/> </tasks> </configuration> <goals> @@ -106,9 +181,8 @@ <phase>post-integration-test</phase> <configuration> <tasks> - <ant - antfile="${project.build.testOutputDirectory}/test.xml" - target="hello" /> + <echo message="testscript = ${testScript}" /> + <ant antfile="${testScript}" target="stopTestServices" useNativeBasedir="true" inheritAll="false"/> </tasks> </configuration> <goals> @@ -127,9 +201,16 @@ <groupId>com.bigdata</groupId> <artifactId>bigdata-core</artifactId> <version>${project.version}</version> + <classifier>deploy</classifier> + <type>tar.gz</type> <scope>test</scope> </dependency> + <dependency> + <groupId>com.bigdata</groupId> + <artifactId>bigdata-core</artifactId> + <version>${project.version}</version> + </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> @@ -137,6 +218,13 @@ <scope>test</scope> </dependency> + <dependency> + <groupId>com.bigdata.thirdparty</groupId> + <artifactId>cweb-junit-ext</artifactId> + <version>1.1.0-b3-dev</version> + <scope>test</scope> + </dependency> + </dependencies> </project> Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-10 19:44:37 UTC (rev 3530) @@ -0,0 +1,503 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License.
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Apr 22, 2007 + */ + +package com.bigdata.service.jini; + +import java.io.IOException; +import java.net.InetAddress; + +import junit.framework.AssertionFailedError; +import junit.framework.TestCase2; +import net.jini.core.discovery.LookupLocator; +import net.jini.core.lookup.ServiceID; +import net.jini.core.lookup.ServiceRegistrar; +import net.jini.core.lookup.ServiceTemplate; + +import com.bigdata.journal.ITx; +import com.bigdata.mdi.IResourceMetadata; +import com.bigdata.mdi.LocalPartitionMetadata; +import com.bigdata.mdi.PartitionLocator; +import com.bigdata.service.DataService; +import com.bigdata.service.IDataService; +import com.bigdata.service.MetadataService; +import com.sun.jini.tool.ClassServer; +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.NicUtil; + +/** + * Abstract base class for tests of remote services. + * <p> + * Note: jini MUST be running. You can get the jini starter kit and install it + * to get jini running. + * </p> + * <p> + * Note: You MUST specify a security policy that is sufficiently lax. + * </p> + * <p> + * Note: You MUST specify the codebase for downloadable code. + * </p> + * <p> + * Note: The <code>bigdata</code> JAR must be current in order for the client + * and the service to agree on interface definitions, etc. You can use + * <code>build.xml</code> in the root of this module to update that JAR. + * </p> + * <p> + * Note: A {@link ClassServer} will be started on port 8081 by default. If that + * port is in use then you MUST specify another port. + * </p> + * + * The following system properties will do the trick unless you have something + * running on port 8081. + * + * <pre> + * -Djava.security.policy=policy.all -Djava.rmi.server.codebase=http://localhost:8081 + * </pre> + * + * To use another port, try: + * + * <pre> + * -Djava.security.policy=policy.all -Dbigdata.test.port=8082 -Djava.rmi.server.codebase=http://localhost:8082 + * </pre> + * + * You can enable NIO using: + * <pre> + * -Dcom.sun.jini.jeri.tcp.useNIO=true + * </pre> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public abstract class AbstractServerTestCase extends TestCase2 { + + /** + * Equal to {@link ITx#UNISOLATED}. + */ + protected final long UNISOLATED = ITx.UNISOLATED; + + /** + * + */ + public AbstractServerTestCase() { + } + + /** + * @param arg0 + */ + public AbstractServerTestCase(String arg0) { + super(arg0); + } + +// /** +// * Return an open port on current machine. Try the suggested port first. If +// * suggestedPort is zero, just select a random port +// */ +// private static int getPort(int suggestedPort) throws IOException { +// ServerSocket openSocket; +// try { +// openSocket = new ServerSocket(suggestedPort); +// } catch (BindException ex) { +// // the port is busy, so look for a random open port +// openSocket = new ServerSocket(0); +// } +// +// int port = openSocket.getLocalPort(); +// openSocket.close(); +// +// return port; +// } + +// /** +// * This may be used to verify that a specific port is available. 
The method +// * will return iff the port is available at the time that this method was +// * called. The method will retry a few times since sometimes it takes a bit +// * for a socket to get released and we are reusing the same socket for the +// * {@link ClassServer} for each test. +// * +// * @param port +// * The port to try. +// * +// * @exception AssertionFailedError +// * if the port is not available. +// */ +// protected static void assertOpenPort(final int port) throws IOException { +// +// ServerSocket openSocket; +// +// int i = 0; +// +// final int maxTries = 3; +// +// while (i < maxTries) { +// +// try { +// +// // try to open a server socket on that port. +// openSocket = new ServerSocket(port); +// +// // close the socket - it is available for the moment. +// openSocket.close(); +// +// return; +// +// } catch (BindException ex) { +// +// if (i++ < maxTries) { +// +// log.warn("Port " + port + " is busy - retrying: " + ex); +// +// try { +// Thread.sleep(100/* ms */); +// } catch (InterruptedException t) { +// /* ignore */ +// } +// +// } else { +// +// fail("Port is busy: " + ex + " - use " + PORT_OPTION +// + " to specify another port?"); +// +// } +// +// } +// +// } +// +// } + +// private ClassServer classServer; +// +// /** +// * The name of the System property that may be used to change the port on which +// * the {@link ClassServer} will be started. +// */ +// public static final String PORT_OPTION = "bigdata.test.port"; +// +// /** +// * The default port on which the {@link ClassServer} will be started. +// * <p> +// * Note: Outlook appears to conflict with 8081. +// */ +// public static final String DEFAULT_PORT = "8082"; +// +// /** +// * Starts a {@link ClassServer} that supports downloadable code for the unit +// * test. The {@link ClassServer} will start on the port named by the System +// * property {@link #PORT_OPTION} and on port {@link #DEFAULT_PORT} if that +// * system property is not set. +// * +// * @throws IOException +// */ +// protected void startClassServer() throws IOException { +// +// // Note: See below. +//// if(true) return; +// +// Logger.getLogger("com.sun.jini.tool.ClassServer").setLevel(Level.ALL); +// +// /* +// * Obtain port from System.getProperties() so that other ports may be +// * used. +// */ +// final int port = Integer.parseInt(System.getProperty(PORT_OPTION,DEFAULT_PORT)); +// +// /* +// * The directories containing the JARs and the compiled classes for the +// * bigdata project. +// */ +// String dirlist = +// "lib"+File.pathSeparatorChar+ +// "lib"+File.separatorChar+"icu"+File.pathSeparatorChar+ +// "lib"+File.separatorChar+"jini"+File.pathSeparatorChar +// /* +// * FIXME This does not seem to be resolving the bigdata classes +// * necessitating that we list that jar explicitly below (and that it +// * be up to date). The problem can be seen in the Jini Service +// * Browser and the console for the Service Browser. In fact, the +// * test suite executes just fine if you do NOT use the ClassServer! +// * +// * I can only get this working right now by placing bigdata-core.jar into +// * the lib directory (or some other directory below the current +// * working directory, but not ant-build since that gives the ant +// * script fits). +// * +// * I still see a ClassNotFound problem in the Jini console complaining +// * that it can not find IDataService, but only when I select the +// * registrar on which the services are running! 
+// */ +// + +//// "bin" +// //+File.pathSeparatorChar+ +//// "ant-build" +// ; +// +// assertOpenPort(port); +// +// classServer = new ClassServer( +// port, +// dirlist, +// true, // trees - serve up files inside of JARs, +// true // verbose +// ); +// +// classServer.start(); +// +// } + + public void setUp() throws Exception { + + if (log.isInfoEnabled()) + log.info(getName()); + +// startClassServer(); + + } + + /** + * Stops the {@link ClassServer}. + */ + public void tearDown() throws Exception { + +// if (classServer != null) { +// +// classServer.terminate(); +// +// } + + super.tearDown(); + + if (log.isInfoEnabled()) + log.info(getName()); + + } + + /** + * Return the {@link ServiceID} of a server that we started ourselves. The + * method waits until the {@link ServiceID} becomes available on + * {@link AbstractServer#getServiceID()}. + * + * @exception AssertionFailedError + * If the {@link ServiceID} can not be found after a timeout. + * + * @exception InterruptedException + * if the thread is interrupted while it is waiting to retry. + */ + static public ServiceID getServiceID(final AbstractServer server) + throws AssertionFailedError, InterruptedException { + + ServiceID serviceID = null; + + for(int i=0; i<10 && serviceID == null; i++) { + + /* + * Note: This can be null since the serviceID is not assigned + * synchronously by the registrar. + */ + + serviceID = server.getServiceID(); + + if(serviceID == null) { + + /* + * We wait a bit and retry until we have it or timeout. + */ + + Thread.sleep(200); + + } + + } + + assertNotNull("serviceID",serviceID); + + /* + * Verify that we have discovered the _correct_ service. This is a + * potential problem when starting and stopping services for the test + * suite. + */ + assertEquals("serviceID", server.getServiceID(), serviceID); + + return serviceID; + + } + + /** + * Lookup a {@link DataService} by its {@link ServiceID} using unicast + * discovery on localhost. + * + * @param serviceID + * The {@link ServiceID}. + * + * @return The service. + * + * @todo Modify to return the service item? + * + * @todo Modify to not be specific to {@link DataService} vs + * {@link MetadataService} (we need a common base interface for both + * that carries most of the functionality but allows us to make + * distinctions easily during discovery). + */ + public IDataService lookupDataService(ServiceID serviceID) + throws IOException, ClassNotFoundException, InterruptedException { + + /* + * Lookup the discovery service (unicast on localhost). + */ + + // get the hostname. + String hostname = NicUtil.getIpAddress("default.nic", "default", true); + + // Find the service registrar (unicast protocol). + final int timeout = 4*1000; // milliseconds (4 seconds). + System.err.println("hostname: "+hostname); + LookupLocator lookupLocator = new LookupLocator("jini://"+hostname); + ServiceRegistrar serviceRegistrar = lookupLocator.getRegistrar( timeout ); + + /* + * Prepare a template for lookup search. + * + * Note: The client needs a local copy of the interface in order to be + * able to invoke methods on the service without using reflection. The + * implementation class will be downloaded from the codebase identified + * by the server. + */ + ServiceTemplate template = new ServiceTemplate(// + /* + * use this to request the service by its serviceID. + */ + serviceID, + /* + * Use this to filter services by an interface that they expose. + */ +// new Class[] { IDataService.class }, + null, + /* + * use this to filter for services by Entry attributes.
+ */ + null); + + /* + * Lookup a service. This can fail if the service registrar has not + * finished processing the service registration. If it does, you can + * generally just retry the test and it will succeed. However this + * points out that the client may need to wait and retry a few times if + * you are starting everything up at once (or just register for + * notification events for the service if it is not found and enter a + * wait state). + */ + + IDataService service = null; + + for (int i = 0; i < 10 && service == null; i++) { + + service = (IDataService) serviceRegistrar + .lookup(template /* , maxMatches */); + + if (service == null) { + + System.err.println("Service not found: sleeping..."); + + Thread.sleep(200); + + } + + } + + if (service != null) { + + System.err.println("Service found."); + + } + + return service; + + } + + /** + * Compares two representations of the {@link PartitionLocator} + * without the left- and right-separator keys that bound the index + * partition. + * + * @param expected + * @param actual + */ + protected void assertEquals(PartitionLocator expected, PartitionLocator actual) { + + assertEquals("partitionId", expected.getPartitionId(), actual + .getPartitionId()); + + assertEquals("dataServiceUUID", expected.getDataServiceUUID(), actual + .getDataServiceUUID()); + + } + + /** + * Compares two representations of the {@link LocalPartitionMetadata} for an + * index partition including the optional resource descriptions. + * + * @param expected + * @param actual + */ + protected void assertEquals(LocalPartitionMetadata expected, + LocalPartitionMetadata actual) { + + assertEquals("partitionId",expected.getPartitionId(), actual.getPartitionId()); + + assertEquals("leftSeparatorKey", expected.getLeftSeparatorKey(), + ((LocalPartitionMetadata) actual) + .getLeftSeparatorKey()); + + assertEquals("rightSeparatorKey", expected.getRightSeparatorKey(), + ((LocalPartitionMetadata) actual) + .getRightSeparatorKey()); + + final IResourceMetadata[] expectedResources = expected.getResources(); + + final IResourceMetadata[] actualResources = actual.getResources(); + + assertEquals("#resources",expectedResources.length,actualResources.length); + + for(int i=0;i<expected.getResources().length; i++) { + + // verify by components so that it is obvious what is wrong. + + assertEquals("filename[" + i + "]", expectedResources[i].getFile(), + actualResources[i].getFile()); + +// assertEquals("size[" + i + "]", expectedResources[i].size(), +// actualResources[i].size()); + + assertEquals("UUID[" + i + "]", expectedResources[i].getUUID(), + actualResources[i].getUUID()); + + // verify by equals. + assertTrue("resourceMetadata",expectedResources[i].equals(actualResources[i])); + + } + + } + +} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-10 19:44:37 UTC (rev 3530) @@ -0,0 +1,214 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Apr 23, 2007 + */ + +package com.bigdata.service.jini; + +import java.io.Serializable; +import java.util.Random; +import java.util.UUID; + +import com.bigdata.btree.IIndex; +import com.bigdata.btree.ITuple; +import com.bigdata.btree.ITupleIterator; +import com.bigdata.btree.IndexMetadata; + +import com.bigdata.btree.proc.BatchInsert.BatchInsertConstructor; +import com.bigdata.journal.ITx; +import com.bigdata.service.DataService; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.service.IDataService; +import com.bigdata.service.jini.util.JiniServicesHelper; +import com.bigdata.test.util.Util; + +/** + * Test suite for the {@link JiniClient}. + * <p> + * Note: The core test suite has already verified the basic semantics of the + * {@link IDataService} interface and partitioned indices so all we have to + * focus on here is the jini integration and verifying that the serialization + * imposed by RMI goes off without a hitch (e.g., that everything implements + * {@link Serializable} and that those {@link Serializable} implementations can + * correctly round trip the data). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestBigdataClient extends AbstractServerTestCase { + + protected boolean serviceImplRemote; + + public TestBigdataClient() { + this.serviceImplRemote = false; + } + + public TestBigdataClient(String name) { + super(name); + this.serviceImplRemote = false; + } + + public TestBigdataClient(boolean serviceImplRemote) { + this.serviceImplRemote = serviceImplRemote; + } + + public TestBigdataClient(String name, boolean serviceImplRemote) { + super(name); + this.serviceImplRemote = serviceImplRemote; + } + + /** + * Starts a {@link DataServer} ({@link #dataServer1}) and then a + * {@link MetadataServer} ({@link #metadataServer0}). Each runs in its own + * thread. + */ + public void setUp() throws Exception { + super.setUp(); + helper = new JiniServicesHelper(serviceImplRemote); + helper.start(); + } + + protected JiniServicesHelper helper = null; + + /** + * Destroy the test services. + */ + public void tearDown() throws Exception { + if (helper != null) { + helper.destroy(); + } + + super.tearDown(); + } + + /** + * Test ability to registers a scale-out index on one of the + * {@link DataService}s. + * + * @throws Exception + */ + public void test_registerIndex1() throws Exception { + final IBigdataFederation<?> fed = helper.client.connect(); + final String name = "testIndex"; + final IndexMetadata metadata = new IndexMetadata(name, UUID.randomUUID()); + + metadata.setDeleteMarkers(true); + fed.registerIndex(metadata); + final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); + + assertEquals("indexUUID", metadata.getIndexUUID(), ndx + .getIndexMetadata().getIndexUUID()); + + doBasicIndexTests(ndx); + } + + /** + * Test ability to registers a scale-out index on both of the + * {@link DataService}s. 
+ * + * @throws Exception + */ + public void test_registerIndex2() throws Exception { + final IBigdataFederation<?> fed = helper.client.connect(); + final String name = "testIndex"; + final IndexMetadata metadata = new IndexMetadata(name,UUID.randomUUID()); + + metadata.setDeleteMarkers(true); + + final UUID indexUUID = fed.registerIndex( metadata, // + // separator keys. + new byte[][] { + new byte[]{}, + Util.asSortKey(500) + },// + // data service assignments. + new UUID[] { // + helper.getDataService0().getServiceUUID(),// + helper.getDataService1().getServiceUUID() // + }); + + final IIndex ndx = fed.getIndex(name, ITx.UNISOLATED); + + assertEquals("indexUUID", indexUUID, ndx.getIndexMetadata() + .getIndexUUID()); + + // verify partition 0 on dataService0 + assertNotNull(helper.getDataService0().getIndexMetadata( + DataService.getIndexPartitionName(name, 0), ITx.UNISOLATED)); + + // verify partition 1 on dataService1 + assertNotNull(helper.getDataService1().getIndexMetadata( + DataService.getIndexPartitionName(name, 1), ITx.UNISOLATED)); + + doBasicIndexTests(ndx); + } + + /** + * Test helper reads and writes some data on the index in order to verify + * that these operations can be performed without serialization errors + * arising from the RPC calls. + * + * @param ndx + */ + protected void doBasicIndexTests(final IIndex ndx) { + + final int limit = 1000; + + final byte[][] keys = new byte[limit][]; + final byte[][] vals = new byte[limit][]; + + final Random r = new Random(); + + for (int i = 0; i < limit; i++) { + keys[i] = Util.asSortKey(i); + final byte[] val = new byte[10]; + r.nextBytes(val); + vals[i] = val; + } + + // batch insert. + ndx.submit(0/* fromIndex */, limit/* toIndex */, keys, vals, BatchInsertConstructor.RETURN_NO_VALUES, null); + + // verify #of index entries. + assertEquals(limit, ndx.rangeCount(null, null)); + + // verify data. + { + final ITupleIterator<?> itr = ndx.rangeIterator(null, null); + + int i = 0; + + while (itr.hasNext()) { + final ITuple<?> tuple = itr.next(); + + assertEquals(keys[i], tuple.getKey()); + assertEquals(vals[i], tuple.getValue()); + i++; + } + + assertEquals(limit, i); + } + } +} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClientRemote.java 2010-09-10 19:44:37 UTC (rev 3530) @@ -0,0 +1,40 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.service.jini; + +/** + * Test suite for the {@link JiniClient} using the purely remote + * service implementations. + */ +public class TestBigdataClientRemote extends TestBigdataClient { + + public TestBigdataClientRemote() { + super(true); + } + + public TestBigdataClientRemote(String name) { + super(name, true); + } +} Added: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Util.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Util.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Util.java 2010-09-10 19:44:37 UTC (rev 3530) @@ -0,0 +1,100 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.test.util; + +import java.util.Locale; + +import com.bigdata.btree.keys.IKeyBuilder; +import com.bigdata.btree.keys.KeyBuilder; + +/** + * A utility class for utility methods useful to unit and integration tests. + * + * @todo This may need to be moved into the unit test component and a dependency + * on that component added to the integration tests. + * + * @author blevine + * + */ +public class Util +{ + /** + * Used to unbox an application key (convert it to an unsigned byte[]). + */ + static private final IKeyBuilder _keyBuilder = KeyBuilder.newUnicodeInstance(); + + /** + * Utility method converts an application key to a sort key (an unsigned + * byte[] that imposes the same sort order). + * <p> + * Note: This method is thread-safe. + * <p> + * Note: Strings are Unicode safe for the default locale. See + * {@link Locale#getDefault()}. If you require a specific local or different + * locals at different times or for different indices then you MUST + * provision and apply your own {@link KeyBuilder}. + * <p> + * Note: This method circumvents explicit configuration of the + * {@link KeyBuilder} and is used nearly exclusively by unit tests. While + * explicit configuration is not required for keys which do not include + * Unicode sort key components, this method also relies on a single global + * {@link KeyBuilder} instance protected by a lock. That lock is therefore a + * bottleneck. The correct practice is to use thread-local or per task + * {@link IKeyBuilder}s to avoid lock contention. + * + * This method is cloned from a method of the same name in <code>TestKeyBuilder</code>. + * Moving it into a utility class to remove a direct dependency from one test on + * another. + * + * @param val + * An application key. 
+ * + * @return The unsigned byte[] equivalent of that key. This will be + * <code>null</code> iff the <i>key</i> is <code>null</code>. If the + * <i>key</i> is a byte[], then the byte[] itself will be returned. + */ + public static final byte[] asSortKey(final Object val) + { + + if (val == null) + { + return null; + } + + if (val instanceof byte[]) + { + return (byte[]) val; + } + + /* + * Synchronize on the keyBuilder to avoid concurrent modification of its + * state. + */ + + synchronized (_keyBuilder) + { + return _keyBuilder.getSortKey(val); + } + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
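A minimal usage sketch for the Util.asSortKey helper added above. The cases follow directly from its javadoc; the names come from the patch and the argument values are illustrative only:

    // Illustrative calls only; the values are arbitrary.
    byte[] a = Util.asSortKey(500);        // int -> unsigned byte[] preserving numeric order
    byte[] b = Util.asSortKey("bigdata");  // Unicode sort key for the default Locale
    byte[] c = Util.asSortKey(null);       // null in, null out
    byte[] d = Util.asSortKey(a);          // a byte[] argument is returned as given

Note that all non-byte[] conversions funnel through the single synchronized _keyBuilder, so as the javadoc warns, this helper is for tests rather than for concurrent production paths.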
From: <mar...@us...> - 2010-09-10 17:12:40
Revision: 3529 http://bigdata.svn.sourceforge.net/bigdata/?rev=3529&view=rev Author: martyncutcher Date: 2010-09-10 17:12:33 +0000 (Fri, 10 Sep 2010) Log Message: ----------- Refined RWStore allocation stats display Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-10 16:02:26 UTC (rev 3528) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-10 17:12:33 UTC (rev 3529) @@ -194,9 +194,9 @@ * com.bigdata.rwstore.RWStore.allocSizes=1,2,3,5... * */ - // static final int[] DEFAULT_ALLOC_SIZES = { 1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128, 192, 320, 512, 832, 1344, 2176, 3520 }; - private static final int[] DEFAULT_ALLOC_SIZES = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181 }; - // static final int[] ALLOC_SIZES = { 1, 2, 4, 8, 16, 32, 64, 128 }; + private static final int[] DEFAULT_ALLOC_SIZES = { 1, 2, 3, 5, 8, 12, 16, 32, 48, 64, 128, 192, 320, 512, 832, 1344, 2176, 3520 }; + // private static final int[] DEFAULT_ALLOC_SIZES = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181 }; + // private static final int[] ALLOC_SIZES = { 1, 2, 4, 8, 16, 32, 64, 128 }; final int m_maxFixedAlloc; final int m_minFixedAlloc; @@ -2108,18 +2108,23 @@ str.append("RWStore Allocation Summary\n"); str.append("-------------------------\n"); long treserved = 0; + long treservedSlots = 0; long tfilled = 0; + long tfilledSlots = 0; for (int i = 0; i < stats.length; i++) { - str.append("Allocation: " + stats[i].m_blockSize); long reserved = stats[i].m_reservedSlots * stats[i].m_blockSize; treserved += reserved; - str.append(", reserved: " + reserved); + treservedSlots += stats[i].m_reservedSlots; long filled = stats[i].m_filledSlots * stats[i].m_blockSize; tfilled += filled; - str.append(", filled: " + filled); + tfilledSlots += stats[i].m_filledSlots; + + str.append("Allocation: " + stats[i].m_blockSize); + str.append(", slots: " + stats[i].m_filledSlots + "/" + stats[i].m_reservedSlots); + str.append(", storage: " + filled + "/" + reserved); str.append("\n"); } - str.append("Total - file: " + convertAddr(m_fileSize) + ", reserved: " + treserved + ", filled: " + tfilled + "\n"); + str.append("Total - file: " + convertAddr(m_fileSize) + ", slots: " + tfilledSlots + "/" + treservedSlots + ", storage: " + tfilled + "/" + treserved + "\n"); } public ArrayList getStorageBlockAddresses() { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
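For illustration, with this change the summary written by showAllocators(...) takes roughly the following shape, reporting filled/reserved slots and filled/reserved storage per block size (the figures below are invented; the file size on the Total line comes from convertAddr(m_fileSize)):

    RWStore Allocation Summary
    -------------------------
    Allocation: 64, slots: 1200/4096, storage: 76800/262144
    Allocation: 128, slots: 310/1024, storage: 39680/131072
    Total - file: 10485760, slots: 1510/5120, storage: 116480/393216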
From: <tho...@us...> - 2010-09-10 16:02:33
Revision: 3528 http://bigdata.svn.sourceforge.net/bigdata/?rev=3528&view=rev Author: thompsonbry Date: 2010-09-10 16:02:26 +0000 (Fri, 10 Sep 2010) Log Message: ----------- Modified the configuration files to no longer specify the JVM -Xms argument for the data services per https://sourceforge.net/apps/trac/bigdata/ticket/157. Modified Paths: -------------- trunk/src/resources/config/bigdataCluster.config trunk/src/resources/config/bigdataCluster16.config trunk/src/resources/config/bigdataStandalone.config Modified: trunk/src/resources/config/bigdataCluster.config =================================================================== --- trunk/src/resources/config/bigdataCluster.config 2010-09-10 14:26:34 UTC (rev 3527) +++ trunk/src/resources/config/bigdataCluster.config 2010-09-10 16:02:26 UTC (rev 3528) @@ -758,10 +758,11 @@ * have for your applications! */ "-Xmx1600m",// was 800 - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS but is less necessary for other bigdata services. + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 + "-Xms800m", */ - "-Xms800m", // 1/2 of the max heap is a good value. /* * This option will keep the JVM "alive" even when it is memory starved * but perform of a memory starved JVM is terrible. Modified: trunk/src/resources/config/bigdataCluster16.config =================================================================== --- trunk/src/resources/config/bigdataCluster16.config 2010-09-10 14:26:34 UTC (rev 3527) +++ trunk/src/resources/config/bigdataCluster16.config 2010-09-10 16:02:26 UTC (rev 3528) @@ -813,12 +813,11 @@ * http://blogs.msdn.com/ntdebugging/archive/2009/02/06/microsoft-windows-dynamic-cache-service.aspx */ "-Xmx9G", // Note: out of 32 available! - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS, but is less necessary for other bigdata services. If the machine is - * dedicated to the DataService then use the maximum heap. Otherwise 1/2 of - * the maximum heap is a good value. - */ + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 "-Xms9G", + */ /* * FIXME This might not be required, so that should be tested. * However, you don't want the JVM to just die if it is being Modified: trunk/src/resources/config/bigdataStandalone.config =================================================================== --- trunk/src/resources/config/bigdataStandalone.config 2010-09-10 14:26:34 UTC (rev 3527) +++ trunk/src/resources/config/bigdataStandalone.config 2010-09-10 16:02:26 UTC (rev 3528) @@ -781,10 +781,11 @@ * have for your applications! */ "-Xmx4g",// was 800 - /* Optionally, grab all/most of the max heap at once. This makes sense for - * DS but is less necessary for other bigdata services. + /* Pre-allocation of the DS heap is no longer recommended. + * + * See https://sourceforge.net/apps/trac/bigdata/ticket/157 + "-Xms2G", */ - "-Xms2G", // 1/2 of the max heap is a good value. /* * This option will keep the JVM "alive" even when it is memory starved * but perform of a memory starved JVM is terrible. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
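Without -Xms, the initial committed heap is chosen by JVM ergonomics and grows toward -Xmx on demand. To confirm the effective limits of a running data service, a minimal sketch using only the standard JDK Runtime API (where the logging is placed is up to the service; nothing here is bigdata-specific):

    // Log the configured maximum and the currently committed heap.
    final Runtime rt = Runtime.getRuntime();
    System.out.println("maxMemory=" + rt.maxMemory()   // honors -Xmx
            + ", totalMemory=" + rt.totalMemory());    // committed now; grows on demand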
From: <mar...@us...> - 2010-09-10 14:26:41
Revision: 3527 http://bigdata.svn.sourceforge.net/bigdata/?rev=3527&view=rev Author: martyncutcher Date: 2010-09-10 14:26:34 +0000 (Fri, 10 Sep 2010) Log Message: ----------- Add stats output for RWStore allocations Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/AllocBlock.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -27,6 +27,7 @@ import java.util.ArrayList; import com.bigdata.io.writecache.WriteCacheService; +import com.bigdata.rwstore.RWStore.AllocationStats; /** * Bit maps for an allocator. The allocator is a bit map managed as int[]s. @@ -189,10 +190,17 @@ return allocBits; } - public String getStats() { + public String getStats(AllocationStats stats) { final int total = m_ints * 32; final int allocBits = getAllocBits(); + if (stats != null) { + stats.m_reservedSlots += total; + stats.m_filledSlots += allocBits; + + return ""; + } + return " - start addr : " + RWStore.convertAddr(m_addr) + " [" + allocBits + "::" + total + "]"; } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/Allocator.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -28,7 +28,9 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.rwstore.RWStore.AllocationStats; + public interface Allocator extends Comparable { public int getBlockSize(); public void setIndex(int index); @@ -50,7 +52,7 @@ public void addAddresses(ArrayList addrs); public int getRawStartAddr(); public int getIndex(); - public void appendShortStats(StringBuffer str); + public void appendShortStats(StringBuilder str, AllocationStats[] stats); public boolean canImmediatelyFree(int addr, int size, IAllocationContext context); } \ No newline at end of file Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/BlobAllocator.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -7,6 +7,7 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.rwstore.RWStore.AllocationStats; import com.bigdata.util.ChecksumUtility; /** @@ -297,7 +298,7 @@ return m_hdrs[hdrIndex]; } - public void appendShortStats(StringBuffer str) { + public void appendShortStats(StringBuilder str, AllocationStats[] stats) { 
str.append("Index: " + m_index + ", address: " + getStartAddr() + ", BLOB\n"); } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -30,6 +30,7 @@ import org.apache.log4j.Logger; +import com.bigdata.rwstore.RWStore.AllocationStats; import com.bigdata.util.ChecksumUtility; /** @@ -334,7 +335,7 @@ if (block.m_addr == 0) { break; } - sb.append(block.getStats() + "\r\n"); + sb.append(block.getStats(null) + "\r\n"); counter.addAndGet(block.getAllocBits() * m_size); } @@ -489,14 +490,26 @@ return m_index; } - public void appendShortStats(StringBuffer str) { - str.append("Index: " + m_index + ", " + m_size); + public void appendShortStats(StringBuilder str, AllocationStats[] stats) { + + int si = -1; + + if (stats == null) { + str.append("Index: " + m_index + ", " + m_size); + } else { + for (int i = 0; i < stats.length; i++) { + if (m_size == stats[i].m_blockSize) { + si = i; + break; + } + } + } Iterator<AllocBlock> blocks = m_allocBlocks.iterator(); while (blocks.hasNext()) { AllocBlock block = blocks.next(); if (block.m_addr != 0) { - str.append(block.getStats()); + str.append(block.getStats(si == -1 ? null : stats[si])); } else { break; } Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -634,7 +634,7 @@ // clearOutstandingDeferrels(deferredFreeListAddr, deferredFreeListEntries); if (log.isTraceEnabled()) { - final StringBuffer str = new StringBuffer(); + final StringBuilder str = new StringBuilder(); this.showAllocators(str); log.trace(str); } @@ -778,7 +778,7 @@ } if (false) { - StringBuffer tmp = new StringBuffer(); + StringBuilder tmp = new StringBuilder(); showAllocators(tmp); System.out.println("Allocators: " + tmp.toString()); @@ -2076,16 +2076,50 @@ } + public static class AllocationStats { + public AllocationStats(int i) { + m_blockSize = i; + } + long m_blockSize; + long m_reservedSlots; + long m_filledSlots; + } /** * Utility debug outputing the allocator array, showing index, start * address and alloc type/size + * + * Collected statistics are against each Allocation Block size: + * total number of slots | store size + * number of filled slots | store used */ - public void showAllocators(StringBuffer str) { + public void showAllocators(StringBuilder str) { + AllocationStats[] stats = new AllocationStats[m_allocSizes.length]; + for (int i = 0; i < stats.length; i++) { + stats[i] = new AllocationStats(m_allocSizes[i]*64); + } Iterator allocs = m_allocs.iterator(); while (allocs.hasNext()) { Allocator alloc = (Allocator) allocs.next(); - alloc.appendShortStats(str); + alloc.appendShortStats(str, stats); } + + // Append Summary + str.append("\n-------------------------\n"); + str.append("RWStore Allocation Summary\n"); + str.append("-------------------------\n"); + long treserved = 0; + long tfilled = 0; + for (int i = 0; i < stats.length; i++) { + str.append("Allocation: " + 
stats[i].m_blockSize); + long reserved = stats[i].m_reservedSlots * stats[i].m_blockSize; + treserved += reserved; + str.append(", reserved: " + reserved); + long filled = stats[i].m_filledSlots * stats[i].m_blockSize; + tfilled += filled; + str.append(", filled: " + filled); + str.append("\n"); + } + str.append("Total - file: " + convertAddr(m_fileSize) + ", reserved: " + treserved + ", filled: " + tfilled + "\n"); } public ArrayList getStorageBlockAddresses() { Modified: branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-09-09 17:17:21 UTC (rev 3526) +++ branches/JOURNAL_HA_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 2010-09-10 14:26:34 UTC (rev 3527) @@ -75,11 +75,13 @@ import com.bigdata.LRUNexus; import com.bigdata.btree.IndexMetadata; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IBufferStrategy; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.IJournal; import com.bigdata.journal.ITransactionService; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; +import com.bigdata.journal.RWStrategy; import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSailGraphQuery; @@ -89,6 +91,7 @@ import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.relation.AbstractResource; import com.bigdata.relation.RelationSchema; +import com.bigdata.rwstore.RWStore; import com.bigdata.service.AbstractDistributedFederation; import com.bigdata.service.AbstractFederation; import com.bigdata.service.IBigdataFederation; @@ -363,6 +366,17 @@ } // sb.append(tripleStore.predicateUsage()); + + if (tripleStore.getIndexManager() instanceof Journal) { + Journal journal = (Journal) tripleStore.getIndexManager(); + IBufferStrategy strategy = journal.getBufferStrategy(); + if (strategy instanceof RWStrategy) { + RWStore store = ((RWStrategy) strategy).getRWStore(); + + store.showAllocators(sb); + + } + } } catch (Throwable t) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
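The NanoSparqlServer hunk above reduces to a small pattern that any caller holding a Journal reference can reuse to dump the allocator statistics; a sketch using only types from this patch (here "journal" is an assumed local variable):

    // Dump RWStore allocator statistics when the journal is backed by an RWStore.
    final IBufferStrategy strategy = journal.getBufferStrategy();
    if (strategy instanceof RWStrategy) {
        final StringBuilder sb = new StringBuilder();
        ((RWStrategy) strategy).getRWStore().showAllocators(sb);
        System.out.println(sb); // or append to a status page, as above
    }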
From: <tho...@us...> - 2010-09-09 17:17:30
Revision: 3526 http://bigdata.svn.sourceforge.net/bigdata/?rev=3526&view=rev Author: thompsonbry Date: 2010-09-09 17:17:21 +0000 (Thu, 09 Sep 2010) Log Message: ----------- Working through support for moving bindingSet chunks around in scale-out and life cycle management of buffers in scale-out. I've raised the read/write timestamp into operator annotations. This might turn into a single "timestamp" operator and a BOp#isMutationOp() method to mark operators which write data rather than reading data. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BSBundle.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/ResourceService.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/AllocationContextKey.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/QueryContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ServiceContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ShardContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -147,13 +147,6 @@ */ public interface Annotations { -// /** -// * A cross reference to the query identifier. This is required on -// * operators which associate distributed state with a query. [We can -// * probably get this from the evaluation context.] -// */ -// String QUERY_REF = "queryRef"; - /** * The unique identifier within a query for a specific {@link BOp}. The * {@link #QUERY_ID} and the {@link #BOP_ID} together provide a unique @@ -161,11 +154,19 @@ * query. */ String BOP_ID = "bopId"; - + /** * The timeout for the operator evaluation (milliseconds). 
* * @see #DEFAULT_TIMEOUT + * + * @todo Probably support both deadlines and timeouts. A deadline + * expresses when the query must be done while a timeout expresses + * how long it may run. A deadline may be imposed as soon as the + * query plan is formulated and could even be communicated from a + * remote client (e.g., as an httpd header). A timeout will always + * be interpreted with respect to the time when the query began to + * execute. */ String TIMEOUT = "timeout"; @@ -175,12 +176,30 @@ long DEFAULT_TIMEOUT = Long.MAX_VALUE; /** + * The timestamp (or transaction identifier) associated with a read from + * the database. + * + * @todo Combine the read and write timestamps as a single + * <code>TX</code> value and require this on any operator which + * reads or writes on the database. + */ + String READ_TIMESTAMP = BOp.class.getName() + ".readTimestamp"; + + /** + * The timestamp (or transaction identifier) associated with a write on + * the database. + */ + String WRITE_TIMESTAMP = BOp.class.getName() + ".writeTimestamp"; + + /** * For hash partitioned operators, this is the set of the member nodes * for the operator. * <p> * This annotation is required for such operators since the set of known * nodes of a given type (such as all data services) can otherwise * change at runtime. + * + * @todo Move onto an interface parallel to {@link IShardwisePipelineOp} */ String MEMBER_SERVICES = "memberServices"; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -30,6 +30,7 @@ import org.apache.log4j.Logger; import com.bigdata.bop.engine.BOpStats; +import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.RunningQuery; @@ -227,6 +228,10 @@ * source if the source will be ignored). * @throws IllegalArgumentException * if the <i>sink</i> is <code>null</code> + * + * @todo modify to accept {@link IChunkMessage} or an interface available + * from getChunk() on {@link IChunkMessage} which provides us with + * flexible mechanisms for accessing the chunk data. */ // * @throws IllegalArgumentException // * if the <i>indexManager</i> is <code>null</code> Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BSBundle.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BSBundle.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BSBundle.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -50,8 +50,6 @@ /** * {@inheritDoc} - * - * @todo verify that this is a decent hash function. 
*/ public int hashCode() { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -1,44 +1,87 @@ package com.bigdata.bop.engine; +import java.io.Serializable; + import com.bigdata.bop.BOp; import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.fed.FederatedRunningQuery; import com.bigdata.relation.accesspath.IAsynchronousIterator; /** - * A chunk of intermediate results which are ready to be consumed by some - * {@link BOp} in a specific query. + * An non-{@link Serializable} chunk of intermediate results which are ready to + * be consumed by some {@link BOp} in a specific query (this is only used in + * query evaluation for the standalone database). */ -public class BindingSetChunk { +public class BindingSetChunk implements IChunkMessage { + /** The query controller. */ + private final IQueryClient clientProxy; + /** * The query identifier. */ - final long queryId; + private final long queryId; /** * The target {@link BOp}. */ - final int bopId; + private final int bopId; /** * The index partition which is being targeted for that {@link BOp}. */ - final int partitionId; + private final int partitionId; /** * The binding sets to be consumed by that {@link BOp}. */ - final IAsynchronousIterator<IBindingSet[]> source; + private IAsynchronousIterator<IBindingSet[]> source; - public BindingSetChunk(final long queryId, final int bopId, - final int partitionId, + public IQueryClient getQueryController() { + return clientProxy; + } + + public long getQueryId() { + return queryId; + } + + public int getBOpId() { + return bopId; + } + + public int getPartitionId() { + return partitionId; + } + + public boolean isMaterialized() { + return true; + } + + /** + * + * @todo constructor to accept the BlockingBuffer instead as part of + * {@link IChunkMessage} harmonization (or an "IChunk" API). 
+ */ + public BindingSetChunk(final IQueryClient clientProxy, final long queryId, + final int bopId, final int partitionId, final IAsynchronousIterator<IBindingSet[]> source) { + + if (clientProxy == null) + throw new IllegalArgumentException(); + if (source == null) throw new IllegalArgumentException(); + + this.clientProxy = clientProxy; + this.queryId = queryId; + this.bopId = bopId; + this.partitionId = partitionId; + this.source = source; + } public String toString() { @@ -47,5 +90,13 @@ + ",partitionId=" + partitionId + "}"; } + + public void materialize(FederatedRunningQuery runningQuery) { + // NOP + } + public IAsynchronousIterator<IBindingSet[]> iterator() { + return source; + } + } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -0,0 +1,93 @@ +package com.bigdata.bop.engine; + +import java.nio.ByteBuffer; +import java.util.concurrent.BlockingQueue; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.fed.FederatedRunningQuery; +import com.bigdata.relation.accesspath.BlockingBuffer; +import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.service.ResourceService; + +/** + * A message describing a chunk of intermediate results which are available for + * processing. There are several implementations of this interface supporting + * same-JVM messages, thick RMI messages, and RMI messages where the payload is + * materialized using NIO transfers from the {@link ResourceService}. + */ +public interface IChunkMessage { + + /** The proxy for the query controller. */ + IQueryClient getQueryController(); + + /** The query identifier. */ + long getQueryId(); + + /** The identifier for the target {@link BOp}. */ + int getBOpId(); + + /** The identifier for the target index partition. */ + int getPartitionId(); + + /* + * @todo Report the #of bytes available with this message. However, first + * figure out if that if the #of bytes in this {@link OutputChunk} or across + * all {@link OutputChunk}s available for the target service and sink. + */ + // @todo move to concrete subclass or allow ZERO if data are in memory (no RMI). +// /** The #of bytes of data which are available for that operator. */ +// int getBytesAvailable(); + + /** + * Return <code>true</code> if the chunk is materialized on the receiver. + */ + boolean isMaterialized(); + + /** + * Materialize the chunk on the receiver. + * + * @param runningQuery + * The running query. + */ + void materialize(FederatedRunningQuery runningQuery); + + /** + * Visit the binding sets in the chunk. + * + * @todo we do not need to use {@link IAsynchronousIterator} any more. This + * could be much more flexible and should be harmonized to support + * high volume operators, GPU operators, etc. probably the right thing + * to do is introduce another interface here with a getChunk():IChunk + * where IChunk let's you access the chunks data in different ways + * (and chunks can be both {@link IBindingSet}[]s and element[]s so we + * might need to raise that into the interfaces and/or generics as + * well). 
+ * + * @todo It is likely that we can convert to the use of + * {@link BlockingQueue} instead of {@link BlockingBuffer} in the + * operators and then handle the logic for combining chunks inside of + * the {@link QueryEngine}. E.g., by scanning this list for chunks for + * the same bopId and combining them logically into a single chunk. + * <p> + * For scale-out, chunk combination will naturally occur when the node + * on which the operator will run requests the {@link ByteBuffer}s + * from the source nodes. Those will get wrapped up logically into a + * source for processing. For selective operators, those chunks can be + * combined before we execute the operator. For unselective operators, + * we are going to run over all the data anyway. + */ + IAsynchronousIterator<IBindingSet[]> iterator(); + + // /** + // * The Internet address and port of a {@link ResourceService} from which + // * the receiver may demand the data. + // */ + // InetSocketAddress getServiceAddr(); + // + // /** + // * The set of resources on the sender which comprise the data. + // */ + // Iterator<UUID> getChunkIds(); + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -1,76 +1,24 @@ package com.bigdata.bop.engine; -import java.rmi.Remote; import java.rmi.RemoteException; -import com.bigdata.bop.BOp; - /** * Interface for a client executing queries (the query controller). */ public interface IQueryClient extends IQueryPeer { - /* - * @todo Could return a data structure which encapsulates the query results - * and could allow multiple results from a query, e.g., one per step in a - * program. - */ - // /** -// * Evaluate a query which materializes elements, such as an -// * {@link IPredicate}. +// * Return the query. // * // * @param queryId -// * The unique identifier for the query. -// * @param timestamp -// * The timestamp or transaction against which the query will run. -// * @param query -// * The query to evaluate. -// * @param source -// * The initial binding sets to get the query going (this is -// * typically an iterator visiting a single empty binding set). +// * The query identifier. +// * @return The query. // * -// * @return An iterator visiting the elements materialized by the query. -// * -// * @throws Exception +// * @throws RemoteException // */ -// public IChunkedIterator<?> eval(long queryId, long timestamp, BOp query) -// throws Exception; +// public BOp getQuery(long queryId) throws RemoteException; -// /** -// * Evaluate a query which visits {@link IBindingSet}s, such as a join. -// * -// * @param queryId -// * The unique identifier for the query. -// * @param timestamp -// * The timestamp or transaction against which the query will run. -// * @param query -// * The query to evaluate. -// * @param source -// * The initial binding sets to get the query going (this is -// * typically an iterator visiting a single empty binding set). 
-// * -// * @return An iterator visiting {@link IBindingSet}s which result from -// * evaluating the query. -// * -// * @throws Exception -// */ -// public IChunkedIterator<IBindingSet> eval(long queryId, long timestamp, -// BOp query, IAsynchronousIterator<IBindingSet[]> source) -// throws Exception; - /** - * Return the query. - * - * @param queryId - * The query identifier. - * @return The query. - * - * @throws RemoteException - */ - public BOp getQuery(long queryId) throws RemoteException; - - /** * Notify the client that execution has started for some query, operator, * node, and index partition. */ @@ -84,22 +32,4 @@ */ public void haltOp(HaltOpMessage msg) throws RemoteException; -// /** -// * Notify the query controller that a chunk of intermediate results is -// * available for the query. -// * -// * @param queryId -// * The query identifier. -// */ -// public void addChunk(long queryId) throws RemoteException; -// -// /** -// * Notify the query controller that a chunk of intermediate results was -// * taken for processing by the query. -// * -// * @param queryId -// * The query identifier. -// */ -// public void takeChunk(long queryId) throws RemoteException; - } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -0,0 +1,25 @@ +package com.bigdata.bop.engine; + +import com.bigdata.bop.BindingSetPipelineOp; + +/** + * A query declaration. + */ +public interface IQueryDecl { + + /** + * The proxy for the query controller. + */ + IQueryClient getQueryController(); + + /** + * The query identifier. + */ + long getQueryId(); + + /** + * The query. + */ + BindingSetPipelineOp getQuery(); + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -1,6 +1,5 @@ package com.bigdata.bop.engine; -import java.net.InetSocketAddress; import java.rmi.Remote; import java.rmi.RemoteException; import java.util.UUID; @@ -23,26 +22,30 @@ UUID getServiceUUID() throws RemoteException; /** + * Declare a query to a peer. This message is sent to the peer before any + * other message for that query and declares the query and the query + * controller with which the peer must communicate during query evaluation. + * + * @param queryDecl + * The query declaration. + * + * @throws UnsupportedOperationException + * unless running in scale-out. + */ + void declareQuery(IQueryDecl queryDecl); + + /** * Notify a service that a buffer having data for some {@link BOp} in some * running query is available. The receiver may request the data when they * are ready. If the query is cancelled, then the sender will drop the * buffer. * - * @param clientProxy - * proxy used to communicate with the client running the query. 
- * @param serviceAddr - * address which may be used to demand the data. - * @param queryId - * the unique query identifier. - * @param bopId - * the identifier for the target {@link BOp}. + * @param msg + * The message. * - * @return <code>true</code> unless the receiver knows that the query has - * already been cancelled. + * @throws UnsupportedOperationException + * unless running in scale-out. */ -// * @param nbytes -// * The #of bytes of data which are available for that operator. - void bufferReady(IQueryClient clientProxy, InetSocketAddress serviceAddr, - long queryId, int bopId/*, int nbytes*/) throws RemoteException; + void bufferReady(IChunkMessage msg) throws RemoteException; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -27,6 +27,7 @@ package com.bigdata.bop.engine; +import com.bigdata.bop.BOp; import com.bigdata.btree.ILocalBTreeView; import com.bigdata.journal.IIndexManager; import com.bigdata.service.IBigdataFederation; @@ -58,12 +59,18 @@ /** * The timestamp or transaction identifier against which the query is * reading. + * + * @todo may be moved into the individual operator. See + * {@link BOp.Annotations#READ_TIMESTAMP} */ long getReadTimestamp(); /** * The timestamp or transaction identifier against which the query is * writing. + * + * @todo may be moved into the individual operator. See + * {@link BOp.Annotations#WRITE_TIMESTAMP} */ long getWriteTimestamp(); Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -0,0 +1,82 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 9, 2010 + */ + +package com.bigdata.bop.engine; + +import java.io.Serializable; + +import com.bigdata.bop.BindingSetPipelineOp; + +/** + * Default implementation. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class QueryDecl implements IQueryDecl, Serializable { + + /** + * + */ + private static final long serialVersionUID = 1L; + + private final long queryId; + + private final IQueryClient clientProxy; + + private final BindingSetPipelineOp query; + + public QueryDecl(final IQueryClient clientProxy, final long queryId, + final BindingSetPipelineOp query) { + + if (clientProxy == null) + throw new IllegalArgumentException(); + + if (query == null) + throw new IllegalArgumentException(); + + this.clientProxy = clientProxy; + + this.queryId = queryId; + + this.query = query; + + } + + public BindingSetPipelineOp getQuery() { + return query; + } + + public IQueryClient getQueryController() { + return clientProxy; + } + + public long getQueryId() { + return queryId; + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -27,7 +27,6 @@ package com.bigdata.bop.engine; -import java.net.InetSocketAddress; import java.rmi.RemoteException; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -50,8 +49,6 @@ import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.ITx; -import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.spo.SPORelation; import com.bigdata.relation.IMutableRelation; @@ -328,6 +325,11 @@ .getLogger(QueryEngine.class); /** + * Error message used if a query is not running. + */ + protected static final transient String ERR_QUERY_NOT_RUNNING = "Query is not running:"; + + /** * Access to the indices. * <p> * Note: You MUST NOT use unisolated indices without obtaining the necessary @@ -477,6 +479,19 @@ private volatile boolean shutdown = false; /** + * Return if the query engine is running. + * + * @throws IllegalStateException + * if the query engine is shutting down. + */ + protected void assertRunning() { + + if (shutdown) + throw new IllegalStateException("Shutting down."); + + } + + /** * Runnable submits chunks available for evaluation against running queries. * * @todo Handle priority for selective queries based on the time remaining @@ -519,7 +534,7 @@ final long queryId = q.getQueryId(); if (q.isCancelled()) continue; - final BindingSetChunk chunk = q.chunksIn.poll(); + final IChunkMessage chunk = q.chunksIn.poll(); if (chunk == null) { // not expected, but can't do anything without a chunk. if (log.isDebugEnabled()) @@ -528,7 +543,7 @@ } if (log.isTraceEnabled()) log.trace("Accepted chunk: queryId=" + queryId - + ", bopId=" + chunk.bopId); + + ", bopId=" + chunk.getBOpId()); try { // create task. final FutureTask<?> ft = q.newChunkTask(chunk); @@ -558,19 +573,27 @@ * * @param chunk * A chunk of intermediate results. + * + * @throws IllegalArgumentException + * if the chunk is <code>null</code>. 
+ * @throws IllegalStateException + * if the chunk is not materialized. */ - void add(final BindingSetChunk chunk) { + void acceptChunk(final IChunkMessage chunk) { if (chunk == null) throw new IllegalArgumentException(); - final RunningQuery q = runningQueries.get(chunk.queryId); + if (!chunk.isMaterialized()) + throw new IllegalStateException(); + + final RunningQuery q = runningQueries.get(chunk.getQueryId()); if(q == null) throw new IllegalStateException(); // add chunk to the query's input queue on this node. - q.add(chunk); + q.acceptChunk(chunk); // add query to the engine's task queue. priorityQueue.add(q); @@ -657,33 +680,35 @@ * IQueryPeer */ - public void bufferReady(IQueryClient clientProxy, - InetSocketAddress serviceAddr, long queryId, int bopId) { - // NOP + public void declareQuery(final IQueryDecl queryDecl) { + + throw new UnsupportedOperationException(); + } + public void bufferReady(IChunkMessage msg) { + + throw new UnsupportedOperationException(); + + } + /* * IQueryClient */ - /** - * @todo Define the behavior for these methods if the queryId is not found - * whether because the caller has the wrong value or because the query - * has terminated. - */ - public BOp getQuery(final long queryId) throws RemoteException { - - final RunningQuery q = runningQueries.get(queryId); - - if (q != null) { - - return q.getQuery(); - - } - - return null; - - } +// public BOp getQuery(final long queryId) throws RemoteException { +// +// final RunningQuery q = runningQueries.get(queryId); +// +// if (q != null) { +// +// return q.getQuery(); +// +// } +// +// return null; +// +// } public void startOp(final StartOpMessage msg) throws RemoteException { @@ -715,85 +740,110 @@ * * @param queryId * The unique identifier for the query. - * @param readTimestamp - * The timestamp or transaction against which the query will run. - * @param writeTimestamp - * The timestamp or transaction against which the query will - * write. * @param query * The query to evaluate. * * @return An iterator visiting {@link IBindingSet}s which result from * evaluating the query. * - * @throws IllegalArgumentException - * if the <i>readTimestamp</i> is {@link ITx#UNISOLATED} - * (queries may not read on the unisolated indices). - * @throws IllegalArgumentException - * if the <i>writeTimestamp</i> is neither - * {@link ITx#UNISOLATED} nor a read-write transaction - * identifier. * @throws IllegalStateException * if the {@link QueryEngine} has been {@link #shutdown()}. * @throws Exception - * - * @todo Consider elevating the read/write timestamps into the query plan as - * annotations. Closure would then rewrite the query plan for each - * pass, replacing the readTimestamp with the new read-behind - * timestamp. [This is related to how we will handle sequences of - * steps, parallel steps, and closure of steps.] 
*/ - public RunningQuery eval(final long queryId, final long readTimestamp, - final long writeTimestamp, final BindingSetPipelineOp query) - throws Exception { + public RunningQuery eval(final long queryId, + final BindingSetPipelineOp query) throws Exception { if (query == null) throw new IllegalArgumentException(); - - if (readTimestamp == ITx.UNISOLATED) - throw new IllegalArgumentException(); - - if (TimestampUtility.isReadOnly(writeTimestamp)) - throw new IllegalArgumentException(); + final RunningQuery runningQuery = newRunningQuery(this, queryId, +// System.currentTimeMillis()/* begin */, + true/* controller */, this/* clientProxy */, query); + + assertRunning(); + final long timeout = query.getProperty(BOp.Annotations.TIMEOUT, BOp.Annotations.DEFAULT_TIMEOUT); - final RunningQuery runningQuery = newRunningQuery(this, queryId, - readTimestamp, writeTimestamp, - System.currentTimeMillis()/* begin */, timeout, - true/* controller */, this/* clientProxy */, query); + if (timeout < 0) + throw new IllegalArgumentException(BOp.Annotations.TIMEOUT); - if (shutdown) { + if (timeout != Long.MAX_VALUE) { - throw new IllegalStateException("Shutting down."); + // Compute the deadline (may overflow if timeout is very large). + final long deadline = System.currentTimeMillis() + timeout; + if (deadline > 0) { + /* + * Impose a deadline on the query. + */ + runningQuery.setDeadline(deadline); + + } + } - runningQueries.put(queryId, runningQuery); + putRunningQuery(queryId, runningQuery); return runningQuery; } /** + * Return the {@link RunningQuery} associated with that query identifier. + * + * @param queryId + * The query identifier. + * + * @return The {@link RunningQuery} -or- <code>null</code> if there is no + * query associated with that query identifier. + */ + protected RunningQuery getRunningQuery(final long queryId) { + + return runningQueries.get(queryId); + + } + + /** + * Places the {@link RunningQuery} object into the internal map. + * + * @param queryId + * The query identifier. + * @param runningQuery + * The {@link RunningQuery}. + */ + protected void putRunningQuery(final long queryId, + final RunningQuery runningQuery) { + + if (runningQuery == null) + throw new IllegalArgumentException(); + + runningQueries.put(queryId, runningQuery); + + } + + /** * Factory for {@link RunningQuery}s. */ protected RunningQuery newRunningQuery(final QueryEngine queryEngine, - final long queryId, final long readTimestamp, - final long writeTimestamp, final long begin, final long timeout, - final boolean controller, final IQueryClient clientProxy, - final BindingSetPipelineOp query) { + final long queryId, final boolean controller, + final IQueryClient clientProxy, final BindingSetPipelineOp query) { - return new RunningQuery(this, queryId, readTimestamp, writeTimestamp, - System.currentTimeMillis()/* begin */, timeout, - true/* controller */, this/* clientProxy */, query, - newQueryBuffer(query)); + return new RunningQuery(this, queryId, true/* controller */, + this/* clientProxy */, query, newQueryBuffer(query)); } /** * Return a buffer onto which the solutions will be written. + * + * @todo This method is probably in the wrong place. We should use whatever + * is associated with the top-level {@link BOp} in the query and then + * rely on the NIO mechanisms to move the data around as necessary. + * + * @todo Could return a data structure which encapsulates the query results + * and could allow multiple results from a query, e.g., one per step + * in a program. 
*/ protected IBlockingBuffer<IBindingSet[]> newQueryBuffer( final BindingSetPipelineOp query) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-09 13:50:53 UTC (rev 3525) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-09 17:17:21 UTC (rev 3526) @@ -27,7 +27,6 @@ */ package com.bigdata.bop.engine; -import java.nio.ByteBuffer; import java.rmi.RemoteException; import java.util.LinkedHashMap; import java.util.LinkedHashSet; @@ -43,7 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; import org.apache.log4j.Logger; @@ -54,9 +52,9 @@ import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.NoSuchBOpException; -import com.bigdata.bop.ap.Predicate; import com.bigdata.journal.IIndexManager; -import com.bigdata.relation.accesspath.BlockingBuffer; +import com.bigdata.journal.ITx; +import com.bigdata.journal.TimestampUtility; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.service.IBigdataFederation; @@ -103,10 +101,15 @@ */ final private long writeTimestamp; +// /** +// * The timestamp when the query was accepted by this node (ms). +// */ +// final private long begin; /** - * The timestamp when the query was accepted by this node (ms). + * The query deadline. The value is the system clock time in milliseconds + * when the query is due and {@link Long#MAX_VALUE} if there is no deadline. */ - final private long begin; + final private AtomicLong deadline = new AtomicLong(Long.MAX_VALUE); /** * How long the query is allowed to run (elapsed milliseconds) -or- @@ -129,15 +132,16 @@ */ final private IQueryClient clientProxy; - /** The query iff materialized on this node. */ - final private AtomicReference<BOp> queryRef; +// /** The query iff materialized on this node. */ +// final private AtomicReference<BOp> queryRef; + /** The query. */ + final private BOp query; /** * The buffer used for the overall output of the query pipeline. * - * @todo How does the pipeline get attached to this buffer? Via a special - * operator? Or do we just target the coordinating {@link QueryEngine} - * as the sink of the last operator so we can use NIO transfers? + * FIXME SCALEOUT: This should only exist on the query controller. Other + * nodes will send {@link IChunkMessage}s to the query controller. */ final private IBlockingBuffer<IBindingSet[]> queryBuffer; @@ -208,26 +212,49 @@ private final Set<Integer/*bopId*/> startedSet = new LinkedHashSet<Integer>(); /** - * The chunks available for immediate processing. + * The chunks available for immediate processing (they must have been + * materialized). * <p> * Note: This is package private so it will be visible to the * {@link QueryEngine}. + */ + final/* private */BlockingQueue<IChunkMessage> chunksIn = new LinkedBlockingDeque<IChunkMessage>(); + + /** + * Set the query deadline. The query will be cancelled when the deadline is + * passed. If the deadline is passed, the query is immediately cancelled. 
* - * @todo It is likely that we can convert to the use of - * {@link BlockingQueue} instead of {@link BlockingBuffer} in the - * operators and then handle the logic for combining chunks inside of - * the {@link QueryEngine}. E.g., by scanning this list for chunks for - * the same bopId and combining them logically into a single chunk. - * <p> - * For scale-out, chunk combination will naturally occur when the node - * on which the operator will run requests the {@link ByteBuffer}s - * from the source nodes. Those will get wrapped up logically into a - * source for processing. For selective operators, those chunks can be - * combined before we execute the operator. For unselective operators, - * we are going to run over all the data anyway. + * @param deadline + * The deadline. + * @throws IllegalArgumentException + * if the deadline is non-positive. + * @throws IllegalStateException + * if the deadline was already set. + * @throws UnsupportedOperationException + * unless node is the query controller. */ - final /*private*/ BlockingQueue<BindingSetChunk> chunksIn = new LinkedBlockingDeque<BindingSetChunk>(); + public void setDeadline(final long deadline) { + if(!controller) + throw new UnsupportedOperationException(); + + if (deadline <= 0) + throw new IllegalArgumentException(); + + // set the deadline. + if (!this.deadline + .compareAndSet(Long.MAX_VALUE/* expect */, deadline/* update */)) { + // the deadline is already set. + throw new IllegalStateException(); + } + + if (deadline < System.currentTimeMillis()) { + // deadline has already expired. + cancel(true/* mayInterruptIfRunning */); + } + + } + /** * The class executing the query on this node. */ @@ -259,41 +286,45 @@ } - /** - * Return the operator tree for this query. If query processing is - * distributed and the query has not been materialized on this node, then it - * is materialized now. - * - * @return The query. - */ public BOp getQuery() { + return query; + } + +// /** +// * Return the operator tree for this query. If query processing is +// * distributed and the query has not been materialized on this node, then it +// * is materialized now. +// * +// * @return The query. +// */ +// public BOp getQuery() { +// +// if (queryRef.get() == null) { +// +// synchronized (queryRef) { +// +// if (queryRef.get() == null) { +// +// try { +// +// queryRef.set(clientProxy.getQuery(queryId)); +// +// } catch (RemoteException e) { +// +// throw new RuntimeException(e); +// +// } +// +// } +// +// } +// +// } +// +// return queryRef.get(); +// +// } - if (queryRef.get() == null) { - - synchronized (queryRef) { - - if (queryRef.get() == null) { - - try { - - queryRef.set(clientProxy.getQuery(queryId)); - - } catch (RemoteException e) { - - throw new RuntimeException(e); - - } - - } - - } - - } - - return queryRef.get(); - - } - /** * Return <code>true</code> iff this is the query controller. */ @@ -305,11 +336,8 @@ /** * Return the current statistics for the query and <code>null</code> unless - * this is the query controller. - * - * @todo When the query is done, there will be one entry in this map for - * each operator in the pipeline. Non-pipeline operators such as - * {@link Predicate}s do not currently make it into this map. + * this is the query controller. For {@link BindingSetPipelineOp} operator + * which is evaluated there will be a single entry in this map. */ public Map<Integer/*bopId*/,BOpStats> getStats() { @@ -323,35 +351,100 @@ * @param begin * @param clientProxy * @param query - * The query (optional). 
+ * + * @throws IllegalArgumentException + * if any argument is <code>null</code>. + * @throws IllegalArgumentException + * if the <i>readTimestamp</i> is {@link ITx#UNISOLATED} + * (queries may not read on the unisolated indices). + * @throws IllegalArgumentException + * if the <i>writeTimestamp</i> is neither + * {@link ITx#UNISOLATED} nor a read-write transaction + * identifier. + * + * @todo is queryBuffer required? should it be allocated from the top bop? */ public RunningQuery(final QueryEngine queryEngine, final long queryId, - final long readTimestamp, final long writeTimestamp, - final long begin, final long timeout, final boolean controller, +// final long begin, + final boolean controller, final IQueryClient clientProxy, final BOp query, final IBlockingBuffer<IBindingSet[]> queryBuffer) { + + if (queryEngine == null) + throw new IllegalArgumentException(); + + if (clientProxy == null) + throw new IllegalArgumentException(); + + if (query == null) + throw new IllegalArgumentException(); + this.queryEngine = queryEngine; this.queryId = queryId; - this.readTimestamp = readTimestamp; - this.writeTimestamp = writeTimestamp; - this.begin = begin; - this.timeout = timeout; +// this.begin = begin; this.controller = controller; this.clientProxy = clientProxy; - this.queryRef = new AtomicReference<BOp>(query); - if (controller && query == null) - throw new IllegalArgumentException(); + this.query = query; this.queryBuffer = queryBuffer; this.bopIndex = BOpUtility.getIndex(query); this.statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() : null; + /* + * @todo when making a per-bop annotation, queries must obtain a tx for + * each timestamp up front on the controller and rewrite the bop to hold + * the tx until it is done. + * + * @todo This is related to how we handle sequences of steps, parallel + * steps, closure of steps, and join graphs. Those operations need to be + * evaluated on the controller. We will have to model the relationship + * between the subquery and the query in order to terminate the subquery + * when the query halts and to terminate the query if the subquery + * fails. + * + * @todo Closure operations must rewrite the query to update the + * annotations. Each pass in a closure needs to be its own "subquery" + * and will need to have a distinct queryId. + */ + final Long readTimestamp = query + .getProperty(BOp.Annotations.READ_TIMESTAMP); + + // @todo remove default when elevating to per-writable bop annotation. + final long writeTimestamp = query.getProperty( + BOp.Annotations.WRITE_TIMESTAMP, ITx.UNISOLATED); + + if (readTimestamp == null) + throw new IllegalArgumentException(); + + if (readTimestamp.longValue() == ITx.UNISOLATED) + throw new IllegalArgumentException(); + + if (TimestampUtility.isReadOnly(writeTimestamp)) + throw new IllegalArgumentException(); + + this.readTimestamp = readTimestamp; + + this.writeTimestamp = writeTimestamp; + + this.timeout = query.getProperty(BOp.Annotations.TIMEOUT, + BOp.Annotations.DEFAULT_TIMEOUT); + + if (timeout < 0) + throw new IllegalArgumentException(); + } /** - * Create a {@link BindingSetChunk} from a sink and add it to the queue. + * Take a chunk generated by some pass over an operator and make it + * available to the target operator. How this is done depends on whether the + * query is running against a standalone database or the scale-out database. * <p> - * Note: If we are running standalone, then we leave the data on the heap - * rather than formatting it onto a {@link ByteBuffer}. 
+ * Note: The return value is used as part of the termination criteria for + * the query. + * <p> + * The default implementation supports a standalone database. The generated + * chunk is left on the Java heap and handed off synchronously using + * {@link QueryEngine#add(IChunkMessage)}. That method will queue the chunk + * for asynchronous processing. * * @param sinkId * The identifier of the target operator. @@ -363,39 +456,42 @@ * one chunk per index partition over which the intermediate results * were mapped. */ - protected <E> int add(final int sinkId, + protected <E> int handleOutputChunk(final int sinkId, final IBlockingBuffer<IBindingSet[]> sink) { /* * Note: The partitionId will always be -1 in scale-up. */ - final BindingSetChunk chunk = new BindingSetChunk(queryId, sinkId, - -1/* partitionId */, sink.iterator()); + final BindingSetChunk chunk = new BindingSetChunk(clientProxy, queryId, + sinkId, -1/* partitionId */, sink.iterator()); - queryEngine.add(chunk); + queryEngine.acceptChunk(chunk); return 1; - } + } /** * Make a chunk of binding sets available for consumption by the query. * <p> * Note: this is invoked by {@link QueryEngine#add(BindingSetChunk)}. * - * @param chunk + * @param msg * The chunk. */ - void add(final BindingSetChunk chunk) { + protected void acceptChunk(final IChunkMessage msg) { - if (chunk == null) + if (msg == null) throw new IllegalArgumentException(); + if (!msg.isMaterialized()) + throw new IllegalStateException(); + // verify still running. future.halted(); // add chunk to be consumed. - chunksIn.add(chunk); + chunksIn.add(msg); if (log.isDebugEnabled()) log.debug("queryId=" + queryId + ", chunksIn.size()=" @@ -409,30 +505,31 @@ * * @todo this should reject multiple invocations for a given query instance. */ - public void startQuery(final BindingSetChunk chunk) { + public void startQuery(final IChunkMessage chunk) { if (!controller) throw new UnsupportedOperationException(); if (chunk == null) throw new IllegalArgumentException(); - if (chunk.queryId != queryId) // @todo equals() if queryId is UUID. + if (chunk.getQueryId() != queryId) // @todo equals() if queryId is UUID. 
throw new IllegalArgumentException(); + final int bopId = chunk.getBOpId(); runStateLock.lock(); try { lifeCycleSetUpQuery(); availableChunkCount++; { - AtomicLong n = availableChunkCountMap.get(chunk.bopId); + AtomicLong n = availableChunkCountMap.get(bopId); if (n == null) - availableChunkCountMap.put(chunk.bopId, n = new AtomicLong()); + availableChunkCountMap.put(bopId, n = new AtomicLong()); n.incrementAndGet(); } if (log.isInfoEnabled()) log.info("queryId=" + queryId + ",runningTaskCount=" + runningTaskCount + ",availableChunks=" + availableChunkCount); - System.err.println("startQ : bopId=" + chunk.bopId + ",running=" + System.err.println("startQ : bopId=" + bopId + ",running=" + runningTaskCount + ",available=" + availableChunkCount); - queryEngine.add(chunk); + queryEngine.acceptChunk(chunk); } finally { runStateLock.unlock(); } @@ -484,13 +581,9 @@ System.err.println("startOp: bopId=" + msg.bopId + ",running=" + runningTaskCount + ",available=" + availableChunkCount + ",fanIn=" + msg.nchunks); - final long elapsed = System.currentTimeMillis() - begin; - if (log.isTraceEnabled()) - log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId - + ",serviceId=" + msg.serviceId + " : runningTaskCount=" - + runningTaskCount + ", availableChunkCount=" - + availableChunkCount + ", elapsed=" + elapsed); - if (elapsed > timeout) { + if (deadline.get() < System.currentTimeMillis()) { + if (log.isTraceEnabled()) + log.trace("queryId: deadline expired."); future.halt(new TimeoutException()); cancel(true/* mayInterruptIfRunning */); } @@ -563,13 +656,13 @@ + runningTaskCount; assert availableChunkCount >= 0 : "availableChunkCount=" + availableChunkCount; - final long elapsed = System.currentTimeMillis() - begin; +// final long elapsed = System.currentTimeMillis() - begin; if (log.isTraceEnabled()) log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId + ",serviceId=" + queryEngine.getServiceUUID() + ", nchunks=" + fanOut + " : runningTaskCount=" + runningTaskCount + ", availableChunkCount=" - + availableChunkCount + ", elapsed=" + elapsed); + + availableChunkCount);// + ", elapsed=" + elapsed); // test termination criteria if (msg.cause != null) { // operator failed on this chunk. @@ -582,8 +675,9 @@ // success (all done). future.halt(getStats()); cancel(true/* mayInterruptIfRunning */); - } else if (elapsed > timeout) { - // timeout + } else if (deadline.get() < System.currentTimeMillis()) { + if (log.isTraceEnabled()) + log.trace("queryId: deadline expired."); future.halt(new TimeoutException()); cancel(true/* mayInterruptIfRunning */); } @@ -614,8 +708,8 @@ if (!runStateLock.isHeldByCurrentThread()) throw new IllegalMonitorStateException(); - return PipelineUtility.isDone(bopId, queryRef.get(), bopIndex, - runningCountMap, availableChunkCountMap); + return PipelineUtility.isDone(bopId, query, bopIndex, runningCountMap, + availableChunkCountMap); } @@ -681,14 +775,16 @@ * A chunk to be consumed. */ @SuppressWarnings("unchecked") - protected FutureTask<Void> newChunkTask(final BindingSetChunk chunk) { + protected FutureTask<Void> newChunkTask(final IChunkMessage chunk) { /* * Look up the BOp in the index, create the BOpContext for that BOp, and * return the value returned by BOp.eval(context). 
*/ - final BOp bop = bopIndex.get(chunk.bopId); + final int bopId = chunk.getBOpId(); + final int partitionId = chunk.getPartitionId(); + final BOp bop = bopIndex.get(bopId); if (bop == null) { - throw new NoSuchBOpException(chunk.bopId); + throw new NoSuchBOpException(bopId); } if (!(bop instanceof BindingSetPipelineOp)) { /* @@ -701,7 +797,7 @@ // self final BindingSetPipelineOp op = ((BindingSetPipelineOp) bop); // parent (null if this is the root of the operator tree). - final BOp p = BOpUtility.getParent(queryRef.get(), op); + final BOp p = BOpUtility.getParent(query, op); // sink (null unless parent is defined) final Integer sinkId = p == null ? null : (Integer) p .getProperty(BindingSetPipelineOp.Annotations.BOP_ID); @@ -716,8 +812,8 @@ final IBlockingBuffer<IBindingSet[]> altSink = altSinkId == null ? null : op.newBuffer(); // context - final BOpContext context = new BOpContext(this, chunk.partitionId, op - .newStats(), chunk.source, sink, altSink); + final BOpContext context = new BOpContext(this, partitionId, op + .newStats(), chunk.iterator(), sink, altSink); // FutureTask for operator execution (not running yet). final FutureTask<Void> f = op.eval(context); // Hook the FutureTask. @@ -729,29 +825,29 @@ int altSinkChunksOut = 0; try { clientProxy.startOp(new StartOpMessage(queryId, - chunk.bopId, chunk.partitionId, serviceId, fanIn)); + bopId, partitionId, serviceId, fanIn)); if (log.isDebugEnabled()) log.debug("Running chunk: queryId=" + queryId - + ", bopId=" + chunk.bopId + ", bop=" + bop); + + ", bopId=" + bopId + ", bop=" + bop); f.run(); // run f.get(); // verify success if (sink != queryBuffer && !sink.isEmpty()) { // handle output chunk. - sinkChunksOut += add(sinkId, sink); + sinkChunksOut += handleOutputChunk(sinkId, sink); } if (altSink != queryBuffer && altSink != null && !altSink.isEmpty()) { // handle alt sink output chunk. - altSinkChunksOut += add(altSinkId, altSink); + altSinkChunksOut += handleOutputChunk(altSinkId, altSink); } - clientProxy.haltOp(new HaltOpMessage(queryId, chunk.bopId, - chunk.partitionId, serviceId, null/* cause */, + clientProxy.haltOp(new HaltOpMessage(queryId, bopId, + partitionId, serviceId, null/* cause */, sinkId, sinkChunksOut, altSinkId, altSinkChunksOut, context.getStats())); } catch (Throwable t) { try { clientProxy.haltOp(new HaltOpMessage(queryId, - chunk.bopId, chunk.partitionId, serviceId, + bopId, partitionId, serviceId, t/* cause */, sinkId, sinkChunksOut, altSinkId, altSinkChunksOut, context.getStats())); } catch (RemoteE... [truncated message content] |
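The substantive change in this revision is that the per-query timeout is no longer re-measured against a begin timestamp on every startOp/haltOp message; instead eval() converts the TIMEOUT annotation into an absolute deadline once, and RunningQuery polls that deadline (note the committed code also guards against numeric overflow of System.currentTimeMillis() + timeout via the deadline > 0 check). The pattern stands alone as the following minimal sketch; QueryStub and its cancel() hook are illustrative stand-ins rather than bigdata classes, and only the AtomicLong/compareAndSet logic mirrors the committed code:

    import java.util.concurrent.atomic.AtomicLong;

    class QueryStub {

        /** Long.MAX_VALUE means "no deadline", matching the committed default. */
        private final AtomicLong deadline = new AtomicLong(Long.MAX_VALUE);

        void setDeadline(final long deadlineMillis) {
            if (deadlineMillis <= 0)
                throw new IllegalArgumentException();
            // One-shot: only the first caller may set the deadline.
            if (!deadline.compareAndSet(Long.MAX_VALUE/* expect */,
                    deadlineMillis/* update */))
                throw new IllegalStateException("deadline already set");
            if (deadlineMillis < System.currentTimeMillis())
                cancel(); // deadline already expired: fail fast.
        }

        /** Polled from startOp()/haltOp() style callbacks. */
        boolean isExpired() {
            return deadline.get() < System.currentTimeMillis();
        }

        void cancel() {
            System.out.println("query cancelled");
        }

        public static void main(final String[] args) {
            final QueryStub q = new QueryStub();
            q.setDeadline(System.currentTimeMillis() + 1000); // 1s timeout
            System.out.println("expired? " + q.isExpired()); // false
        }
    }

Because the deadline is absolute, each message handler reduces its termination test to a single clock comparison, rather than threading a begin timestamp and elapsed computation through every handler as the deleted code did.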
From: <fko...@us...> - 2010-09-09 13:51:02
Revision: 3525 http://bigdata.svn.sourceforge.net/bigdata/?rev=3525&view=rev Author: fkoliver Date: 2010-09-09 13:50:53 +0000 (Thu, 09 Sep 2010) Log Message: ----------- Revert changes which brought sources from another project into this project. I apologize for making the change without proper consideration for license. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/pom.xml branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/standalone/ServerStarter.config branches/maven_scaleout/bigdata-core/thirdparty/maven.xml Added Paths: ----------- branches/maven_scaleout/bigdata-core/thirdparty/lib/cweb-junit-ext-1.1-b3-dev.jar Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/test/java/junit/ Property Changed: ---------------- branches/maven_scaleout/bigdata-core/ Property changes on: branches/maven_scaleout/bigdata-core ___________________________________________________________________ Deleted: svn:ignore - target Modified: branches/maven_scaleout/bigdata-core/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-core/pom.xml 2010-09-08 20:52:07 UTC (rev 3524) +++ branches/maven_scaleout/bigdata-core/pom.xml 2010-09-09 13:50:53 UTC (rev 3525) @@ -216,6 +216,12 @@ </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> + <artifactId>cweb-junit-ext</artifactId> + <version>1.1.0-b3-dev</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>${thirdParty.groupId}</groupId> <artifactId>cweb-commons</artifactId> <version>1.1.0-b2-dev</version> </dependency> Modified: branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/standalone/ServerStarter.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/standalone/ServerStarter.config 2010-09-08 20:52:07 UTC (rev 3524) +++ branches/maven_scaleout/bigdata-core/src/test/deploy/testing/conf/standalone/ServerStarter.config 2010-09-09 13:50:53 UTC (rev 3525) @@ -59,6 +59,7 @@ libdir+"icu4j-3_6.jar"+File.pathSeparator+ // test suites only! libdir+"junit-3.8.1.jar"+File.pathSeparator+ + libdir+"cweb-junit-ext-1.1-b2-dev.jar"+File.pathSeparator+ // main bigdata JAR. 
//libdir+ "bigdata-core.jar" Copied: branches/maven_scaleout/bigdata-core/thirdparty/lib/cweb-junit-ext-1.1-b3-dev.jar (from rev 3520, branches/maven_scaleout/bigdata-core/thirdparty/lib/cweb-junit-ext-1.1-b3-dev.jar) =================================================================== (Binary files differ) Modified: branches/maven_scaleout/bigdata-core/thirdparty/maven.xml =================================================================== --- branches/maven_scaleout/bigdata-core/thirdparty/maven.xml 2010-09-08 20:52:07 UTC (rev 3524) +++ branches/maven_scaleout/bigdata-core/thirdparty/maven.xml 2010-09-09 13:50:53 UTC (rev 3525) @@ -69,6 +69,7 @@ <installJar groupId="com.bigdata.thirdparty" artifactId="ctc-utils" version="5-4-2005" jar="${thirdparty.dir}/ctc_utils-5-4-2005.jar" /> <installJar groupId="com.bigdata.thirdparty" artifactId="cweb-commons" version="1.1.0-b2-dev" jar="${thirdparty.dir}/cweb-commons-1.1-b2-dev.jar" /> <installJar groupId="com.bigdata.thirdparty" artifactId="cweb-extser" version="0.1.0-b2-dev" jar="${thirdparty.dir}/cweb-extser-0.1-b2-dev.jar" /> + <installJar groupId="com.bigdata.thirdparty" artifactId="cweb-junit-ext" version="1.1.0-b3-dev" jar="${thirdparty.dir}/cweb-junit-ext-1.1-b3-dev.jar" /> <installJar groupId="com.bigdata.thirdparty" artifactId="dsi-utils" version="1.0.6-020610" jar="${thirdparty.dir}/dsi-utils-1.0.6-020610.jar" /> <installJar groupId="com.bigdata.thirdparty" artifactId="unimi-fastutil" version="5.1.5" jar="${thirdparty.dir}/fastutil-5.1.5.jar" /> <installJar groupId="com.bigdata.thirdparty" artifactId="high-scale-lib" version="1.1.2" jar="${thirdparty.dir}/high-scale-lib-v1.1.2.jar" /> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |