This list is closed, nobody may subscribe to it.
2010 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
(139) |
Aug
(94) |
Sep
(232) |
Oct
(143) |
Nov
(138) |
Dec
(55) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(127) |
Feb
(90) |
Mar
(101) |
Apr
(74) |
May
(148) |
Jun
(241) |
Jul
(169) |
Aug
(121) |
Sep
(157) |
Oct
(199) |
Nov
(281) |
Dec
(75) |
2012 |
Jan
(107) |
Feb
(122) |
Mar
(184) |
Apr
(73) |
May
(14) |
Jun
(49) |
Jul
(26) |
Aug
(103) |
Sep
(133) |
Oct
(61) |
Nov
(51) |
Dec
(55) |
2013 |
Jan
(59) |
Feb
(72) |
Mar
(99) |
Apr
(62) |
May
(92) |
Jun
(19) |
Jul
(31) |
Aug
(138) |
Sep
(47) |
Oct
(83) |
Nov
(95) |
Dec
(111) |
2014 |
Jan
(125) |
Feb
(60) |
Mar
(119) |
Apr
(136) |
May
(270) |
Jun
(83) |
Jul
(88) |
Aug
(30) |
Sep
(47) |
Oct
(27) |
Nov
(23) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2016 |
Jan
|
Feb
|
Mar
(4) |
Apr
(1) |
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: <tho...@us...> - 2010-09-16 20:29:58
|
Revision: 3574 http://bigdata.svn.sourceforge.net/bigdata/?rev=3574&view=rev Author: thompsonbry Date: 2010-09-16 20:29:51 +0000 (Thu, 16 Sep 2010) Log Message: ----------- Added a unit test for the query engine in which an IConstraint is applied. Identified a problem with multiple concurrent evaluation of SliceOp. It needs to be modified to use the same state for each invocation and to use CATs (or AtomicLong or chunk-wise locking, or bop invocation wise locking) to prevent concurrency failures (such as letting through too many solutions). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-16 19:43:08 UTC (rev 3573) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-16 20:29:51 UTC (rev 3574) @@ -638,6 +638,14 @@ } + if (log.isTraceEnabled()) { + + log.debug("Accepted by " + + constraint.getClass().getSimpleName() + " : " + + bindingSet); + + } + } return true; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2010-09-16 19:43:08 UTC (rev 3573) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2010-09-16 20:29:51 UTC (rev 3574) @@ -70,15 
+70,21 @@ } - public boolean accept(final IBindingSet s) { + public boolean accept(final IBindingSet bset) { + final IVariable<?> var = (IVariable<?>) get(0)/* var */; + // get binding for the variable. - final IConstant<?> tmp = s.get((IVariable<?>) get(0)/* var */); + final IConstant<?> asBound = bset.get(var); - if (tmp == null) + if (asBound == null) return true; // not yet bound. - return tmp.equals(get(1)); + final IConstant<?> cnst = (IConstant<?>) get(1); + + final boolean ret = asBound.equals(cnst); + + return ret; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-16 19:43:08 UTC (rev 3573) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-16 20:29:51 UTC (rev 3574) @@ -1730,10 +1730,14 @@ } if (log.isDebugEnabled()) - log.debug("Accepted element for " + naccepted - + " of " + bindingSets.length - + " possible bindingSet combinations: " - + e.toString() + ", joinOp=" + joinOp); + if (naccepted == 0) { + log.debug("Rejected element: " + e.toString()); + } else { + log.debug("Accepted element for " + naccepted + + " of " + bindingSets.length + + " possible bindingSet combinations: " + + e.toString()); + } } // if something is accepted in the chunk return true. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-16 19:43:08 UTC (rev 3573) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-16 20:29:51 UTC (rev 3574) @@ -50,6 +50,7 @@ import com.bigdata.bop.HashBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; +import com.bigdata.bop.IConstraint; import com.bigdata.bop.IVariable; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.bop.NV; @@ -59,6 +60,7 @@ import com.bigdata.bop.ap.R; import com.bigdata.bop.bset.ConditionalRoutingOp; import com.bigdata.bop.bset.StartOp; +import com.bigdata.bop.constraint.EQConstant; import com.bigdata.bop.fed.TestFederatedQueryEngine; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.bop.solutions.SliceOp; @@ -68,6 +70,7 @@ import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; import com.bigdata.striterator.ChunkedArrayIterator; +import com.bigdata.striterator.Dechunkerator; import com.bigdata.striterator.ICloseableIterator; import com.bigdata.util.concurrent.LatchedExecutor; import com.ibm.icu.impl.ByteBuffer; @@ -515,6 +518,173 @@ } /** + * A join with an {@link IConstraint}. 
+ */ + public void test_query_join_withConstraint() throws Exception { + + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + + final int startId = 1; + final int joinId = 2; + final int predId = 3; + final int sliceId = 4; + + final StartOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + })); + + /* + * + * Note: Since the index on which this reads is formed as (column1 + + * column2) the probe key will be [null] if it does not bind the first + * column. Therefore, in order to have the 2nd column constraint we have + * to model it as an IElementFilter on the predicate. + */ + final Predicate<E> predOp = new Predicate<E>(new IVariableOrConstant[] { + x, y}, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }),// + new NV(Predicate.Annotations.PARTITION_ID, Integer + .valueOf(-1)),// + new NV(Predicate.Annotations.OPTIONAL, Boolean.FALSE),// + new NV(Predicate.Annotations.CONSTRAINT,null),// + new NV(Predicate.Annotations.EXPANDER, null),// + new NV(Predicate.Annotations.BOP_ID, predId),// + new NV(Predicate.Annotations.TIMESTAMP, + ITx.READ_COMMITTED),// + new NV(Predicate.Annotations.KEY_ORDER, + R.primaryKeyOrder),// + })); + + final PipelineJoin<E> joinOp = new PipelineJoin<E>(startOp/* left */, + predOp/* right */, + // join annotations + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, joinId),// + // impose constraint on the join. + new NV(PipelineJoin.Annotations.CONSTRAINTS, + new IConstraint[] { new EQConstant(y, + new Constant<String>("Paul")) }),// + })// + ); + + final BindingSetPipelineOp query = new SliceOp(new BOp[] { joinOp }, + // slice annotations + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, sliceId),// + })// + ); + + // the expected solutions (order is not reliable due to concurrency). 
+ final IBindingSet[] expected = new IBindingSet[] {// +// new ArrayBindingSet(// +// new IVariable[] { x, y },// +// new IConstant[] { new Constant<String>("John"), +// new Constant<String>("Mary") }// +// ), // + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Leon"), + new Constant<String>("Paul") }// + ), // +// new ArrayBindingSet(// +// new IVariable[] { x, y },// +// new IConstant[] { new Constant<String>("Mary"), +// new Constant<String>("John") }// +// ), // + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Mary"), + new Constant<String>("Paul") }// + ), // +// new ArrayBindingSet(// +// new IVariable[] { x, y },// +// new IConstant[] { new Constant<String>("Paul"), +// new Constant<String>("Leon") }// +// ), // + }; +// new E("John", "Mary"),// [0] +// new E("Leon", "Paul"),// [1] +// new E("Mary", "Paul"),// [2] +// new E("Paul", "Leon"),// [3] + + final RunningQuery runningQuery; + { + final IBindingSet initialBindingSet = new HashBindingSet(); + +// initialBindingSet.set(y, new Constant<String>("Paul")); + + final UUID queryId = UUID.randomUUID(); + + runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId,// + -1, /* partitionId */ + newBindingSetIterator(initialBindingSet))); + } + + // verify solutions. + TestQueryEngine.assertSameSolutionsAnyOrder(expected, + new Dechunkerator<IBindingSet>(runningQuery.iterator())); + + // Wait until the query is done. + runningQuery.get(); + final Map<Integer, BOpStats> statsMap = runningQuery.getStats(); + { + // validate the stats map. + assertNotNull(statsMap); + assertEquals(3, statsMap.size()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); + } + + // validate the stats for the start operator. 
+ { + final BOpStats stats = statsMap.get(startId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("start: "+stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(1L, stats.unitsIn.get()); + assertEquals(1L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + // validate the stats for the join operator. + { + final BOpStats stats = statsMap.get(joinId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("join : "+stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(1L, stats.unitsIn.get()); + assertEquals(2L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + // validate the stats for the slice operator. + { + final BOpStats stats = statsMap.get(sliceId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("slice: "+stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(2L, stats.unitsIn.get()); + assertEquals(2L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + } + + /** * @todo Test the ability run a query reading on an access path using a * element filter (other than DISTINCT). 
*/ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-16 19:43:08 UTC (rev 3573) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-16 20:29:51 UTC (rev 3574) @@ -44,6 +44,7 @@ import com.bigdata.bop.HashBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstant; +import com.bigdata.bop.IConstraint; import com.bigdata.bop.IVariable; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.bop.NV; @@ -52,6 +53,7 @@ import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.ap.R; import com.bigdata.bop.bset.StartOp; +import com.bigdata.bop.constraint.EQConstant; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.LocalChunkMessage; @@ -63,6 +65,7 @@ import com.bigdata.bop.solutions.SliceOp; import com.bigdata.bop.solutions.SortOp; import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.counters.CAT; import com.bigdata.journal.BufferMode; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; @@ -331,6 +334,21 @@ } /** + * Return an {@link IAsynchronousIterator} that will read a single, chunk + * containing all of the specified {@link IBindingSet}s. + * + * @param bindingSets + * the binding sets. 
+ */ + protected ThickAsynchronousIterator<IBindingSet[]> newBindingSetIterator( + final IBindingSet[] bindingSets) { + + return new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { bindingSets }); + + } + + /** * Starts and stops the {@link QueryEngine}, but does not validate the * semantics of shutdown() versus shutdownNow() since we need to be * evaluating query mixes in order to verify the semantics of those @@ -404,6 +422,92 @@ } /** + * Unit test uses a {@link StartOp} to copy some binding sets through a + * {@link SliceOp} without involving any joins or access path reads. For + * this test, the binding sets never leave the query controller. + * + * @throws Exception + */ + public void test_query_startThenSlice() throws Exception { + + final int startId = 1; + final int sliceId = 4; + + final StartOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + })); + + final BindingSetPipelineOp query = new SliceOp(new BOp[] { startOp }, + // slice annotations + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, sliceId),// + })// + ); + + // the expected solutions (order is not reliable due to concurrency). + final IBindingSet[] expected = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { Var.var("value") },// + new IConstant[] { new Constant<String>("Paul") }// + ), // + new ArrayBindingSet(// + new IVariable[] { Var.var("value") },// + new IConstant[] { new Constant<String>("John") }// + ) }; + + final UUID queryId = UUID.randomUUID(); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId,// + -1, /* partitionId */ + newBindingSetIterator(expected))); + + // verify solutions. + TestQueryEngine.assertSameSolutionsAnyOrder(expected, + new Dechunkerator<IBindingSet>(runningQuery.iterator())); + + // Wait until the query is done. 
+ runningQuery.get(); + final Map<Integer, BOpStats> statsMap = runningQuery.getStats(); + { + // validate the stats map. + assertNotNull(statsMap); + assertEquals(2, statsMap.size()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); + } + + // validate the stats for the start operator. + { + final BOpStats stats = statsMap.get(startId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("start: "+stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals((long) expected.length, stats.unitsIn.get()); + assertEquals((long) expected.length, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + // validate the stats for the slice operator. + { + final BOpStats stats = statsMap.get(sliceId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("slice: " + stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals((long) expected.length, stats.unitsIn.get()); + assertEquals((long) expected.length, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + } + + /** * Test the ability run a simple join. There are three operators. One feeds * an empty binding set[] into the join, another is the predicate for the * access path on which the join will read (it probes the index once for @@ -448,7 +552,7 @@ ); final BindingSetPipelineOp query = new SliceOp(new BOp[] { joinOp }, - // slice annotations + // slice annotations NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, sliceId),// })// @@ -533,16 +637,24 @@ /** * Test the ability run a simple join which is mapped across two index - * partitions. There are three operators. 
One feeds an empty binding set[] - * in which the 2nd column of the relation is bound into the join, another - * is the predicate for the access path on which the join will read (it will - * read everything since the primary key is on the first column then the - * second column and hence can not be used to select the index partition for - * this access path), and the third is the join itself. + * partitions. * - * @throws Exception + * FIXME This is failing because the {@link SliceOp} is not remembering its + * state across distinct invocations and is cancelling the query as soon as + * it exhausts its input. In order to have correct decision boundaries, + * slice needs to be invoked either once, concurrently if using {@link CAT} + * s, or in a series of presentations otherwise. + * <p> + * The easiest way to fix this is to have {@link SliceOp} specialize the + * {@link BOpStats}s and carry its state there. That will also make it safe + * for concurrent evaluation within the same query, and we will have to + * write a unit test for that. + * <p> + * I am not yet convinced that the problem with the test failure is double + * invocation of {@link SliceOp}. It could also be that we are not invoking + * it the 2nd time. */ - public void test_query_join1_2shards() throws Exception { + public void test_query_join_withConstraint_readsOn2shards() throws Exception { final Var<?> x = Var.var("x"); final Var<?> y = Var.var("y"); @@ -556,15 +668,22 @@ new NV(Predicate.Annotations.BOP_ID, startId),// })); + /* + * + * Note: Since the index on which this reads is formed as (column1 + + * column2) the probe key will be [null] if it does not bind the first + * column. Therefore, in order to have the 2nd column constraint we have + * to model it as an IElementFilter on the predicate. 
+ */ final Predicate<E> predOp = new Predicate<E>(new IVariableOrConstant[] { - x, new Constant<String>("Paul")}, NV + x, y}, NV .asMap(new NV[] {// new NV(Predicate.Annotations.RELATION_NAME, new String[] { namespace }),// new NV(Predicate.Annotations.PARTITION_ID, Integer .valueOf(-1)),// new NV(Predicate.Annotations.OPTIONAL, Boolean.FALSE),// - new NV(Predicate.Annotations.CONSTRAINT, null),// + new NV(Predicate.Annotations.CONSTRAINT,null),// new NV(Predicate.Annotations.EXPANDER, null),// new NV(Predicate.Annotations.BOP_ID, predId),// new NV(Predicate.Annotations.TIMESTAMP, @@ -578,6 +697,10 @@ // join annotations NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, joinId),// + // impose constraint on the join. + new NV(PipelineJoin.Annotations.CONSTRAINTS, + new IConstraint[] { new EQConstant(y, + new Constant<String>("Paul")) }),// })// ); @@ -590,31 +713,16 @@ // the expected solutions (order is not reliable due to concurrency). final IBindingSet[] expected = new IBindingSet[] {// -// new ArrayBindingSet(// -// new IVariable[] { x, y },// -// new IConstant[] { new Constant<String>("John"), -// new Constant<String>("Mary") }// -// ), // new ArrayBindingSet(// new IVariable[] { x, y },// - new IConstant[] { new Constant<String>("John"), + new IConstant[] { new Constant<String>("Leon"), new Constant<String>("Paul") }// ), // -// new ArrayBindingSet(// -// new IVariable[] { x, y },// -// new IConstant[] { new Constant<String>("Mary"), -// new Constant<String>("John") }// -// ), // new ArrayBindingSet(// new IVariable[] { x, y },// new IConstant[] { new Constant<String>("Mary"), new Constant<String>("Paul") }// ), // -// new ArrayBindingSet(// -// new IVariable[] { x, y },// -// new IConstant[] { new Constant<String>("Paul"), -// new Constant<String>("Leon") }// -// ), // }; // // partition0 // new E("John", "Mary"),// @@ -628,7 +736,7 @@ { final IBindingSet initialBindingSet = new HashBindingSet(); - initialBindingSet.set(y, new 
Constant<String>("Paul")); +// initialBindingSet.set(y, new Constant<String>("Paul")); final UUID queryId = UUID.randomUUID(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-16 19:43:17
|
Revision: 3573 http://bigdata.svn.sourceforge.net/bigdata/?rev=3573&view=rev Author: thompsonbry Date: 2010-09-16 19:43:08 +0000 (Thu, 16 Sep 2010) Log Message: ----------- Tracked down some problems with distributed query evaluation and added more test suites. Broke out the "map binding sets over shards" capability into its own package, fixed a bug where it was failing on predicates which were only partly bound, updated the unit tests, refactored the implementation to include an interface which may be used to realize a variety of different algorithms for efficiently mapping binding sets across shards, detailed several such implementations, and provided two such implementations - one for fully bound predicates and another which is a general purpose technique and is what we had been using historically. Several of the described algorithms can be significantly more efficient for various conditions. I have filed an issue to implement and test these various alternative algorithms. See https://sourceforge.net/apps/trac/bigdata/ticket/162. Modified the PipelineOp#newBuffer() method to accept the BOpStats from the caller and to wrap the buffer such that it automatically tracks the #of written units and chunks. This was necessary for some operators where we otherwise did not have the necessary scope to properly track those statistics. I plan to do a similar thing with the source. Fixed some problems with SliceOp and how binding sets are routed to the query controller. Still working through the distributed query evaluation test suite. 
Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/DistinctBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/btree/AbstractNode.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/AbstractUnsynchronizedArrayBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/UnsyncLocalOutputBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/pipeline/UnsyncDistributedOutputBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/service/ndx/ISplitter.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestConditionalRoutingOp.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestCopyBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestDistinctBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Algorithm_AsGivenPredicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Algorithm_FullyBoundPredicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Algorithm_GroupByLocatorScan.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Algorithm_LowShardCount.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Algorithm_NestedLocatorScan.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Bundle.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/IShardMapper.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/MapBindingSetsOverShardsBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Splitter.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestBOpStats.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/nodes/ 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/nodes/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/nodes/TestMapBindingSetsOverNodes.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestMapBindingSetsOverNodes.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestMapBindingSetsOverShards.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -137,7 +137,7 @@ * * @return The value of the annotation. * - * @throws IllegalArgumentException + * @throws IllegalStateException * if the named annotation is not bound. * * @todo Note: This variant without generics is required for some java @@ -153,6 +153,14 @@ BOp clone(); /** + * Return the {@link Annotations#BOP_ID}. + * + * @throws IllegalStateException + * if that annotation is not bound. + */ + int getId(); + + /** * Return the evaluation context for the operator. The default is * {@link BOpEvaluationContext#ANY}. 
Operators which must be mapped against * shards, mapped against nodes, or evaluated on the query controller must Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -323,12 +323,18 @@ final Object tmp = annotations.get(name); if (tmp == null) - throw new IllegalArgumentException("Required property: " + name); + throw new IllegalStateException("Required property: " + name); return tmp; } + public int getId() { + + return (Integer) getRequiredProperty(Annotations.BOP_ID); + + } + public String toString() { final StringBuilder sb = new StringBuilder(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -236,6 +236,9 @@ * @todo modify to accept {@link IChunkMessage} or an interface available * from getChunk() on {@link IChunkMessage} which provides us with * flexible mechanisms for accessing the chunk data. + * <p> + * When doing that, modify to automatically track the {@link BOpStats} + * as the <i>source</i> is consumed. 
*/ // * @throws IllegalArgumentException // * if the <i>indexManager</i> is <code>null</code> Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPipelineOp.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPipelineOp.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -59,9 +59,13 @@ * operators which write on the database) then the operator MAY return an * immutable empty buffer. * + * @param stats + * The statistics on this object will automatically be updated as + * elements and chunks are output onto the returned buffer. + * * @return The buffer. */ - IBlockingBuffer<E[]> newBuffer(); + IBlockingBuffer<E[]> newBuffer(BOpStats stats); /** * Return a {@link FutureTask} which computes the operator against the Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -209,11 +209,68 @@ } - public IBlockingBuffer<E[]> newBuffer() { + public IBlockingBuffer<E[]> newBuffer(final BOpStats stats) { - return new BlockingBuffer<E[]>(getChunkOfChunksCapacity(), - getChunkCapacity(), getChunkTimeout(), chunkTimeoutUnit); + if (stats == null) + throw new IllegalArgumentException(); + + return new BlockingBufferWithStats<E[]>(getChunkOfChunksCapacity(), + getChunkCapacity(), getChunkTimeout(), chunkTimeoutUnit, stats); } + private static class BlockingBufferWithStats<E> extends BlockingBuffer<E> { + + private final BOpStats stats; + + /** + * @param chunkOfChunksCapacity + * @param chunkCapacity + * @param chunkTimeout + * @param 
chunktimeoutunit + * @param stats + */ + public BlockingBufferWithStats(int chunkOfChunksCapacity, + int chunkCapacity, long chunkTimeout, + TimeUnit chunktimeoutunit, final BOpStats stats) { + + this.stats = stats; + + } + + /** + * Overridden to track {@link BOpStats#unitsOut} and + * {@link BOpStats#chunksOut}. + * <p> + * Note: {@link BOpStats#chunksOut} will report the #of chunks added to + * this buffer. However, the buffer MAY combine chunks either on add() + * or when drained by the iterator so the actual #of chunks read back + * from the iterator MAY differ. + * <p> + * {@inheritDoc} + */ + @Override + public boolean add(final E e, final long timeout, final TimeUnit unit) + throws InterruptedException { + + final boolean ret = super.add(e, timeout, unit); + + if (e.getClass().getComponentType() != null) { + + stats.unitsOut.add(((Object[]) e).length); + + } else { + + stats.unitsOut.increment(); + + } + + stats.chunksOut.increment(); + + return ret; + + } + + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/ConditionalRoutingOp.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -183,8 +183,8 @@ sink.add(def); else sink.add(Arrays.copyOf(def, ndef)); - stats.chunksOut.increment(); - stats.unitsOut.add(ndef); +// stats.chunksOut.increment(); +// stats.unitsOut.add(ndef); } if (nalt > 0) { @@ -192,8 +192,8 @@ sink2.add(alt); else sink2.add(Arrays.copyOf(alt, nalt)); - stats.chunksOut.increment(); - stats.unitsOut.add(nalt); +// stats.chunksOut.increment(); +// stats.unitsOut.add(nalt); } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -36,6 +36,7 @@ import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.engine.BOpStats; +import com.bigdata.bop.engine.IChunkAccessor; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; @@ -82,40 +83,39 @@ } /** - * Copy the source to the sink. + * Copy the source to the sink. + * + * @todo Optimize this. When using an {@link IChunkAccessor} we should be + * able to directly output the same chunk. */ static private class CopyTask implements Callable<Void> { - private final BOpStats stats; + private final BOpContext<IBindingSet> context; - private final IAsynchronousIterator<IBindingSet[]> source; - - private final IBlockingBuffer<IBindingSet[]> sink; - CopyTask(final BOpContext<IBindingSet> context) { - stats = context.getStats(); + this.context = context; - this.source = context.getSource(); - - this.sink = context.getSink(); - } public Void call() throws Exception { + final IAsynchronousIterator<IBindingSet[]> source = context.getSource(); + final IBlockingBuffer<IBindingSet[]> sink = context.getSink(); try { + final BOpStats stats = context.getStats(); while (source.hasNext()) { final IBindingSet[] chunk = source.next(); stats.chunksIn.increment(); stats.unitsIn.add(chunk.length); sink.add(chunk); - stats.chunksOut.increment(); - stats.unitsOut.add(chunk.length); +// stats.chunksOut.increment(); +// stats.unitsOut.add(chunk.length); } sink.flush(); return null; } finally { sink.close(); + source.close(); } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -106,7 +106,7 @@ chunksIn.add(o.chunksIn.get()); unitsIn.add(o.unitsIn.get()); unitsOut.add(o.unitsOut.get()); - chunksOut.add(o.chunksIn.get()); + chunksOut.add(o.chunksOut.get()); } public String toString() { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -34,12 +34,15 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import org.apache.log4j.Logger; import com.bigdata.bop.BOp; +import com.bigdata.util.InnerCause; /** * The run state for a {@link RunningQuery}. This class is NOT thread-safe. @@ -83,6 +86,24 @@ private final UUID queryId; /** + * The query deadline. + * + * @see BOp.Annotations#TIMEOUT + * @see RunningQuery#getDeadline() + */ + private final long deadline; + + /** + * Set to <code>true</code> iff the query evaluation is complete due to + * normal termination. + * <p> + * Note: This is package private to expose it to {@link RunningQuery}. + * + * @see #haltOp(HaltOpMessage) + */ + /*private*/ final AtomicBoolean allDone = new AtomicBoolean(false); + + /** * The #of run state transitions which have occurred for this query. 
*/ private long nsteps = 0; @@ -131,6 +152,8 @@ this.queryId = query.getQueryId(); + this.deadline = query.getDeadline(); + // this.nops = query.bopIndex.size(); } @@ -193,8 +216,11 @@ /** * @return <code>true</code> if this is the first time we will evaluate the * op. + * + * @throws TimeoutException + * if the deadline for the query has passed. */ - public boolean startOp(final StartOpMessage msg) { + public boolean startOp(final StartOpMessage msg) throws TimeoutException { nsteps++; @@ -257,35 +283,40 @@ // + ",fanIn=" + msg.nchunks); if (TableLog.tableLog.isInfoEnabled()) { - TableLog.tableLog -.info(getTableRow("startOp", msg.serviceId, + TableLog.tableLog.info(getTableRow("startOp", msg.serviceId, msg.bopId, msg.partitionId, msg.nchunks/* fanIn */, null/* cause */, null/* stats */)); } // check deadline. - final long deadline = query.getDeadline(); - if (deadline < System.currentTimeMillis()) { if (log.isTraceEnabled()) log.trace("expired: queryId=" + queryId + ", deadline=" + deadline); - query.future.halt(new TimeoutException()); + throw new TimeoutException(); - query.cancel(true/* mayInterruptIfRunning */); - } return firstTime; } /** - * Update termination criteria counters. @return <code>true</code> if the - * operator life cycle is over. + * Update termination criteria counters. If the query evaluation is over due + * to normal termination then {@link #allDone} is set to <code>true</code> + * as a side effect. + * + * @return <code>true</code> if the operator life cycle is over. + * + * @throws TimeoutException + * if the deadline has expired. + * @throws ExecutionException + * if the {@link HaltOpMessage#cause} was non-<code>null</code>, + * if which case it wraps {@link HaltOpMessage#cause}. */ - public boolean haltOp(final HaltOpMessage msg) { + public boolean haltOp(final HaltOpMessage msg) throws TimeoutException, + ExecutionException { nsteps++; @@ -354,9 +385,6 @@ } - // Figure out if this operator is done. 
- final boolean isDone = isOperatorDone(msg.bopId); - // System.err.println("haltOp : nstep=" + nsteps + ", bopId=" + msg.bopId // + ",totalRunningTaskCount=" + totalRunningTaskCount // + ",totalAvailableTaskCount=" + totalAvailableChunkCount @@ -378,41 +406,53 @@ /* * Test termination criteria */ - final long deadline = query.getDeadline(); + + // true if this operator is done. + final boolean isOpDone = isOperatorDone(msg.bopId); + // true if the entire query is done. + final boolean isAllDone = totalRunningTaskCount == 0 + && totalAvailableChunkCount == 0; + if (msg.cause != null) { - // operator failed on this chunk. - log.error("Error: Canceling query: queryId=" + queryId + ",bopId=" - + msg.bopId + ",partitionId=" + msg.partitionId, msg.cause); +// /* +// * @todo probably just wrap and throw rather than logging since this +// * class does not have enough insight into non-error exceptions +// * while Haltable does. +// */ +// if (!InnerCause.isInnerCause(msg.cause, InterruptedException.class) +// && !InnerCause.isInnerCause(msg.cause, +// TimeoutException.class)) { +// +// // operator failed on this chunk. +// log.error("Error: Canceling query: queryId=" + queryId +// + ",bopId=" + msg.bopId + ",partitionId=" +// + msg.partitionId, msg.cause); +// } - query.future.halt(msg.cause); + throw new ExecutionException(msg.cause); - query.cancel(true/* mayInterruptIfRunning */); + } else if (isAllDone) { - } else if (totalRunningTaskCount == 0 && totalAvailableChunkCount == 0) { - // success (all done). 
if (log.isTraceEnabled()) log.trace("success: queryId=" + queryId); - query.future.halt(query.getStats()); - - query.cancel(true/* mayInterruptIfRunning */); - + this.allDone.set(true); + } else if (deadline < System.currentTimeMillis()) { if (log.isTraceEnabled()) log.trace("expired: queryId=" + queryId + ", deadline=" + deadline); - query.future.halt(new TimeoutException()); + throw new TimeoutException(); - query.cancel(true/* mayInterruptIfRunning */); - } - return isDone; + return isOpDone; + } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -38,6 +38,7 @@ import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; @@ -63,7 +64,7 @@ /** * Metadata about running queries. */ -public class RunningQuery implements Future<Map<Integer,BOpStats>>, IRunningQuery { +public class RunningQuery implements Future<Void>, IRunningQuery { private final static transient Logger log = Logger .getLogger(RunningQuery.class); @@ -75,20 +76,6 @@ .getLogger(ChunkTask.class); /** - * The run state of the query and the result of the computation iff it - * completes execution normally (without being interrupted, cancelled, etc). - * <p> - * Note: Package private in order to expose this field to {@link RunState}. 
- */ - final /*private*/ Haltable<Map<Integer,BOpStats>> future = new Haltable<Map<Integer,BOpStats>>(); - - /** - * The runtime statistics for each {@link BOp} in the query and - * <code>null</code> unless this is the query controller. - */ - final private ConcurrentHashMap<Integer/* bopId */, BOpStats> statsMap; - - /** * The class executing the query on this node. */ final private QueryEngine queryEngine; @@ -123,20 +110,15 @@ final private BindingSetPipelineOp query; /** - * The buffer used for the overall output of the query pipeline. - * <p> - * Note: In scale out, this only exists on the query controller. In order to - * ensure that the results are transferred to the query controller, the - * top-level operator in the query plan must specify - * {@link BOpEvaluationContext#CONTROLLER}. For example, {@link SliceOp} - * uses this {@link BOpEvaluationContext}. + * An index from the {@link BOp.Annotations#BOP_ID} to the {@link BOp}. */ - final private IBlockingBuffer<IBindingSet[]> queryBuffer; + protected final Map<Integer, BOp> bopIndex; /** - * An index from the {@link BOp.Annotations#BOP_ID} to the {@link BOp}. + * The run state of the query and the result of the computation iff it + * completes execution normally (without being interrupted, cancelled, etc). */ - protected final Map<Integer, BOp> bopIndex; + final private Haltable<Void> future = new Haltable<Void>(); /** * A collection of {@link Future}s for currently executing operators for @@ -145,6 +127,23 @@ private final ConcurrentHashMap<BSBundle, Future<?>> operatorFutures = new ConcurrentHashMap<BSBundle, Future<?>>(); /** + * The runtime statistics for each {@link BOp} in the query and + * <code>null</code> unless this is the query controller. + */ + final private ConcurrentHashMap<Integer/* bopId */, BOpStats> statsMap; + + /** + * The buffer used for the overall output of the query pipeline. + * <p> + * Note: In scale out, this only exists on the query controller. 
In order to + * ensure that the results are transferred to the query controller, the + * top-level operator in the query plan must specify + * {@link BOpEvaluationContext#CONTROLLER}. For example, {@link SliceOp} + * uses this {@link BOpEvaluationContext}. + */ + final private IBlockingBuffer<IBindingSet[]> queryBuffer; + + /** * A lock guarding {@link RunState#totalRunningTaskCount}, * {@link RunState#totalAvailableChunkCount}, * {@link RunState#availableChunkCountMap}. This is <code>null</code> unless @@ -159,6 +158,11 @@ * query controller. */ final private RunState runState; + + /** + * Flag used to prevent retriggering of {@link #lifeCycleTearDownQuery()}. + */ + final AtomicBoolean didQueryTearDown = new AtomicBoolean(false); /** * The chunks available for immediate processing (they must have been @@ -193,13 +197,18 @@ // set the deadline. if (!this.deadline .compareAndSet(Long.MAX_VALUE/* expect */, deadline/* update */)) { + // the deadline is already set. throw new IllegalStateException(); + } if (deadline < System.currentTimeMillis()) { + // deadline has already expired. + future.halt(new TimeoutException()); cancel(true/* mayInterruptIfRunning */); + } } @@ -252,7 +261,9 @@ * Return the operator tree for this query. */ public BindingSetPipelineOp getQuery() { + return query; + } /** @@ -276,11 +287,23 @@ } /** - * + * @param queryEngine + * The {@link QueryEngine} on which the query is running. In + * scale-out, a query is typically instantiated on many + * {@link QueryEngine}s. * @param queryId - * @param begin + * The identifier for that query. + * @param controller + * <code>true</code> iff the {@link QueryEngine} is the query + * controller for this query (the {@link QueryEngine} which will + * coordinate the query evaluation). * @param clientProxy + * The query controller. In standalone, this is the same as the + * <i>queryEngine</i>. 
In scale-out, this is a proxy for the + * query controller whenever the query is instantiated on a node + * other than the query controller itself. * @param query + * The query. * * @throws IllegalArgumentException * if any argument is <code>null</code>. @@ -318,20 +341,42 @@ this.query = query; - this.bopIndex = BOpUtility.getIndex(query); + bopIndex = BOpUtility.getIndex(query); - this.statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() + statsMap = controller ? new ConcurrentHashMap<Integer, BOpStats>() : null; runStateLock = controller ? new ReentrantLock() : null; runState = controller ? new RunState(this) : null; - // Note: only exists on the query controller. - this.queryBuffer = controller ? newQueryBuffer() : null; - -// System.err -// .println("new RunningQuery:: queryId=" + queryId + if (controller) { + + final BOpStats queryStats = query.newStats(); + + statsMap.put((Integer) query + .getRequiredProperty(BOp.Annotations.BOP_ID), queryStats); + + if (!query.isMutation()) { + + queryBuffer = query.newBuffer(queryStats); + + } else { + + // Note: Not used for mutation queries. + queryBuffer = null; + + } + + } else { + + // Note: only exists on the query controller. + queryBuffer = null; + + } + + // System.err + // .println("new RunningQuery:: queryId=" + queryId // + ", isController=" + controller + ", queryController=" // + clientProxy + ", queryEngine=" // + queryEngine.getServiceUUID()); @@ -339,22 +384,6 @@ } /** - * Return the buffer on which the solutions will be written (if any). This - * is based on the top-level operator in the query plan. - * - * @return The buffer for the solutions -or- <code>null</code> if the - * top-level operator in the query plan is a mutation operator. 
- */ - protected IBlockingBuffer<IBindingSet[]> newQueryBuffer() { - - if (query.isMutation()) - return null; - - return ((BindingSetPipelineOp) query).newBuffer(); - - } - - /** * Take a chunk generated by some pass over an operator and make it * available to the target operator. How this is done depends on whether the * query is running against a standalone database or the scale-out database. @@ -372,10 +401,10 @@ * @param sink * The intermediate results to be passed to that target operator. * - * @return The #of chunks made available for consumption by the sink. This - * will always be ONE (1) for scale-up. For scale-out, there will be - * one chunk per index partition over which the intermediate results - * were mapped. + * @return The #of {@link IChunkMessage} sent. This will always be ONE (1) + * for scale-up. For scale-out, there will be at least one + * {@link IChunkMessage} per index partition over which the + * intermediate results were mapped. */ protected <E> int handleOutputChunk(final int sinkId, final IBlockingBuffer<IBindingSet[]> sink) { @@ -478,6 +507,11 @@ if (runState.startOp(msg)) lifeCycleSetUpOperator(msg.bopId); + } catch(TimeoutException ex) { + + future.halt(ex); + cancel(true/* mayInterruptIfRunning */); + } finally { runStateLock.unlock(); @@ -508,6 +542,8 @@ if (tmp != null) tmp.add(msg.taskStats); + Throwable cause = null; + boolean allDone = false; runStateLock.lock(); try { @@ -520,14 +556,53 @@ */ lifeCycleTearDownOperator(msg.bopId); + + if(runState.allDone.get()) { + + allDone = true; + + } } - + + } catch(Throwable ex) { + + cause = ex; + } finally { runStateLock.unlock(); } + + /* + * Handle query termination once we have released the runStateLock. + * + * Note: In scale-out, query termination can involve RMI to the nodes on + * which query operators are known to be running and to nodes on which + * resources were allocated which were scoped to the query or an + * operator's evaluation. 
Those RMI messages should not go out while we + * are holding the runStateLock since that could cause deadlock with + * call backs on haltOp() from the query peers for that query. + */ + + if (cause != null) { + + /* + * Timeout, interrupted, operator error, or internal error in + * RunState. + */ + + future.halt(cause); + cancel(true/* mayInterruptIfRunning */); + + } else if (allDone) { + + // Normal termination. + future.halt((Void) null); + cancel(true/* mayInterruptIfRunning */); + + } } @@ -753,13 +828,15 @@ + bop); } - sink = (p == null ? queryBuffer : op.newBuffer()); + final BOpStats stats = op.newStats(); + + sink = (p == null ? queryBuffer : op.newBuffer(stats)); - altSink = altSinkId == null ? null : op.newBuffer(); + altSink = altSinkId == null ? null : op.newBuffer(stats); // context : @todo pass in IChunkMessage or IChunkAccessor context = new BOpContext<IBindingSet>(RunningQuery.this, - partitionId, op.newStats(), msg.getChunkAccessor() + partitionId, stats, msg.getChunkAccessor() .iterator(), sink, altSink); // FutureTask for operator execution (not running yet). @@ -903,11 +980,7 @@ * <p> * Since this involves RMI to the nodes, we should not issue those RMIs * while holding the {@link #runStateLock} (and this could even deadlock - * with callback from those nodes). Perhaps - * {@link RunState#haltOp(HaltOpMessage)} should throw back the - * {@link HaltOpMessage} or a {@link TimeoutException} if the deadline has - * expired and then let {@link RunningQuery#haltOp(HaltOpMessage)} handle - * the termination of the query, which it can do without holding the lock. + * with call back from those nodes). * <p> * When the controller sends a node a terminate signal for an operator, it * should not bother to RMI back to the controller (unless this is done for @@ -931,22 +1004,24 @@ // close the output sink. queryBuffer.close(); } - // life cycle hook for the end of the query. 
- lifeCycleTearDownQuery(); + if(didQueryTearDown.compareAndSet(false/*expect*/, true/*update*/)) { + // life cycle hook for the end of the query. + lifeCycleTearDownQuery(); + } // remove from the collection of running queries. queryEngine.runningQueries.remove(queryId, this); // true iff we cancelled something. return cancelled; } - final public Map<Integer, BOpStats> get() throws InterruptedException, + final public Void get() throws InterruptedException, ExecutionException { return future.get(); } - final public Map<Integer, BOpStats> get(long arg0, TimeUnit arg1) + final public Void get(long arg0, TimeUnit arg1) throws InterruptedException, ExecutionException, TimeoutException { return future.get(arg0, arg1); @@ -977,4 +1052,18 @@ } + public String toString() { + final StringBuilder sb = new StringBuilder(getClass().getName()); + sb.append("{queryId=" + queryId); + sb.append(",deadline=" + deadline.get()); + sb.append(",isDone=" + isDone()); + sb.append(",isCancelled=" + isCancelled()); + sb.append(",runState=" + runState); + sb.append(",controller=" + controller); + sb.append(",clientProxy=" + clientProxy); + sb.append(",query=" + query); + sb.append("}"); + return sb.toString(); + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -47,6 +47,7 @@ import com.bigdata.bop.engine.IQueryPeer; import com.bigdata.bop.engine.LocalChunkMessage; import com.bigdata.bop.engine.RunningQuery; +import com.bigdata.bop.fed.shards.MapBindingSetsOverShardsBuffer; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; import 
com.bigdata.journal.TemporaryStoreFactory; @@ -55,6 +56,7 @@ import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.IBuffer; +import com.bigdata.relation.rule.eval.pipeline.DistributedJoinTask; import com.bigdata.resources.ResourceManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.ResourceService; @@ -363,6 +365,14 @@ * {@link ByteBuffer} and notifying the receiving service that there are * intermediate results which it can pull when it is ready to process them. * This pattern allows the receiver to impose flow control on the producer. + * + * @todo Figure out how (or if) we will combine binding set streams emerging + * from concurrent tasks executing on a given node destined for the + * same shard/node. (There is code in the {@link DistributedJoinTask} + * which does this for the same shard, but it does it on the receiver + * side.) Pay attention to the #of threads running in the join, the + * potential concurrency of threads targeting the same (bopId,shardId) + * and how to best combine their data together. */ @Override protected <E> int handleOutputChunk(final int sinkId, @@ -405,20 +415,33 @@ * * @todo Set the capacity of the the "map" buffer to the size of the * data contained in the sink (in fact, we should just process the - * sink data in place). + * sink data in place using an expanded IChunkAccessor interface). + * + * @todo high volume operators will need different capacity + * parameters. + * + * FIXME the chunkSize will limit us to RMI w/ the payload inline + * when it is the same as the threshold for NIO chuck transfers. 
+ * This needs to be adaptive and responsive to the actual data scale + * of the operator's outputs */ @SuppressWarnings("unchecked") final IPredicate<E> pred = ((IShardwisePipelineOp) bop).getPredicate(); final IKeyOrder<E> keyOrder = pred.getKeyOrder(); final long timestamp = pred.getTimestamp(); final int capacity = 1000;// @todo - final int capacity2 = 1000;// @todo + final int chunkOfChunksCapacity = 10;// @todo small queue + final int chunkSize = 100;// @todo modest chunks. final MapBindingSetsOverShardsBuffer<IBindingSet, E> mapper = new MapBindingSetsOverShardsBuffer<IBindingSet, E>( getFederation(), pred, keyOrder, timestamp, capacity) { @Override - IBuffer<IBindingSet[]> newBuffer(final PartitionLocator locator) { - // @todo chunkCapacity and chunkOfChunksCapacity plus timeout stuff. - return new BlockingBuffer<IBindingSet[]>(capacity2); + protected IBuffer<IBindingSet[]> newBuffer(final PartitionLocator locator) { + return new BlockingBuffer<IBindingSet[]>( + chunkOfChunksCapacity,// + chunkSize,// + BlockingBuffer.DEFAULT_CONSUMER_CHUNK_TIMEOUT,// + BlockingBuffer.DEFAULT_CONSUMER_CHUNK_TIMEOUT_UNIT// + ); } }; /* @@ -454,17 +477,11 @@ * * @todo This stage should probably be integrated with the stage * which maps the binding sets over the shards (immediately above) - * to minimize copying or visiting in the data. - * - * FIXME Review the definition of an "output chunk" from the - * perspective of the atomic query termination decision. I think - * that it probably corresponds to a "message" sent to a node. For - * each message sent, we must later observe the evaluate of the - * operator on that node+shard. If the receiver is permitted to - * combine messages, then it must tell us how many messages were - * consumed. + * to minimize copying or visiting in the data. This could be done + * by hooking the method which outputs a chunk to instead directly + * send the IChunkMessage. 
*/ - int nchunksout = 0; + int messageSendCount = 0; for (Map.Entry<PartitionLocator, IBuffer<IBindingSet[]>> e : mapper .getSinks().entrySet()) { @@ -484,11 +501,11 @@ sendChunkMessage(locator.getDataServiceUUID(), sinkId, locator .getPartitionId(), allocationContext, shardSink); - nchunksout++; + messageSendCount++; } - return nchunksout; + return messageSendCount; } case CONTROLLER: { Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java 2010-09-16 19:40:48 UTC (rev 3572) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java 2010-09-16 19:43:08 UTC (rev 3573) @@ -1,499 +0,0 @@ -package com.bigdata.bop.fed; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.Map; - -import org.apache.log4j.Logger; - -import com.bigdata.bop.BOp; -import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IPredicate; -import com.bigdata.bop.engine.QueryEngine; -import com.bigdata.btree.BytesUtil; -import com.bigdata.btree.IIndex; -import com.bigdata.btree.keys.IKeyBuilder; -import com.bigdata.journal.NoSuchIndexException; -import com.bigdata.journal.TimestampUtility; -import com.bigdata.mdi.IMetadataIndex; -import com.bigdata.mdi.PartitionLocator; -import com.bigdata.relation.IRelation; -import com.bigdata.relation.accesspath.AbstractUnsynchronizedArrayBuffer; -import com.bigdata.relation.accesspath.IBuffer; -import com.bigdata.relation.rule.eval.pipeline.DistributedJoinTask; -import com.bigdata.service.IBigdataFederation; -import com.bigdata.service.Split; -import com.bigdata.service.ndx.AbstractSplitter; -import com.bigdata.striterator.IKeyOrder; - -/** - * Unsynchronized (non-thread safe) buffer maps the {@link 
IBindingSet}s across - * the index partition(s) associated with an {@link IPredicate} and - * {@link IKeyOrder}. For each source chunk, "as bound" versions of the target - * {@link IPredicate} are constructed and the {@link IBindingSet}s in the chunk - * are reordered based on {@link IKeyOrder#getFromKey(IKeyBuilder, IPredicate)} - * for each asBound predicate. The {@link PartitionLocator}s are discovered for - * each fromKey using an ordered locator scan and the binding sets are output - * onto a shard or node specific {@link IBuffer} created by a concrete subclass. - * The subclass is responsible for getting the binding sets from this node onto - * the node associated with each output buffer. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id: UnsyncDistributedOutputBuffer.java 3448 2010-08-18 20:55:58Z - * thompsonbry $ - * @param <E> - * The generic type of the elements in the buffer. - * @param <F> - * The generic type of the elements in the relation associated with - * the {@link IPredicate}. - * - * @todo This could be refactored such that it no longer implemented - * {@link IBuffer} but instead was a {@link BOp} with binding sets - * streaming in from its source. However, unlike a normal {@link BOp} it - * would have a compound sink and it would have to be tightly integrated - * with the {@link QueryEngine} to be used. - * - * @todo Figure out how we will combine binding set streams emerging from - * concurrent tasks executing on a given node destined for the same - * shard/node. (There is code in the {@link DistributedJoinTask} which - * does this for the same shard, but it does it on the receiver side.) Pay - * attention to the #of threads running in the join, the potential - * concurrency of threads targeting the same (bopId,shardId) and how to - * best combine their data together. 
- * - * @todo Optimize locator lookup by caching in {@link AbstractSplitter} and look - * at the code path for obtaining {@link PartitionLocator}s from the MDI. - * <p> - * For reads, we are permitted to cache the locators just as much as we - * like (but indirection would be introduced by a shared disk - * architecture). - * <p> - * For writes (or in a shard disk architecture) it is possible that the - * target shard will have moved by the time the receiver has notice of the - * intent to write on that shard or once the receiver has accepted the - * binding sets for that shard. The logic which moves the binding sets - * around will have to handle such 'stale locator' exceptions - * automatically. - * - * @todo This is not tracking the #of output chunks or the fanOut (#of - * shards/nodes which will receive binding sets). Given that the query - * engine will be managing the buffers on which the data are written, it - * might also update the appropriate statistics. - */ -public abstract class MapBindingSetsOverShardsBuffer<E extends IBindingSet, F> - extends AbstractUnsynchronizedArrayBuffer<E> { - - private static transient final Logger log = Logger.getLogger(MapBindingSetsOverShardsBuffer.class); - - /** - * The predicate from which we generate the asBound binding sets. This - * predicate and the {@link IKeyOrder} together determine the required - * access path. - */ - private final IPredicate<F> pred; - - /** - * Identifies the index for the access path required by the {@link #pred - * predicate}. - */ - private final IKeyOrder<F> keyOrder; - - /** - * The timestamp associated with the operation on the target access path. If - * the binding sets will be used to read on the shards of the target access - * path, then this is the read timestamp. If they will be used to write on - * the target access path, then this is the write timestamp. 
- */ - private final long timestamp; - - /** - * The {@link IKeyBuilder} for the index associated with the access path - * required by the predicate. - */ - private final IKeyBuilder keyBuilder; - - /** - * Used to efficient assign binding sets to index partitions. - */ - private final Splitter splitter; - -// /** -// */ -// private final BOpStats stats; - - /** - * @param fed - * The federation. - * @param pred - * The predicate associated with the target operator. The - * predicate identifies which variables and/or constants form the - * key for the access path and hence selects the shards on which - * the target operator must read or write. For example, when the - * target operator is a JOIN, this is the {@link IPredicate} - * associated with the right hand operator of the join. - * @param keyOrder - * Identifies the access path for the target predicate. - * @param timestamp - * The timestamp associated with the operation on the target - * access path. If the binding sets will be used to read on the - * shards of the target access path, then this is the read - * timestamp. If they will be used to write on the target access - * path, then this is the write timestamp. - * @param capacity - * The capacity of this buffer. - */ - public MapBindingSetsOverShardsBuffer( - final IBigdataFederation<?> fed,// - final IPredicate<F> pred, // - final IKeyOrder<F> keyOrder,// - final long timestamp,// - final int capacity) { - - super(capacity); - - if (fed == null) - throw new IllegalArgumentException(); - - if (pred == null) - throw new IllegalArgumentException(); - - if (keyOrder == null) - throw new IllegalArgumentException(); - -// this.context = context; - - this.pred = pred; - - this.keyOrder = keyOrder; - - this.timestamp = timestamp; - - /* - * Note: we can use the read view of the relation to get the IKeyBuilder - * even if we will be writing on the relation since the IKeyBuilder - * semantics can not be readily changed once an index has been created. 
- */ - { - - @SuppressWarnings("unchecked") - final IRelation<F> relation = (IRelation<F>) fed - .getResourceLocator().locate(pred.getOnlyRelationName(), - timestamp); - - final IIndex index = relation.getIndex(keyOrder); - - this.keyBuilder = index.getIndexMetadata().getKeyBuilder(); - - } - - /* - * Resolve a scale-out view of the metadata index for the target - * predicate. - */ - { - - final String namespace = pred.getOnlyRelationName(); - - final IMetadataIndex mdi = fed.getMetadataIndex(namespace + "." - + keyOrder.getIndexName(), timestamp); - - if (mdi == null) { - - throw new NoSuchIndexException("name=" + namespace - + ", timestamp=" + TimestampUtility.toString(timestamp)); - - } - - this.splitter = new Splitter(mdi); - - } - -// this.stats = context.getStats(); - - } - - /** - * Helper class efficiently splits an array of sorted keys into groups - * associated with a specific index partition. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - */ - static private class Splitter extends AbstractSplitter { - - private final IMetadataIndex mdi; - - public Splitter(final IMetadataIndex mdi) { - - if (mdi == null) - throw new IllegalArgumentException(); - - this.mdi = mdi; - - } - - @Override - protected IMetadataIndex getMetadataIndex(long ts) { - - return mdi; - - } - - } - - /** - * Helper class used to place the binding sets into order based on the - * {@link #fromKey} associated with the {@link #asBound} predicate. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - */ - private class Bundle implements Comparable<Bundle> { - - /** The binding set. */ - final IBindingSet bindingSet; - - /** The asBound predicate. */ - final IPredicate<F> asBound; - - /** The fromKey generated from that asBound predicate. 
*/ - final byte[] fromKey; - - public Bundle(final IBindingSet bindingSet) { - - this.bindingSet = bindingSet; - - this.asBound = pred.asBound(bindingSet); - - this.fromKey = keyOrder.getFromKey(keyBuilder, asBound); - - } - - /** - * Imposes an unsigned byte[] order on the {@link #fromKey}. - */ - public int compareTo(final Bundle o) { - - return BytesUtil.compareBytes(this.fromKey, o.fromKey); - - } - - /** - * Implemented to shut up findbugs, but not used. - */ - @SuppressWarnings("unchecked") - public boolean equals(final Object o) { - - if (this == o) - return true; - - if (!(o instanceof MapBindingSetsOverShardsBuffer.Bundle)) - return false; - - final MapBindingSetsOverShardsBuffer.Bundle t = (MapBindingSetsOverShardsBuffer.Bundle) o; - - if (compareTo(t) != 0) - return false; - - if (!bindingSet.equals(t.bindingSet)) - return false; - - if (!asBound.equals(t.asBound)) - return false; - - return true; - - } - - /** - * Implemented to shut up find bugs. - */ - public int hashCode() { - - if (hash == 0) { - - hash = Arrays.hashCode(fromKey); - - } - - return hash; - - } - private int hash = 0; - - } - - /** - * Maps the chunk of {@link IBindingSet}s across the index partition(s) for - * the sink join dimension. - * - * @param a - * A chunk of {@link IBindingSet}s. - */ - protected void handleChunk(final E[] chunk) { - - @SuppressWarnings("unchecked") - final Bundle[] bundles = new MapBindingSetsOverShardsBuffer.Bundle[chunk.length]; - - /* - * Create the asBound version of the predicate and the associated - * fromKey for each bindingSet in the chunk. - */ - for (int i = 0; i < chunk.length; i++) { - - bundles[i] = new Bundle(chunk[i]); - - } - - /* - * Sort the binding sets in the chunk by the fromKey associated with - * each asBound predicate. - */ - Arrays.sort(bundles); - - /* - * Construct a byte[][] out of the sorted fromKeys and then generate - * slices (Splits) which group the binding sets based on the target - * shard. 
- */ - final LinkedList<Split> splits; - { - - final byte[][] keys = new byte[bundles.length][]; - - for (int i = 0; i < bundles.length; i++) { - - keys[i] = bundles[i].fromKey; - - } - - splits = splitter.splitKeys(timestamp, 0/* fromIndex */, - bundles.length/* toIndex */, keys); - - } - - if (log.isTraceEnabled()) - log.trace("nsplits=" + splits.size() + ", pred=" + pred); - - /* - * For each split, write the binding sets in that split onto the - * corresponding buffer. - */ - for (Split split : splits) { - - // Note: pmd is a PartitionLocator, so this cast is valid. - final IBuffer<IBindingSet[]> sink = getBuffer((PartitionLocator) split.pmd); - - final IBindingSet[] slice = new IBindingSet[split.ntuples]; - - for (int j = 0, i = split.fromIndex; i < split.toIndex; i++, j++) { - - final IBindingSet bset = bundles[i].bindingSet; - - slice[j] = bset; - - if (log.isTraceEnabled()) - log - .trace("Mapping: keyOrder=" + keyOrder + ",bset=" - + bset + " onto partitionId=" - + split.pmd.getPartitionId()); - - } - -// for (int i = split.fromIndex; i < split.toIndex; i++) { -// -// final Bundle bundle = bundles[i]; -// -// sink.add(bundle.bindingSet); -// -//// stats.unitsOut.increment(); -// -// } - - sink.add(slice); - - } - - } - - /** - * Extended to flush each buffer which targets a specific index partition as - * well. - * <p> - * {@inheritDoc} - */ - @Override - public long flush() { - - final long n = super.flush(); - - for (IBuffer<IBindingSet[]> sink : sinks.values()) { - - if (!sink.isEmpty()) - sink.flush(); - - } - - return n; - - } - - /** - * The allocated sinks. - * <p> - * Note: Since the collection is not thread-safe, synchronization is - * required when adding to the collection and when visiting the elements of - * the collection. However, the {@link MapBindingSetsOverShardsBuffer} is not - * thread-safe either so this should be Ok. 
- */ - private final LinkedHashMap<PartitionLocator, IBuffer<IBindingSet[]>/* sink */> sinks = new LinkedHashMap<PartitionLocator, IBuffer<IBindingSet[]>>(); - - /** - * An immutable view of the si... [truncated message content] |
From: <ble...@us...> - 2010-09-16 19:40:54
|
Revision: 3572 http://bigdata.svn.sourceforge.net/bigdata/?rev=3572&view=rev Author: blevine218 Date: 2010-09-16 19:40:48 +0000 (Thu, 16 Sep 2010) Log Message: ----------- obsolete Removed Paths: ------------- branches/maven_scaleout/bigdata-integ/src/test/resources/config/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 18:00:35
|
Revision: 3571 http://bigdata.svn.sourceforge.net/bigdata/?rev=3571&view=rev Author: blevine218 Date: 2010-09-16 18:00:29 +0000 (Thu, 16 Sep 2010) Log Message: ----------- new config resources directory Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/resources/config/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 15:00:23
|
Revision: 3570 http://bigdata.svn.sourceforge.net/bigdata/?rev=3570&view=rev Author: blevine218 Date: 2010-09-16 15:00:12 +0000 (Thu, 16 Sep 2010) Log Message: ----------- obsolete Removed Paths: ------------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/start/config/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 14:59:37
|
Revision: 3569 http://bigdata.svn.sourceforge.net/bigdata/?rev=3569&view=rev Author: blevine218 Date: 2010-09-16 14:59:26 +0000 (Thu, 16 Sep 2010) Log Message: ----------- jini.start.config integration tests Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/config/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 14:54:40
|
Revision: 3568 http://bigdata.svn.sourceforge.net/bigdata/?rev=3568&view=rev Author: blevine218 Date: 2010-09-16 14:54:30 +0000 (Thu, 16 Sep 2010) Log Message: ----------- Integration test packages service.jini.start and service.jini.start.config Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/start/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/start/config/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 14:44:40
|
Revision: 3567 http://bigdata.svn.sourceforge.net/bigdata/?rev=3567&view=rev Author: blevine218 Date: 2010-09-16 14:44:29 +0000 (Thu, 16 Sep 2010) Log Message: ----------- MappedRDFDataLoadMaster integration tests now run successfully Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMasterRemote.java Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.java (from rev 3541, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.java 2010-09-16 14:44:29 UTC (rev 3567) @@ -0,0 +1,123 @@ +/** + +The Notice below must appear in each file of the Source Code of any +copy you distribute of the Licensed Product. Contributors to any +Modifications may add their own copyright notices to identify their +own contributions. + +License: + +The contents of this file are subject to the CognitiveWeb Open Source +License Version 1.1 (the License). You may not copy or use this file, +in either source code or executable form, except in compliance with +the License. You may obtain a copy of the License from + + http://www.CognitiveWeb.org/legal/license/ + +Software distributed under the License is distributed on an AS IS +basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +the License for the specific language governing rights and limitations +under the License. 
+ +Copyrights: + +Portions created by or assigned to CognitiveWeb are Copyright +(c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact +information for CognitiveWeb is available at + + http://www.CognitiveWeb.org + +Portions Copyright (c) 2002-2003 Bryan Thompson. + +Acknowledgements: + +Special thanks to the developers of the Jabber Open Source License 1.0 +(JOSL), from which this License was derived. This License contains +terms that differ from JOSL. + +Special thanks to the CognitiveWeb Open Source Contributors for their +suggestions and support of the Cognitive Web. + +Modifications: + +*/ +/* + * Created on Oct 7, 2009 + */ + +package com.bigdata.service.jini.master; + +import java.io.File; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +import com.bigdata.DataFinder; + +import net.jini.config.ConfigurationException; + +import org.apache.zookeeper.KeeperException; +import org.junit.Test; + +import com.bigdata.rdf.load.MappedRDFDataLoadMaster; +import com.bigdata.service.jini.util.JiniServicesHelper; + +/** + * Unit tests for the {@link MappedTaskMaster}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestMappedRDFDataLoadMaster { + + protected boolean serviceImplRemote; + + /** + * + */ + public TestMappedRDFDataLoadMaster() { + this.serviceImplRemote = false; + } + + + protected TestMappedRDFDataLoadMaster(boolean serviceImplRemote) { + this.serviceImplRemote = serviceImplRemote; + } + + + private JiniServicesHelper helper; + + /** + * This runs a U1 data load. + * + * FIXME Go further and test the behavior of the pending set. 
+ * + * @throws KeeperException + */ + @Test + public void test() throws ConfigurationException, InterruptedException, + IOException, ExecutionException, KeeperException { + + JiniServicesHelper helper = null; + + final File tempConfigFile = JiniServicesHelper + .append(new File(DataFinder.bestPath("testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config") ) ); + + final String[] args = new String[] {tempConfigFile.getPath()}; + + try { + helper = new JiniServicesHelper(args, serviceImplRemote); + helper.start(); + + new MappedRDFDataLoadMaster(helper.getFederation()).execute(); + + } finally { + + // delete the temp file containing the federation configuration. + tempConfigFile.delete(); + + if (helper != null) + helper.destroy(); + + } + } +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMasterRemote.java (from rev 3541, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMasterRemote.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMasterRemote.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/TestMappedRDFDataLoadMasterRemote.java 2010-09-16 14:44:29 UTC (rev 3567) @@ -0,0 +1,57 @@ +/** + +The Notice below must appear in each file of the Source Code of any +copy you distribute of the Licensed Product. Contributors to any +Modifications may add their own copyright notices to identify their +own contributions. + +License: + +The contents of this file are subject to the CognitiveWeb Open Source +License Version 1.1 (the License). You may not copy or use this file, +in either source code or executable form, except in compliance with +the License. 
You may obtain a copy of the License from + + http://www.CognitiveWeb.org/legal/license/ + +Software distributed under the License is distributed on an AS IS +basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +the License for the specific language governing rights and limitations +under the License. + +Copyrights: + +Portions created by or assigned to CognitiveWeb are Copyright +(c) 2003-2003 CognitiveWeb. All Rights Reserved. Contact +information for CognitiveWeb is available at + + http://www.CognitiveWeb.org + +Portions Copyright (c) 2002-2003 Bryan Thompson. + +Acknowledgements: + +Special thanks to the developers of the Jabber Open Source License 1.0 +(JOSL), from which this License was derived. This License contains +terms that differ from JOSL. + +Special thanks to the CognitiveWeb Open Source Contributors for their +suggestions and support of the Cognitive Web. + +Modifications: + +*/ + +package com.bigdata.service.jini.master; + +/** + * Unit tests for the {@link MappedTaskMaster} using the purely remote + * service implementations. + */ +public class TestMappedRDFDataLoadMasterRemote + extends TestMappedRDFDataLoadMaster +{ + public TestMappedRDFDataLoadMasterRemote() { + super(true); + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 13:59:03
|
Revision: 3566 http://bigdata.svn.sourceforge.net/bigdata/?rev=3566&view=rev Author: blevine218 Date: 2010-09-16 13:58:54 +0000 (Thu, 16 Sep 2010) Log Message: ----------- added jini.master package Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/master/ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-16 13:37:20
|
Revision: 3565 http://bigdata.svn.sourceforge.net/bigdata/?rev=3565&view=rev Author: blevine218 Date: 2010-09-16 13:37:11 +0000 (Thu, 16 Sep 2010) Log Message: ----------- In support of the integration test framework: - It is no longer required to set the federation.name system property when running the tests. - When the federation.name system property is not set and that property is not set in the deployment properties, a fallback value is set of the form bigdata.test.group-<ipaddress>. This is done in ConfigDeployUtil.getFederationName(). - default-deploy.properties was changed to set federation.name.default to the empty string rather then the @FED@ token. - A number of places that needed to retrieve the federation name, now call ConfigDeployUtil.getFederationName() for this purpose. Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-16 10:55:26 UTC (rev 3564) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-16 13:37:11 UTC (rev 3565) @@ -117,8 +117,8 @@ <log4j.path>${log4j.configuration}</log4j.path> <default.nic>${default.nic}</default.nic> - <!-- Jini group name --> - <federation.name>${federation.name}</federation.name> + <!-- Jini group name + <federation.name>${federation.name}</federation.name> --> <!-- TODO !!!!!! <property key="java.class.path" value="${junit.classpath.text}" /> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-16 10:55:33
|
Revision: 3564 http://bigdata.svn.sourceforge.net/bigdata/?rev=3564&view=rev Author: thompsonbry Date: 2010-09-16 10:55:26 +0000 (Thu, 16 Sep 2010) Log Message: ----------- Working through SliceOp integration for standalone and scale-out. It currently cancels the query, which results in an exception being reported by RunningQuery.get(). That might be Ok, but the unit test needs to be updated and we need to report out the statistics anyway. I am also looking at termination conditions when a message is routed from a query peer to the query controller in scale-out, which is what happens for a SliceOp since it is evaluated at the query controller. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -168,7 +168,7 @@ * Perhaps the right thing is to expose an object with a richer API * for obtaining various kinds of iterators or even access to the * direct {@link ByteBuffer}s backing the data (for high volume joins, - * exernal merge sorts, etc). + * external merge sorts, etc). */ public final IAsynchronousIterator<E[]> getSource() { return source; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -33,10 +33,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.log4j.Logger; - import com.bigdata.bop.BOp.Annotations; import com.bigdata.bop.engine.BOpStats; import com.bigdata.btree.AbstractNode; @@ -54,7 +51,7 @@ */ public class BOpUtility { - private static final Logger log = Logger.getLogger(BOpUtility.class); +// private static final Logger log = Logger.getLogger(BOpUtility.class); /** * Pre-order recursive visitation of the operator tree (arguments only, no Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkAccessor.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -57,14 +57,14 @@ /** * Visit the binding sets in the chunk. * - * @deprecated We do not need to use {@link IAsynchronousIterator} any more. - * This could be much more flexible and should be harmonized to - * support high volume operators, GPU operators, etc. probably - * the right thing to do is introduce another interface here - * with a getChunk():IChunk where IChunk let's you access the - * chunks data in different ways (and chunks can be both - * {@link IBindingSet}[]s and element[]s so we might need to - * raise that into the interfaces and/or generics as well). + * @todo We do not need to use {@link IAsynchronousIterator} any more. This + * could be much more flexible and should be harmonized to support + * high volume operators, GPU operators, etc. probably the right thing + * to do is introduce another interface here with a getChunk():IChunk + * where IChunk let's you access the chunks data in different ways + * (and chunks can be both {@link IBindingSet}[]s and element[]s so we + * might need to raise that into the interfaces and/or generics as + * well). 
* * @todo It is likely that we can convert to the use of * {@link BlockingQueue} instead of {@link BlockingBuffer} in the Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -179,9 +179,9 @@ TableLog.tableLog.info("\n\nqueryId=" + queryId + "\n"); // TableLog.tableLog.info(query.getQuery().toString()+"\n"); TableLog.tableLog.info(getTableHeader()); - TableLog.tableLog - .info(getTableRow("startQ", serviceId, msg.getBOpId(), - -1/* shardId */, 1/* fanIn */, null/* stats */)); + TableLog.tableLog.info(getTableRow("startQ", serviceId, msg + .getBOpId(), -1/* shardId */, 1/* fanIn */, + null/* cause */, null/* stats */)); } // System.err.println("startQ : nstep="+nsteps+", bopId=" + bopId @@ -258,8 +258,9 @@ if (TableLog.tableLog.isInfoEnabled()) { TableLog.tableLog - .info(getTableRow("startOp", msg.serviceId, msg.bopId, - msg.partitionId, msg.nchunks/* fanIn */, null/* stats */)); +.info(getTableRow("startOp", msg.serviceId, + msg.bopId, msg.partitionId, msg.nchunks/* fanIn */, + null/* cause */, null/* stats */)); } // check deadline. @@ -363,7 +364,8 @@ if (TableLog.tableLog.isInfoEnabled()) { TableLog.tableLog.info(getTableRow("haltOp", msg.serviceId, - msg.bopId, msg.partitionId, fanOut, msg.taskStats)); + msg.bopId, msg.partitionId, fanOut, msg.cause, + msg.taskStats)); } // if (log.isTraceEnabled()) @@ -409,6 +411,7 @@ query.cancel(true/* mayInterruptIfRunning */); } + return isDone; } @@ -484,6 +487,8 @@ sb.append("\tbop"); + sb.append("\tcause"); + sb.append("\tstats"); sb.append('\n'); @@ -510,13 +515,18 @@ * specific index partition. * @param fanIO * The fanIn (startQ,startOp) or fanOut (haltOp). 
+ * @param cause + * The {@link Throwable} in a {@link HaltOpMessage} and + * <code>null</code> for other messages or if the + * {@link Throwable} was null. * @param stats * The statistics from the operator evaluation and - * <code>null</code> unless {@link #haltOp(HaltOpMessage)} is - * the invoker. + * <code>null</code> unless {@link #haltOp(HaltOpMessage)} is the + * invoker. */ private String getTableRow(final String label, final UUID serviceId, final int bopId, final int shardId, final int fanIO, + final Throwable cause, final BOpStats stats) { final StringBuilder sb = new StringBuilder(); @@ -558,12 +568,19 @@ sb.append('\t'); sb.append(serviceId == null ? "N/A" : serviceId.toString()); + // the operator. sb.append('\t'); sb.append(query.bopIndex.get(bopId)); + + // the thrown cause. + sb.append('\t'); + if (cause != null) + sb.append(cause.getLocalizedMessage()); + // the statistics. + sb.append('\t'); if (stats != null) { // @todo use a multi-column version of stats. - sb.append('\t'); sb.append(stats.toString()); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -51,7 +51,6 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.NoSuchBOpException; import com.bigdata.bop.PipelineOp; -import com.bigdata.bop.bset.CopyBindingSetOp; import com.bigdata.bop.solutions.SliceOp; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITx; @@ -125,14 +124,12 @@ /** * The buffer used for the overall output of the query pipeline. - * - * FIXME SCALEOUT: This should only exist on the query controller. Other - * nodes will send {@link IChunkMessage}s to the query controller. 
s/o will - * use an operator with {@link BOpEvaluationContext#CONTROLLER} in order to - * ensure that the results are transferred to the query controller. When a - * {@link SliceOp} is used, this is redundant. The operator in other cases - * can be a {@link CopyBindingSetOp} whose {@link BOpEvaluationContext} has - * been overridden. + * <p> + * Note: In scale out, this only exists on the query controller. In order to + * ensure that the results are transferred to the query controller, the + * top-level operator in the query plan must specify + * {@link BOpEvaluationContext#CONTROLLER}. For example, {@link SliceOp} + * uses this {@link BOpEvaluationContext}. */ final private IBlockingBuffer<IBindingSet[]> queryBuffer; @@ -330,8 +327,15 @@ runState = controller ? new RunState(this) : null; - this.queryBuffer = newQueryBuffer(); + // Note: only exists on the query controller. + this.queryBuffer = controller ? newQueryBuffer() : null; +// System.err +// .println("new RunningQuery:: queryId=" + queryId +// + ", isController=" + controller + ", queryController=" +// + clientProxy + ", queryEngine=" +// + queryEngine.getServiceUUID()); + } /** @@ -619,6 +623,12 @@ /** Alias for the {@link ChunkTask}'s logger. */ private final Logger log = chunkTaskLog; + /** + * The message with the materialized chunk to be consumed by the + * operator. + */ + final IChunkMessage<IBindingSet> msg; + /** The index of the bop which is being evaluated. */ private final int bopId; @@ -682,13 +692,20 @@ * by {@link PipelineOp#eval(BOpContext)} in order to handle the outputs * written on those sinks. * - * @param chunk + * @param msg * A message containing the materialized chunk and metadata * about the operator which will consume that chunk. + * + * @throws IllegalStateException + * unless {@link IChunkMessage#isMaterialized()} is + * <code>true</code>. 
*/ - public ChunkTask(final IChunkMessage<IBindingSet> chunk) { - bopId = chunk.getBOpId(); - partitionId = chunk.getPartitionId(); + public ChunkTask(final IChunkMessage<IBindingSet> msg) { + if(!msg.isMaterialized()) + throw new IllegalStateException(); + this.msg = msg; + bopId = msg.getBOpId(); + partitionId = msg.getPartitionId(); bop = bopIndex.get(bopId); if (bop == null) { throw new NoSuchBOpException(bopId); @@ -740,9 +757,9 @@ altSink = altSinkId == null ? null : op.newBuffer(); - // context + // context : @todo pass in IChunkMessage or IChunkAccessor context = new BOpContext<IBindingSet>(RunningQuery.this, - partitionId, op.newStats(), chunk.getChunkAccessor() + partitionId, op.newStats(), msg.getChunkAccessor() .iterator(), sink, altSink); // FutureTask for operator execution (not running yet). @@ -762,8 +779,7 @@ clientProxy.startOp(new StartOpMessage(queryId, bopId, partitionId, serviceId, fanIn)); if (log.isDebugEnabled()) - log.debug("Running chunk: queryId=" + queryId + ", bopId=" - + bopId + ", bop=" + bop); + log.debug("Running chunk: " + msg); ft.run(); // run ft.get(); // verify success if (sink != null && sink != queryBuffer && !sink.isEmpty()) { @@ -835,14 +851,20 @@ } // run() } // class ChunkTask - + /** * Return an iterator which will drain the solutions from the query. The * query will be cancelled if the iterator is * {@link ICloseableIterator#close() closed}. + * + * @throws UnsupportedOperationException + * if this is not the query controller. 
*/ public IAsynchronousIterator<IBindingSet[]> iterator() { + if(!controller) + throw new UnsupportedOperationException(); + return queryBuffer.iterator(); } @@ -872,11 +894,35 @@ * <li>must not cause the solutions to be discarded before the client can * consume them.</li> * </ul> + * + * FIXME SCALEOUT: Each query engine peer touched by the running query (or + * known to have an operator task running at the time that the query was + * halted) must be notified that the query has been terminated and the + * receiving query engines must interrupt any running tasks which they have + * locally for that query. + * <p> + * Since this involves RMI to the nodes, we should not issue those RMIs + * while holding the {@link #runStateLock} (and this could even deadlock + * with callback from those nodes). Perhaps + * {@link RunState#haltOp(HaltOpMessage)} should throw back the + * {@link HaltOpMessage} or a {@link TimeoutException} if the deadline has + * expired and then let {@link RunningQuery#haltOp(HaltOpMessage)} handle + * the termination of the query, which it can do without holding the lock. + * <p> + * When the controller sends a node a terminate signal for an operator, it + * should not bother to RMI back to the controller (unless this is done for + * the purposes of confirmation, which is available from the RMI return in + * any case). + * + * FIXME SCALEOUT: Life cycle methods for operators must have hooks for the + * operator implementations which are evaluated on the query controller + * (here) but also on the nodes on which the query will run (for hash + * partitioned operators). */ final public boolean cancel(final boolean mayInterruptIfRunning) { // halt the query. boolean cancelled = future.cancel(mayInterruptIfRunning); - // cancel any running operators for this query. + // cancel any running operators for this query on this node. 
for (Future<?> f : operatorFutures.values()) { if (f.cancel(mayInterruptIfRunning)) cancelled = true; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -359,18 +359,18 @@ + getServiceUUID() + ", msg=" + msg); } - // request from the query controller. + // request from the query controller (RMI). final BindingSetPipelineOp query = msg.getQueryController() .getQuery(msg.getQueryId()); q = newRunningQuery(FederatedQueryEngine.this, queryId, - isController, msg.getQueryController(), query); + false/* controller */, msg.getQueryController(), query); final RunningQuery tmp = runningQueries.putIfAbsent(queryId, q); if(tmp != null) { - // another thread won this race. + // another thread won this race : @todo memoize, RMI is too expensive. 
q = (FederatedRunningQuery) tmp; } @@ -424,8 +424,8 @@ final boolean controller, final IQueryClient clientProxy, final BindingSetPipelineOp query) { - return new FederatedRunningQuery(this, queryId, true/* controller */, - this/* clientProxy */, query); + return new FederatedRunningQuery(this, queryId, controller, + clientProxy, query); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -42,13 +42,14 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IPredicate; import com.bigdata.bop.IShardwisePipelineOp; -import com.bigdata.bop.engine.LocalChunkMessage; import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryPeer; +import com.bigdata.bop.engine.LocalChunkMessage; import com.bigdata.bop.engine.RunningQuery; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; +import com.bigdata.journal.TemporaryStoreFactory; import com.bigdata.mdi.PartitionLocator; import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.relation.accesspath.IAsynchronousIterator; @@ -89,14 +90,24 @@ * this query. */ private final UUID queryControllerUUID; - + /** * A map associating resources with running queries. When a query halts, the * resources listed in its resource map are released. Resources can include * {@link ByteBuffer}s backing either incoming or outgoing - * {@link LocalChunkMessage}s, temporary files associated with the query, hash - * tables, etc. + * {@link LocalChunkMessage}s, temporary files associated with the query, + * hash tables, etc. 
* + * @todo The {@link IAllocationContext} allows us to automatically release + * native {@link ByteBuffer}s used by the query. Such buffers do not + * need to be part of this map. This means that the only real use for + * the map will be temporary persistent resources, such as graphs or + * hash tables backed by a local file or the intermediate outputs of a + * sort operator. We may be able to manage the local persistent data + * structures using the {@link TemporaryStoreFactory} and manage the + * life cycle of the intermediate results for sort within its operator + * implementation. + * * @todo This map will eventually need to be moved into {@link RunningQuery} * in order to support temporary graphs or other disk-backed resources * associated with the evaluation of a query against a standalone @@ -111,8 +122,9 @@ * * @todo Only use the values in the map for transient objects, such as a * hash table which is not backed by the disk. For {@link ByteBuffer}s - * we want to make the references go through the {@link ResourceService} - * . For files, through the {@link ResourceManager}. + * we want to make the references go through the + * {@link ResourceService} . For files, through the + * {@link ResourceManager}. * * @todo We need to track the resources in use by the query so they can be * released when the query terminates. This includes: buffers; joins @@ -292,7 +304,7 @@ if(serviceUUID.equals(getQueryEngine().getServiceUUID())) { - // Return a hard reference to the query engine (NOT a proxy). + // Return a hard reference to this query engine (NOT a proxy). return getQueryEngine(); } else if (serviceUUID.equals(queryControllerUUID)) { @@ -369,6 +381,9 @@ switch (bop.getEvaluationContext()) { case ANY: { + /* + * This operator may be evaluated anywhere. 
+ */ return super.handleOutputChunk(sinkId, sink); } case HASHED: { @@ -490,11 +505,7 @@ sendChunkMessage(queryControllerUUID, sinkId, -1/* partitionId */, allocationContext, sink); - /* - * Chunks send to the query controller do not keep the query - * running. - */ - return 0; + return 1; } default: @@ -588,7 +599,7 @@ if (source.isEmpty()) throw new RuntimeException(); - // The peer to be notified. + // The peer to whom we send the message. final IQueryPeer peerProxy = getQueryPeer(serviceUUID); if (peerProxy == null) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/MapBindingSetsOverShardsBuffer.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -395,8 +395,10 @@ slice[j] = bset; if (log.isTraceEnabled()) - log.trace("Mapping: keyOrder=" + keyOrder + ",bset=" + bset - + " onto " + split.pmd.getPartitionId()); + log + .trace("Mapping: keyOrder=" + keyOrder + ",bset=" + + bset + " onto partitionId=" + + split.pmd.getPartitionId()); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -511,7 +511,7 @@ } public boolean isExhausted() { - return hasNext(); + return !hasNext(); } public E[] next(long timeout, TimeUnit unit) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -235,6 +235,7 @@ } + @SuppressWarnings("unchecked") public boolean hasNext() { if (current != null) @@ -293,7 +294,7 @@ } public boolean isExhausted() { - return hasNext(); + return !hasNext(); } public E[] next(long timeout, TimeUnit unit) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -97,7 +97,7 @@ public class PipelineJoin<E> extends BindingSetPipelineOp implements IShardwisePipelineOp<E> { - static private final Logger log = Logger.getLogger(PipelineJoin.class); + static private final transient Logger log = Logger.getLogger(PipelineJoin.class); /** * Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -132,25 +132,25 @@ // // if (!(args[0] instanceof BindingSetPipelineOp)) // throw new IllegalArgumentException(); - + } /** - * @see Annotations#OFFSET + * @see Annotations#OFFSET */ public long getOffset() { - - return (Long) getRequiredProperty(Annotations.OFFSET); - + + return getProperty(Annotations.OFFSET, 
Annotations.DEFAULT_OFFSET); + } /** - * @see Annotations#LIMIT + * @see Annotations#LIMIT */ public long getLimit() { + + return getProperty(Annotations.LIMIT, Annotations.DEFAULT_LIMIT); - return (Long) getRequiredProperty(Annotations.LIMIT); - } public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-16 10:55:26 UTC (rev 3564) @@ -89,7 +89,8 @@ #log4j.logger.com.bigdata.service=ALL #log4j.logger.com.bigdata.bop=ALL -#log4j.logger.com.bigdata.bop.join.PipelineJoin=ALL +log4j.logger.com.bigdata.bop.join.PipelineJoin=ALL + log4j.logger.com.bigdata.bop.engine=ALL log4j.logger.com.bigdata.bop.engine.QueryEngine=ALL log4j.logger.com.bigdata.bop.engine.RunningQuery=ALL @@ -98,6 +99,7 @@ log4j.logger.com.bigdata.bop.fed.FederatedQueryEngine=ALL log4j.logger.com.bigdata.bop.fed.FederatedRunningQuery=ALL log4j.logger.com.bigdata.bop.fed.MapBindingSetsOverShardsBuffer=ALL + #log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO #log4j.logger.com.bigdata.relation.rule.eval=INFO #log4j.logger.com.bigdata.relation.rule.eval.RuleState=DEBUG @@ -212,7 +214,7 @@ ## # BOp run state trace (tab delimited file). Uncomment the next line to enable. 
-#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender log4j.appender.queryRunStateLog.Threshold=ALL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -61,6 +61,7 @@ import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.fed.TestFederatedQueryEngine; import com.bigdata.bop.join.PipelineJoin; +import com.bigdata.bop.solutions.SliceOp; import com.bigdata.journal.BufferMode; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; @@ -139,12 +140,12 @@ final R rel = new R(store, namespace, ITx.UNISOLATED, new Properties()); rel.create(); - // data to insert. + // data to insert (in key order for convenience). final E[] a = {// - new E("John", "Mary"),// - new E("Mary", "Paul"),// - new E("Paul", "Leon"),// - new E("Leon", "Paul"),// + new E("John", "Mary"),// [0] + new E("Leon", "Paul"),// [1] + new E("Mary", "Paul"),// [2] + new E("Paul", "Leon"),// [3] }; // insert data (the records are not pre-sorted). @@ -270,7 +271,7 @@ final int predId = 3; final BindingSetPipelineOp query = new PipelineJoin<E>( // left - new CopyBindingSetOp(new BOp[] {}, NV.asMap(new NV[] {// + new StartOp(new BOp[] {}, NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, startId),// })), // right @@ -373,23 +374,141 @@ } /** - * @todo Test ability to impose a limit/offset slice on a query. 
- * <p> - * Note: While the logic for visiting only the solutions selected by - * the slice can be tested against a mock object, the integration by - * which a slice halts a query when it is satisfied has to be tested - * against a {@link QueryEngine}. - * <p> - * This must also be tested in scale-out to make sure that the data - * backing the solutions is not discarded before the caller can use - * those data. [This could be handled by materializing binding set - * objects out of a {@link ByteBuffer} rather than using a live decode - * of the data in that {@link ByteBuffer}.] + * Run a join with a slice. The slice is always evaluated on the query + * controller so adding it to the query plan touches a slightly different + * code path from adding another join (joins are evaluated shardwise, at + * least in scale-out). + * <p> + * Note: While the logic for visiting only the solutions selected by the + * slice can be tested against a mock object, the integration by which a + * slice halts a query when it is satisfied has to be tested against a + * {@link QueryEngine}. + * <p> + * This must also be tested in scale-out to make sure that the data backing + * the solutions is not discarded before the caller can use those data. + * [This could be handled by materializing binding set objects out of a + * {@link ByteBuffer} rather than using a live decode of the data in that + * {@link ByteBuffer}.] 
*/ - public void test_query_slice() { + public void test_query_slice() throws Exception { - fail("write test"); + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + final int startId = 1; + final int joinId = 2; + final int predId = 3; + final int sliceId = 4; + + final StartOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + })); + + final Predicate<E> predOp = new Predicate<E>(new IVariableOrConstant[] { + x, y }, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }),// + new NV(Predicate.Annotations.PARTITION_ID, Integer + .valueOf(-1)),// + new NV(Predicate.Annotations.OPTIONAL, Boolean.FALSE),// + new NV(Predicate.Annotations.CONSTRAINT, null),// + new NV(Predicate.Annotations.EXPANDER, null),// + new NV(Predicate.Annotations.BOP_ID, predId),// + new NV(Predicate.Annotations.TIMESTAMP, + ITx.READ_COMMITTED),// + })); + + final PipelineJoin<E> joinOp = new PipelineJoin<E>(startOp/* left */, + predOp/* right */, + // join annotations + NV.asMap(new NV[] { // + new NV(Predicate.Annotations.BOP_ID, joinId),// + })// + ); + + final BindingSetPipelineOp query = new SliceOp(new BOp[] { joinOp }, + // slice annotations + NV.asMap(new NV[] { // + new NV(BOp.Annotations.BOP_ID, sliceId),// + new NV(SliceOp.Annotations.OFFSET, 0L),// + new NV(SliceOp.Annotations.LIMIT, 2L),// + })// + ); + + // the expected solutions. 
+ final IBindingSet[] expected = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("John"), + new Constant<String>("Mary") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Leon"), + new Constant<String>("Paul") }// + ) }; + + final UUID queryId = UUID.randomUUID(); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1 /* partitionId */, + newBindingSetIterator(new HashBindingSet()))); + + // verify solutions. + assertSameSolutions(expected, runningQuery.iterator()); + + // Wait until the query is done. + final Map<Integer, BOpStats> statsMap = runningQuery.get(); + { + // validate the stats map. + assertNotNull(statsMap); + assertEquals(3, statsMap.size()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); + } + + // validate the stats for the start operator. + { + final BOpStats stats = statsMap.get(startId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("start: " + stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(1L, stats.unitsIn.get()); + assertEquals(1L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + // validate the stats for the join operator. + { + final BOpStats stats = statsMap.get(joinId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("join : " + stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(1L, stats.unitsIn.get()); + assertEquals(4L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + + // validate the stats for the slice operator. 
+ { + final BOpStats stats = statsMap.get(sliceId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("slice: " + stats.toString()); + + // verify query solution stats details. + assertEquals(1L, stats.chunksIn.get()); + assertEquals(4L, stats.unitsIn.get()); + assertEquals(2L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + } /** @@ -788,7 +907,7 @@ if (!actual.hasNext()) { fail(msg - + ": Index exhausted while expecting more object(s)" + + ": Iterator exhausted while expecting more object(s)" + ": index=" + j); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -509,6 +509,20 @@ assertEquals(1L, stats.chunksOut.get()); // @todo this depends on which index partitions we read on. } + // validate the stats for the slice operator. + { + final BOpStats stats = statsMap.get(sliceId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("slice: "+stats.toString()); + + // verify query solution stats details. 
+ assertEquals(1L, stats.chunksIn.get()); + assertEquals(2L, stats.unitsIn.get()); + assertEquals(2L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + } + } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -70,6 +70,52 @@ } /** + * Unit test for a message with a single chunk containing a single empty + * binding set. + */ + public void test_oneChunkWithEmptyBindingSet() { + + final List<IBindingSet> data = new LinkedList<IBindingSet>(); + { + data.add(new HashBindingSet()); + } + + final IQueryClient queryController = new MockQueryController(); + final UUID queryId = UUID.randomUUID(); + final int bopId = 1; + final int partitionId = 2; + final IBlockingBuffer<IBindingSet[]> source = new BlockingBuffer<IBindingSet[]>( + 10); + + // populate the source. + source.add(data.toArray(new IBindingSet[0])); + + // close the source. + source.close(); + + // build the chunk. + final IChunkMessage<IBindingSet> msg = new ThickChunkMessage<IBindingSet>( + queryController, queryId, bopId, partitionId, source); + + assertTrue(queryController == msg.getQueryController()); + + assertEquals(queryId, msg.getQueryId()); + + assertEquals(bopId, msg.getBOpId()); + + assertEquals(partitionId, msg.getPartitionId()); + + // the data is inline with the message. + assertTrue(msg.isMaterialized()); + + // verify the iterator. + assertSameIterator(data.toArray(new IBindingSet[0]), + new Dechunkerator<IBindingSet>(msg.getChunkAccessor() + .iterator())); + + } + + /** * Unit test for a message with a single chunk of binding sets. 
*/ public void test_oneChunk() { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java 2010-09-15 23:01:17 UTC (rev 3563) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java 2010-09-16 10:55:26 UTC (rev 3564) @@ -178,6 +178,10 @@ new NV(SliceOp.Annotations.LIMIT, 3L),// })); + assertEquals("offset", 1L, query.getOffset()); + + assertEquals("limit", 3L, query.getLimit()); + // the expected solutions final IBindingSet[] expected = new IBindingSet[] {// new ArrayBindingSet(// This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <sgo...@us...> - 2010-09-15 23:01:28
|
Revision: 3563 http://bigdata.svn.sourceforge.net/bigdata/?rev=3563&view=rev Author: sgossard Date: 2010-09-15 23:01:17 +0000 (Wed, 15 Sep 2010) Log Message: ----------- [maven_scaleout] : Breaking all direct dependency cycles with package 'com.bigdata.io', mainly via moving BytesUtil into io package. Most files touched are just an import change. Package still has major transitive cycles, again via package 'com.bigdata.counters'. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/bfs/AtomicBlockAppendProc.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTreeTupleCursor.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractChunkedTupleIterator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractNode.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BigdataMap.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/FixedLengthPrefixSplits.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IRangeQuery.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyAfterPartitionException.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyBeforePartitionException.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Leaf.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Node.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/ResultSet.java 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/data/DefaultLeafCoder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/Advancer.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/PrefixFilter.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/TupleFilter.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/IKeyBuilder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KVO.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KeyBuilder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/proc/AbstractKeyRangeIndexProcedure.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/proc/BatchRemove.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/AbstractRaba.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/MutableKeyBuffer.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/ReadOnlyKeysRaba.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/ReadOnlyValuesRaba.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/codec/CanonicalHuffmanRabaCoder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/codec/FixedLengthValueRabaCoder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/raba/codec/SimpleRabaCoder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedTupleCursor.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedTupleIterator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/AbstractFixedByteArrayBuffer.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/ByteArrayBuffer.java 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/FileLockUtility.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/FixedByteArrayBuffer.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/DumpJournal.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/mdi/LocalPartitionMetadata.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/mdi/PartitionLocator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/EmbeddedShardLocator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/lexicon/LexiconRelation.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/lexicon/Term2IdWriteProc.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/load/AssignedSplits.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/spo/FastRDFValueCoder2.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/spo/SPOIndexWriteProc.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/store/AbstractTripleStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/relation/accesspath/AbstractAccessPath.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/relation/rule/eval/pipeline/JoinTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/JoinIndexPartitionTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/SplitUtility.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/search/FullTextIndex.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/search/ReadIndexTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/AbstractScaleOutFederation.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ListIndicesTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/MetadataService.java 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/benchmark/ThroughputMaster.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/util/DumpFederation.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/AbstractSplitter.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/PartitionedTupleIterator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/RawDataServiceTupleIterator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/pipeline/DefaultDuplicateRemover.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/sparse/AbstractAtomicRowReadOrWrite.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/sparse/AtomicRowFilter.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/sparse/KeyDecoder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/sparse/LogicalRowSplitHandler.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/striterator/DistinctFilter.java branches/maven_scaleout/bigdata-core/src/main/native/com/bigdata/io/BytesUtil.c branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/btree/raba/codec/AbstractRabaCoderTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/AbstractBTreeTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestAll.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestChunkedIterators.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestConstrainKeys.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentMultiBlockIterators.java 
branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentWithBloomFilter.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestInsertLookupRemoveKeysInRootLeaf.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIterators.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/data/AbstractNodeOrLeafDataRecordTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/filter/TestPrefixFilter.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/isolation/TestIsolatedFusedView.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/keys/AbstractUnicodeKeyBuilderTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/keys/TestICUUnicodeKeyBuilder.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/keys/TestKeyBuilder.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/TestKeyBufferSearch.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/codec/AbstractRabaCoderTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/codec/RandomKeysGenerator.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/codec/RandomURIGenerator.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/codec/TestCanonicalHuffmanRabaCoder.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/raba/codec/TokenizeKeysGenerator.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/view/TestFusedView.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/counters/TestHistoryInstrument.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestByteArrayBuffer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestDataOutputBuffer.java 
branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestFileChannelUtility.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestFixedByteArrayBuffer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestUnisolatedWriteTasks.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/lexicon/TestComparators.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/lexicon/TestId2TermTupleSerializer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/lexicon/TestSerialization.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/lexicon/TestTerm2IdTupleSerializer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/spo/TestSPOKeyOrder.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/spo/TestSPOTupleSerializer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/store/AbstractTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/store/TestInsertRate.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/TestSegSplitter.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/search/TestKeyBuilder.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/AbstractEmbeddedFederationTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/TestRangeQuery.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/io/BytesUtil.java branches/maven_scaleout/bigdata-core/src/main/native/com/bigdata/io/ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestBytesUtil.java Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BytesUtil.java branches/maven_scaleout/bigdata-core/src/main/native/com/bigdata/btree/ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestBytesUtil.java 
Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -35,7 +35,7 @@ import org.apache.log4j.Logger; -import com.bigdata.btree.BytesUtil; +import com.bigdata.io.BytesUtil; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.IndexSegmentBuilder; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/bfs/AtomicBlockAppendProc.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/bfs/AtomicBlockAppendProc.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/bfs/AtomicBlockAppendProc.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -12,7 +12,7 @@ import com.bigdata.btree.AbstractBTree; import com.bigdata.btree.BTree; -import com.bigdata.btree.BytesUtil; +import com.bigdata.io.BytesUtil; import com.bigdata.btree.IIndex; import com.bigdata.btree.ILinearList; import com.bigdata.btree.IRangeQuery; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -41,6 +41,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.FutureTask; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; Modified: 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTreeTupleCursor.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTreeTupleCursor.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTreeTupleCursor.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -30,6 +30,7 @@ import java.util.NoSuchElementException; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Logger; import com.bigdata.btree.Leaf.ILeafListener; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractChunkedTupleIterator.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractChunkedTupleIterator.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractChunkedTupleIterator.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -32,6 +32,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Logger; import com.bigdata.btree.filter.IFilterConstructor; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractNode.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractNode.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractNode.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -39,6 +39,7 @@ import com.bigdata.btree.raba.IRaba; import com.bigdata.btree.raba.MutableKeyBuffer; import com.bigdata.cache.HardReferenceQueue; +import com.bigdata.io.BytesUtil; import cutthecrap.utils.striterators.Expander; import cutthecrap.utils.striterators.IStriterator; Modified: 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BigdataMap.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BigdataMap.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BigdataMap.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -40,6 +40,7 @@ import com.bigdata.btree.filter.FilterConstructor; import com.bigdata.btree.filter.TupleFilter; import com.bigdata.btree.keys.KeyBuilder; +import com.bigdata.io.BytesUtil; import com.bigdata.journal.ConcurrencyManager; /** Deleted: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BytesUtil.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BytesUtil.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/BytesUtil.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -1,956 +0,0 @@ -/** - -Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.btree; - -import it.unimi.dsi.fastutil.bytes.custom.CustomByteArrayFrontCodedList; -import it.unimi.dsi.io.InputBitStream; -import it.unimi.dsi.io.OutputBitStream; - -import java.util.Comparator; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.log4j.Logger; - -import com.bigdata.rawstore.Bytes; - -/** - * Class supporting operations on variable length byte[] keys. - * <p> - * Comparison operations that accept a starting offset are used when the byte[]s - * are known to share a leading prefix that may be skipped during comparison. - * <p> - * Comparison operations that accept a starting offset and length are used when - * immutable keys are stored in a single byte[] and an index into starting - * positions in that byte[] is maintained. - * <p> - * JNI methods are provided for unsigned byte[] comparison. However, note that - * the JNI methods do not appear to be as fast as the pure Java methods - - * presumably because of the overhead of going from Java to C. In order to - * execute using the JNI methods you MUST define the optional boolean system - * property, e.g., - * - * <pre> - * java -Dcom.bigdata.btree.BytesUtil.jni=true ... - * </pre> - * - * See BytesUtil.c in this package for instructions on compiling the JNI - * methods. - * </p> - * See {@link #main(String[])} which provides a test for the JNI integration and - * some pointers on how to get this running on your platform. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ -public class BytesUtil { - - protected static final transient Logger log = Logger.getLogger(BytesUtil.class); - - /** - * An empty <code>byte[]</code>. 
- */ - public static final byte[] EMPTY = new byte[0]; - - /** - * An empty <code>byte[][]</code>. - */ - public static final byte[][] EMPTY2 = new byte[0][]; - - /** - * Flag set iff JNI linking succeeds. When this flag is false we run with - * the pure Java implementations of these methods. When the flag is true, - * the JNI versions are used. - */ - static boolean linked = false; - - /** - * JNI routines are not invoked unless we will compare byte[]s with at least - * this many potential bytes to compare (the actual# may be much less of - * course since comparisons may fail fast). - */ - static public final int minlen = 100; - - static private native int _compareBytes(int alen, byte[] a, int blen, byte[] b); - static private native int _compareBytesWithOffsetAndLen(int aoff, int alen, byte[] a, int boff, int blen, byte[] b); - - static { - - final boolean jni; - - String val = System.getProperty("com.bigdata.btree.BytesUtil.jni"); - - if (val != null) { - - jni = Boolean.parseBoolean(val); - - } else { - - jni = false; // Note: We will not even try to use JNI by default! - - } - - if (jni) { - - /* - * Attempt to load the JNI library. - */ - - loadJNILibrary(); - - } - - } - - /** - * Attempt to load the JNI library. - * <p> - * Note: this is done automatically if the optional boolean system property - * <code>com.bigdata.btree.BytesUtil.jni=true</code> is specified, e.g., - * using - * - * <pre> - * java -Dcom.bigdata.btree.BytesUtil.jni=true ... - * </pre> - * - * @return True iff the JNI library was successfully linked. - */ - public static boolean loadJNILibrary() { - - if (!linked) { - - try { - - System.loadLibrary("BytesUtil"); - - if (log.isInfoEnabled()) - log.info("BytesUtil JNI linked"); - - linked = true; - - } catch (UnsatisfiedLinkError ex) { - - log.warn("BytesUtil JNI NOT linked: " + ex); - - linked = false; - - } - } - - return linked; - - } - - /** - * True iff the two arrays compare as equal. 
This is somewhat optimized in - * that it tests the array lengths first, assumes that it is being used on - * sorted data and therefore compares the last bytes first, and does not - * convert the bytes to unsigned integers before testing for equality. - * - * @param a - * A byte[]. - * @param b - * Another byte[]. - * - * @return If the two arrays have the same reference (including - * <code>null</code>) or if they have the same data. - */ - final public static boolean bytesEqual(final byte[] a, final byte[] b) { - - if (a == b) - return true; - - final int alen = a.length; - - final int blen = b.length; - - if (alen != blen) - return false; - - int i = alen - 1; - - while (i >= 0) { - - if (a[i] != b[i]) - return false; - - i--; - - } - -// for (int i = 0; i < alen; i++) { -// -// if( a[i] != b[i] ) return false; -// -// } - - return true; - - } - - /** - * Byte-wise comparison of byte[]s (the arrays are treated as arrays of - * unsigned bytes). - * - * @param a - * A byte[]. - * - * @param b - * A byte[]. - * - * @return a negative integer, zero, or a positive integer if the first - * argument is less than, equal to, or greater than the second. - * - * @todo Return the index of the byte at which the difference with the sign - * adjusted to indicate the relative order of the data rather than the - * difference of the bytes at that index. The index would be negative - * or positive depending on which way the comparison went. See - * {@link CustomByteArrayFrontCodedList} for an implementation - * guideline. - * <p> - * Change all implementations in this class and also BytesUtil.c, - * which needs to be recompiled for Windows. Also makes sure that it - * gets compiled and linked for Un*x. That should be tested from the - * ant installer and the result reported. Do the same for ICU4JNI. 
- */ - final public static int compareBytes(final byte[] a, final byte[] b) { - if(a==b) return 0; - final int alen = a.length; - final int blen = b.length; - if (linked && alen > minlen && blen > minlen) { - /* - * JNI implementation. - * - * @todo test for trade off when max(len) is short. unroll loop for - * small N. - */ - return _compareBytes(alen,a, blen,b); - } - for (int i = 0; i < alen && i < blen; i++) { - // promotes to signed integers in [0:255] for comparison. - final int ret = (a[i] & 0xff) - (b[i] & 0xff); - // int ret = a[i] - b[i]; - if (ret != 0) - return ret; - } - return alen - blen; - } - -// /** -// * Byte-wise comparison of a {@link ByteBuffer} and a byte[]. The data are -// * treated as arrays of unsigned bytes. The {@link ByteBuffer} position, -// * limit and mark are unchanged by this procedure. -// * -// * @param a -// * A {@link ByteBuffer}. -// * @param aoff -// * The offset of the starting byte in the buffer. -// * @param blen -// * The number of bytes to be compared. -// * @param b -// * A byte[]. -// * -// * @return a negative integer, zero, or a positive integer if the first -// * argument is less than, equal to, or greater than the second. -// */ -// final public static int compareBytes(final ByteBuffer a, final int aoff, -// final int alen, final byte[] b) { -// final int blen = b.length; -// for (int i = 0; i < alen && i < blen; i++) { -// // promotes to signed integers in [0:255] for comparison. -// final int ret = (a.get(aoff + i) & 0xff) - (b[i] & 0xff); -// if (ret != 0) -// return ret; -// } -// return alen - blen; -// } - -// /** -// * Byte-wise comparison of byte[]s (the arrays are treated as arrays of -// * unsigned bytes). -// * -// * @param aoff -// * The offset into <i>a</i> at which the comparison will -// * begin. -// * @param a -// * A byte[]. -// * @param boff -// * The offset into <i>b</i> at which the comparison will -// * begin. -// * @param b -// * A byte[]. 
-// * -// * @return a negative integer, zero, or a positive integer as the first -// * argument is less than, equal to, or greater than the second. -// */ -// final public static int compareBytes(int aoff, final byte[] a, int boff, -// final byte[] b) { -// final int alen = a.length; -// final int blen = b.length; -// for (int i = aoff, j = boff; i < alen && j < blen; i++, j++) { -// // promotes to signed integers in [0:255] for comaprison. -// int ret = (a[i] & 0xff) - (b[j] & 0xff); -// // int ret = a[i] - b[j]; -// if (ret != 0) -// return ret; -// } -// return (alen - aoff) - (blen - boff); -// } - - /** - * Byte-wise comparison of byte[]s (the arrays are treated as arrays of - * unsigned bytes). - * - * @param aoff - * The offset into <i>a</i> at which the comparison will - * begin. - * @param alen - * The #of bytes in <i>a</i> to consider starting at <i>aoff</i>. - * @param a - * A byte[]. - * @param boff - * The offset into <i>b</i> at which the comparison will - * begin. - * @param blen - * The #of bytes in <i>b</i> to consider starting at <i>boff</i>. - * @param b - * A byte[]. - * - * @return a negative integer, zero, or a positive integer as the first - * argument is less than, equal to, or greater than the second. - */ - final public static int compareBytesWithLenAndOffset(// - int aoff, int alen, final byte[] a,// - int boff, int blen, final byte[] b// - ) { - - if (linked && alen > minlen && blen > minlen) { - - // JNI implementation. - return _compareBytesWithOffsetAndLen(aoff, alen, a, boff, blen, b); - - } - - // last index to consider in a[]. - final int alimit = aoff + alen; - - // last index to consider in b[]. - final int blimit = boff + blen; - - for (int i = aoff, j = boff; i < alimit && j < blimit; i++, j++) { - - // promotes to signed integers in [0:255] for comaprison. - int ret = (a[i] & 0xff) - (b[j] & 0xff); - - if (ret != 0) - return ret; - - } - - return alen - blen; - - } - - /** - * Return the #of leading bytes in common. 
This is used to compute the - * prefix for a node or leaf, which is formed by the leading bytes in common - * between the first and last key for a node or leaf. - * - * @param a - * A variable length unsigned byte array. - * @param b - * A variable length unsigned byte array. - * - * @return The #of leading bytes in common (aka the index of the first byte - * in which the two arrays differ, although that index could lie - * beyond the end of one of the arrays). - */ - public final static int getPrefixLength(final byte[] a, final byte[] b) { - - final int alen = a.length; - - final int blen = b.length; - - int i; - - for (i = 0; i < alen && i < blen; i++) { - - if (a[i] != b[i]) - break; - - } - - return i; - - } - - /** - * Return a new byte[] containing the leading bytes in common between two - * byte[]s. This is often used to compute the minimum length separator key. - * - * @param a - * A variable length unsigned byte array[]. - * @param b - * A variable length unsigned byte array[]. - * - * @return A new byte[] containing the leading bytes in common between the - * two arrays. - */ - public final static byte[] getPrefix(final byte[] a, final byte[] b) { - - final int len = getPrefixLength(a, b); - - final byte[] prefix = new byte[len]; - - System.arraycopy(a, 0, prefix, 0, len); - - return prefix; - - } - - /** - * Computes the successor of a variable length byte array by appending a - * unsigned zero(0) byte to the end of the array. - * - * @param key - * A variable length unsigned byte array. - * - * @return A new unsigned byte[] that is the successor of the key. - */ - public final static byte[] successor(final byte[] key) { - - final int keylen = key.length; - - final byte[] tmp = new byte[keylen + 1]; - - System.arraycopy(key, 0, tmp, 0, keylen); - - return tmp; - - } - - /** - * <p> - * The keys in the nodes of a btree are known as <i>separator keys</i>. 
The - * role of the separator keys is to direct search towards the leaf in which - * a key exists or would exist by always searching the first child having a - * separator key that is greater than or equal to the search key. - * </p> - * <p> - * Separator keys separate leaves and must be choosen with that purpose in - * mind. The simplest way to choose the separator key is to just take the - * first key of the leaf - this is always correct. However, shorter - * separator keys may be choosen by defining the separator key as the - * shortest key that is less than or equal to the first key of a leaf and - * greater than the last key of the left sibling of that leaf (that is, the - * key for the entry that immediately proceeds the first entry on the leaf). - * </p> - * <p> - * There are several advantages to always choosing the shortest separator - * key. The original rationale (in "Prefix <i>B</i>-Trees" by Bayer and - * Unterauer) was to increase the branching factors for fixed size pages. - * Since we use variable size serialized record, that is not an issue. - * However, using the shortest separator keys in this implementation - * provides both smaller serialized records for nodes and faster search - * since fewer bytes must be tested. - * </p> - * <p> - * Note that this trick can not be used at higher levels in the btree - - * separator keys are always formed based on the keys in the leaves and then - * propagated through the tree. - * </p> - * <p> - * The rules are simple enough: - * <ol> - * <li>The separator contains all bytes in the shared prefix (if any) plus - * the 1st byte at which the given key differs from the prior key.</li> - * <li>If the separator key would equal the given key by value then return - * the reference to the given key.</li> - * </ol> - * </p> - * - * @param givenKey - * A key. - * - * @param priorKey - * Another key that <em>proceeds</em> the <i>givenKey</i>. 
- * - * @return The shortest key that is less than or equal to <i>givenKey</i> - * and greater than <i>priorKey</i>. This will be a reference to - * the <i>givenKey</i> iff that is also the shortest separator. - * - * @see http://portal.acm.org/citation.cfm?doid=320521.320530 - * - * @throws IllegalArgumentException - * if either key is <code>null</code>. - * @throws IllegalArgumentException - * if both keys are the same reference. - */ -// * @throws IllegalArgumentException -// * if the keys are equal. -// * @throws IllegalArgumentException -// * if the keys are out of order. - final public static byte[] getSeparatorKey(final byte[] givenKey, - final byte[] priorKey) { - - if (givenKey == null) - throw new IllegalArgumentException(); - - if (priorKey == null) - throw new IllegalArgumentException(); - - if (givenKey == priorKey) - throw new IllegalArgumentException(); - - final int prefixLen = getPrefixLength(givenKey, priorKey); - - if (prefixLen == givenKey.length - 1) { - - /* - * The given key is the shortest separator. Examples would include: - * - * given: 0 1 2 - * prior: 0 1 - * - * or - * - * given: 0 1 2 - * prior: 0 1 1 - * - * or - * - * given: 0 1 2 - * prior: 0 1 1 2 - */ - - return givenKey; - - } - - /* - * The separator includes all bytes in the shared prefix plus the next - * byte from the given key. - */ - - // allocate to right size. - final byte[] tmp = new byte[prefixLen+1]; - - // copy shared prefix plus the following byte. - System.arraycopy(givenKey, 0, tmp, 0, prefixLen+1); - - return tmp; - - } - - /** - * Formats a key as a series of comma delimited unsigned bytes. - * - * @param key - * The key. - * - * @return The string representation of the array as unsigned bytes. - */ - final public static String toString(final byte[] key) { - - if (key == null) - return NULL; - - return toString(key, 0, key.length); - - } - - /** - * Formats a key as a series of comma delimited unsigned bytes. - * - * @param key - * The key. 
- * @param off - * The index of the first byte that will be visited. - * @param len - * The #of bytes to visit. - * - * @return The string representation of the array as unsigned bytes. - */ - final public static String toString(final byte[] key, final int off, - final int len) { - - if (key == null) - return NULL; - - final StringBuilder sb = new StringBuilder(len * 4 + 2); - - sb.append("["); - - for (int i = off; i < off + len; i++) { - - if (i > 0) - sb.append(", "); - - // as an unsigned integer. -// sb.append(Integer.toHexString(key[i] & 0xff)); - sb.append(Integer.toString(key[i] & 0xff)); - - } - - sb.append("]"); - - return sb.toString(); - - } - - private static transient String NULL = "null"; - - /** - * Formats the data into a {@link String}. - * - * @param data - * An array of unsigned byte arrays. - */ - static public String toString(final byte[][] data) { - - final StringBuilder sb = new StringBuilder(); - - final int n = data.length; - - sb.append("data(n=" + n + ")={"); - - for (int i = 0; i < n; i++) { - - final byte[] a = data[i]; - - sb.append("\n"); - - sb.append("data[" + i + "]="); - - sb.append(BytesUtil.toString(a)); - - if (i + 1 < n) - sb.append(","); - - } - - sb.append("}"); - - return sb.toString(); - - } - - /** - * Binary search on an array whose members are variable length unsigned - * byte[]s. - * - * @param keys - * The buffer. - * @param base - * The offset of the base of the array within the buffer. - * @param nmem - * The #of members in the array. When [nmem == 0], the array is - * empty. - * @param key - * The key for the search. - * - * @return index of the search key, if it is contained in <i>keys</i>; - * otherwise, <code>(-(insertion point) - 1)</code>. The - * insertion point is defined as the point at which the key would be - * inserted into the array of keys. Note that this guarantees that - * the return value will be >= 0 if and only if the key is found. 
- */ - static final public int binarySearch(final byte[][] keys, final int base, - final int nmem, final byte[] key) { - - int low = 0; - - int high = nmem - 1; - - while (low <= high) { - - final int mid = (low + high) >> 1; - - final int offset = base + mid; - - final byte[] midVal = keys[offset]; - - // compare actual vs probe - final int tmp = BytesUtil.compareBytes(midVal, key); - - if (tmp < 0) { - - // Actual LT probe, restrict lower bound and try again. - low = mid + 1; - - } else if (tmp > 0) { - - // Actual GT probe, restrict upper bound and try again. - high = mid - 1; - - } else { - - // Actual EQ probe. Found : return offset. - - return offset; - - } - - } - - // Not found: return insertion point. - - final int offset = (base + low); - - return -(offset + 1); - - } - - /** - * Compares two unsigned byte[]s. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - public static class UnsignedByteArrayComparator implements Comparator<byte[]> { - - public static transient final Comparator<byte[]> INSTANCE = new UnsignedByteArrayComparator(); - - public int compare(final byte[] o1, final byte[] o2) { - - return BytesUtil.compareBytes(o1, o2); - - } - - } - - /** - * This method forces the load of the JNI library and tries to execute the - * JNI methods. - * <p> - * In order to use the JNI library under Windows, you must specify the JNI - * library location using the PATH environment variable, e.g., - * - * <pre> - * cd bigdata - * set PATH=%PATH%;lib - * java -cp bin com.bigdata.btree.BytesUtil - * </pre> - * - * <p> - * In order to use the JNI library under un*x, you must specify the JNI - * library location - * - * <pre> - * java -Djava.library.path=lib com.bigdata.btree.BytesUtil - * </pre> - * - * @param args - * - * @exception UnsatisfiedLinkError - * if the JNI methods can not be resolved. - * @exception AssertionError - * if the JNI methods do not produce the expected answers. 
- */ - public static void main(final String[] args) { - - // Force load of the JNI library. - loadJNILibrary(); - - if( 0 != BytesUtil._compareBytes(3, new byte[]{1,2,3}, 3, new byte[]{1,2,3}) ) { - - throw new AssertionError(); - - } - - if( 0 != BytesUtil._compareBytesWithOffsetAndLen(0, 3, new byte[]{1,2,3}, 0, 3, new byte[]{1,2,3}) ) { - - throw new AssertionError(); - - } - - System.out.println("JNI library routines Ok."); - - } - - /** - * Return the #of bytes required to bit code the specified #of bits. - * - * @param nbits - * The #of bit flags. - * - * @return The #of bytes required. This will be zero iff <i>nbits</i> is - * zero. - */ - final public static int bitFlagByteLength(final int nbits) { - - return nbits / 8 + (nbits % 8 == 0 ? 0 : 1); - -// return nbits>>>3; - -// if (nbits == 0) -// return 0; -// -// return ((int) ((nbits / 8) + 1)); - - } - - /** - * Return the index of the byte in which the bit with the given index is - * encoded. - * - * @param bitIndex - * The bit index. - * - * @return The byte index. - */ - final public static int byteIndexForBit(final long bitIndex) { - - return ((int) (bitIndex / 8)); - - } - - /** - * Return the offset within the byte in which the bit is coded of the bit - * (this is just the remainder <code>bitIndex % 8</code>). - * <p> - * Note, the computation of the bit offset is intentionally aligned with - * {@link OutputBitStream} and {@link InputBitStream}. - * - * @param bitIndex - * The bit index into the byte[]. - * - * @return The offset of the bit in the appropriate byte. - */ - final public static int withinByteIndexForBit(final long bitIndex) { - - return 7 - ((int) bitIndex) % 8; - - } - - /** - * Get the value of a bit. - * <p> - * Note, the computation of the bit offset is intentionally aligned with - * {@link OutputBitStream} and {@link InputBitStream}. - * - * @param bitIndex - * The index of the bit. - * - * @return The value of the bit. 
- */ - final public static boolean getBit(final byte[] buf, final long bitIndex) { - - final int mask = (1 << withinByteIndexForBit(bitIndex)); - - final int off = byteIndexForBit(bitIndex); - - final byte b = buf[off]; - - return (b & mask) != 0; - - } - - /** - * Set the value of a bit - this is NOT thread-safe (contention for the byte - * in the backing buffer can cause lost updates). - * <p> - * Note, the computation of the bit offset is intentionally aligned with - * {@link OutputBitStream} and {@link InputBitStream}. - * - * @param bitIndex - * The index of the bit. - * - * @return The old value of the bit. - */ - final public static boolean setBit(final byte[] buf, final long bitIndex, - final boolean value) { - - final int mask = (1 << withinByteIndexForBit(bitIndex)); - - final int off = byteIndexForBit(bitIndex); - - // current byte at that index. - byte b = buf[off]; - - final boolean oldValue = (b & mask) != 0; - - if (value) - b |= mask; - else - b &= ~mask; - - buf[off] = b; - - return oldValue; - - } - - /** - * Decode a string of the form <code>[0-9]+(k|kb|m|mb|g|gb)?</code>, - * returning the number of bytes. When a suffix indicates kilobytes, - * megabytes, or gigabytes then the returned value is scaled accordingly. - * The suffix is NOT case sensitive. - * - * @param s - * The string value. - * - * @return The byte count. - * - * @throws IllegalArgumentException - * if there is a problem with the argument (<code>null</code>, - * ill-formed, etc). - */ - static public long getByteCount(final String s) { - - if (s == null) - throw new IllegalArgumentException(); - - final Matcher m = PATTERN_BYTE_COUNT.matcher(s); - - if (!m.matches()) - throw new IllegalArgumentException(s); - - // the numeric component. - final String g1 = m.group(1); - - final long c = Long.valueOf(g1); - - // the units (null if not given). 
- final String g2 = m.group(2); - - final long count; - if (g2 == null) { - count = c; - } else if (g2.equalsIgnoreCase("k") || g2.equalsIgnoreCase("kb")) { - count = c * Bytes.kilobyte; - } else if (g2.equalsIgnoreCase("m") || g2.equalsIgnoreCase("mb")) { - count = c * Bytes.megabyte; - } else if (g2.equalsIgnoreCase("g") || g2.equalsIgnoreCase("gb")) { - count = c * Bytes.gigabyte; - } else { - throw new AssertionError(); - } - return count; - } - - static final private Pattern PATTERN_BYTE_COUNT = Pattern.compile( - "([0-9]+)(k|kb|m|mb|g|gb)?", Pattern.CASE_INSENSITIVE); - -} Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/FixedLengthPrefixSplits.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/FixedLengthPrefixSplits.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/FixedLengthPrefixSplits.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -6,6 +6,7 @@ import java.io.ObjectOutput; import java.io.Serializable; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Logger; import com.bigdata.rawstore.Bytes; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IRangeQuery.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IRangeQuery.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IRangeQuery.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -316,7 +316,7 @@ * @see SuccessorUtil, which may be used to compute the successor of a value * before encoding it as a component of a key. * - * @see BytesUtil#successor(byte[]), which may be used to compute the + * @see com.bigdata.io.BytesUtil#successor(byte[]), which may be used to compute the * successor of an encoded key. 
* * @see EntryFilter, which may be used to filter the entries visited by the @@ -361,7 +361,7 @@ * @see SuccessorUtil, which may be used to compute the successor of a value * before encoding it as a component of a key. * - * @see BytesUtil#successor(byte[]), which may be used to compute the + * @see com.bigdata.io.BytesUtil#successor(byte[]), which may be used to compute the * successor of an encoded key. * * @see IFilterConstructor, which may be used to construct an iterator stack Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -42,6 +42,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicLong; +import com.bigdata.io.*; import org.apache.log4j.Logger; import com.bigdata.LRUNexus; @@ -53,13 +54,6 @@ import com.bigdata.btree.raba.MutableValueBuffer; import com.bigdata.btree.view.FusedView; import com.bigdata.cache.IGlobalLRU.ILRUCache; -import com.bigdata.io.AbstractFixedByteArrayBuffer; -import com.bigdata.io.ByteArrayBuffer; -import com.bigdata.io.DataInputBuffer; -import com.bigdata.io.FileChannelUtility; -import com.bigdata.io.NOPReopener; -import com.bigdata.io.SerializerUtil; -import com.bigdata.io.WriteCache; import com.bigdata.journal.Journal; import com.bigdata.journal.Name2Addr; import com.bigdata.journal.TemporaryRawStore; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java 2010-09-15 21:51:13 UTC (rev 
3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentMultiBlockIterator.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -4,6 +4,7 @@ import java.nio.ByteBuffer; import java.util.NoSuchElementException; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Logger; import com.bigdata.btree.IndexSegment.IndexSegmentTupleCursor; @@ -538,7 +539,7 @@ return super.toString() + // "{file=" + store.getFile() + // ",checkpoint="+store.getCheckpoint()+// - ",fromKey="+BytesUtil.toString(fromKey)+// + ",fromKey="+ BytesUtil.toString(fromKey)+// ",toKey="+BytesUtil.toString(toKey)+// ",firstLeafAddr=" + store.toString(firstLeafAddr) + // ",lastLeafAddr=" + store.toString(lastLeafAddr) + // Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyAfterPartitionException.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyAfterPartitionException.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyAfterPartitionException.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -30,6 +30,7 @@ import java.io.File; +import com.bigdata.io.BytesUtil; import com.bigdata.mdi.LocalPartitionMetadata; /** Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyBeforePartitionException.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyBeforePartitionException.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/KeyBeforePartitionException.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -30,6 +30,7 @@ import java.io.File; +import com.bigdata.io.BytesUtil; import com.bigdata.mdi.LocalPartitionMetadata; /** Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Leaf.java 
=================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Leaf.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Leaf.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -30,6 +30,7 @@ import java.util.Iterator; import java.util.WeakHashMap; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Level; import com.bigdata.btree.data.DefaultLeafCoder; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Node.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Node.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Node.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -32,11 +32,11 @@ import java.util.Iterator; import java.util.Set; import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; import java.util.concurrent.FutureTask; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReferenceArray; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Level; import com.bigdata.BigdataStatics; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/ResultSet.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/ResultSet.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/ResultSet.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -24,6 +24,7 @@ */ package com.bigdata.btree; +import com.bigdata.io.BytesUtil; import it.unimi.dsi.bits.BitVector; import it.unimi.dsi.bits.LongArrayBitVector; import it.unimi.dsi.io.InputBitStream; Modified: 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/data/DefaultLeafCoder.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/data/DefaultLeafCoder.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/data/DefaultLeafCoder.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -27,6 +27,7 @@ package com.bigdata.btree.data; +import com.bigdata.io.BytesUtil; import it.unimi.dsi.bits.Fast; import it.unimi.dsi.io.InputBitStream; import it.unimi.dsi.io.OutputBitStream; @@ -38,7 +39,6 @@ import org.apache.log4j.Logger; -import com.bigdata.btree.BytesUtil; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.raba.IRaba; import com.bigdata.btree.raba.codec.ICodedRaba; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/Advancer.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/Advancer.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/Advancer.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -2,9 +2,9 @@ import java.util.Iterator; +import com.bigdata.io.BytesUtil; import org.apache.log4j.Logger; -import com.bigdata.btree.BytesUtil; import com.bigdata.btree.IIndex; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleCursor; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/PrefixFilter.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/PrefixFilter.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/PrefixFilter.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -5,7 +5,7 @@ import 
org.apache.log4j.Logger; -import com.bigdata.btree.BytesUtil; +import com.bigdata.io.BytesUtil; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleCursor; import com.bigdata.btree.ITupleIterator; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/TupleFilter.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/TupleFilter.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/filter/TupleFilter.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -8,7 +8,7 @@ import com.bigdata.btree.AbstractBTree; import com.bigdata.btree.AbstractTuple; import com.bigdata.btree.BTree; -import com.bigdata.btree.BytesUtil; +import com.bigdata.io.BytesUtil; import com.bigdata.btree.IRangeQuery; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleCursor; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/IKeyBuilder.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/IKeyBuilder.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/IKeyBuilder.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -34,7 +34,6 @@ import java.util.Properties; import java.util.UUID; -import com.bigdata.btree.BytesUtil; import com.bigdata.btree.keys.KeyBuilder.Options; /** @@ -129,7 +128,7 @@ * * @return A new array containing the key. 
* - * @see BytesUtil#compareBytes(byte[], byte[]) + * @see com.bigdata.io.BytesUtil#compareBytes(byte[], byte[]) */ public byte[] getKey(); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KVO.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KVO.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KVO.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -30,7 +30,7 @@ import java.util.Arrays; -import com.bigdata.btree.BytesUtil; +import com.bigdata.io.BytesUtil; import com.bigdata.service.ndx.pipeline.KVOC; /** Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KeyBuilder.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KeyBuilder.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/keys/KeyBuilder.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -35,7 +35,6 @@ import java.util.Properties; import java.util.UUID; import org.apache.log4j.Logger; -import com.bigdata.btree.BytesUtil; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleSerializer; @@ -53,7 +52,7 @@ * @see SuccessorUtil Compute the successor of a value before encoding it as a * component of a key. * - * @see BytesUtil#successor(byte[]) Compute the successor of an encoded key. + * @see com.bigdata.io.BytesUtil#successor(byte[]) Compute the successor of an encoded key. * * @todo introduce a mark and restore feature for generating multiple keys that * share some leading prefix. 
in general, this is as easy as resetting the Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/proc/AbstractKeyRangeIndexProcedure.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/proc/AbstractKeyRangeIndexProcedure.java 2010-09-15 21:51:13 UTC (rev 3562) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/proc/AbstractKeyRangeIndexProcedure.java 2010-09-15 23:01:17 UTC (rev 3563) @@ -33,7 +33,7 @@ import ... [truncated message content] |
From: <mrp...@us...> - 2010-09-15 21:51:19
|
Revision: 3562 http://bigdata.svn.sourceforge.net/bigdata/?rev=3562&view=rev Author: mrpersonick Date: 2010-09-15 21:51:13 +0000 (Wed, 15 Sep 2010) Log Message: ----------- adding Sesame to BOp conversion Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-15 21:43:28 UTC (rev 3561) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-15 21:51:13 UTC (rev 3562) @@ -38,6 +38,7 @@ import com.bigdata.bop.ap.E; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.bset.CopyBindingSetOp; +import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.join.PipelineJoin; import com.bigdata.journal.ITx; import com.bigdata.rdf.sail.BigdataSail; @@ -94,12 +95,14 @@ int bopId = 1; - BindingSetPipelineOp left = new CopyBindingSetOp(new BOp[] {}, + final BindingSetPipelineOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, bopId++),// })); Iterator<Predicate> tails = rule.getTail(); + + BindingSetPipelineOp left = startOp; while (tails.hasNext()) { @@ -121,10 +124,74 @@ System.err.println(toString(left)); +// test_query_join2(); + return left; } + public static void test_query_join2() { + + final String namespace = "ns"; + final int startId = 1; + final int joinId1 = 2; + final int predId1 = 3; + final int joinId2 = 4; + final int predId2 = 5; + + final BindingSetPipelineOp startOp = new StartOp(new BOp[] {}, + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + })); + + final Predicate<?> pred1Op = new Predicate<E>(new IVariableOrConstant[] { + Var.var("x"), Var.var("y") }, NV + .asMap(new NV[] {// + new 
NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }),// + new NV(Predicate.Annotations.PARTITION_ID, + Integer.valueOf(-1)),// + new NV(Predicate.Annotations.OPTIONAL, + Boolean.FALSE),// + new NV(Predicate.Annotations.CONSTRAINT, null),// + new NV(Predicate.Annotations.EXPANDER, null),// + new NV(Predicate.Annotations.BOP_ID, predId1),// + new NV(Predicate.Annotations.TIMESTAMP, ITx.READ_COMMITTED),// + })); + + final Predicate<?> pred2Op = new Predicate<E>(new IVariableOrConstant[] { + Var.var("y"), Var.var("z") }, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }),// + new NV(Predicate.Annotations.PARTITION_ID, + Integer.valueOf(-1)),// + new NV(Predicate.Annotations.OPTIONAL, + Boolean.FALSE),// + new NV(Predicate.Annotations.CONSTRAINT, null),// + new NV(Predicate.Annotations.EXPANDER, null),// + new NV(Predicate.Annotations.BOP_ID, predId2),// + new NV(Predicate.Annotations.TIMESTAMP, ITx.READ_COMMITTED),// + })); + + final BindingSetPipelineOp join1Op = new PipelineJoin<E>(// + startOp, pred1Op,// + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, joinId1),// + })); + + final BindingSetPipelineOp join2Op = new PipelineJoin<E>(// + join1Op, pred2Op,// + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, joinId2),// + })); + + final BindingSetPipelineOp query = join2Op; + + System.err.println(toString(query)); + + } + private static String toString(BOp bop) { StringBuilder sb = new StringBuilder(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2010-09-15 21:43:36
|
Revision: 3561 http://bigdata.svn.sourceforge.net/bigdata/?rev=3561&view=rev Author: mrpersonick Date: 2010-09-15 21:43:28 +0000 (Wed, 15 Sep 2010) Log Message: ----------- adding Sesame to BOp conversion Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Var.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/DefaultEvaluationPlan2.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataTripleSource.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Var.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Var.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/Var.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -2,7 +2,6 @@ import java.io.ObjectStreamException; import java.util.UUID; - import com.bigdata.cache.ConcurrentWeakValueCache; import com.bigdata.relation.rule.Rule; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java 
=================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -365,6 +365,16 @@ } + public Predicate<E> setBOpId(final int bopId) { + + final Predicate<E> tmp = this.clone(); + + tmp.annotations.put(Annotations.BOP_ID, bopId); + + return tmp; + + } + public String toString() { return toString(null/* bindingSet */); @@ -377,6 +387,8 @@ final StringBuilder sb = new StringBuilder(); + sb.append(getClass().getName()); + sb.append("("); for (int i = 0; i < arity; i++) { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -27,7 +27,19 @@ package com.bigdata.bop.engine; +import java.util.Iterator; +import java.util.List; import com.bigdata.bop.BOp; +import com.bigdata.bop.BindingSetPipelineOp; +import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.NV; +import com.bigdata.bop.Var; +import com.bigdata.bop.ap.E; +import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.bset.CopyBindingSetOp; +import com.bigdata.bop.join.PipelineJoin; +import com.bigdata.journal.ITx; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.relation.rule.IProgram; import com.bigdata.relation.rule.IRule; @@ -60,10 +72,15 @@ * * @return */ - public static BOp convert(final IStep step) { + public static BindingSetPipelineOp convert(final IStep step) { + if (step instanceof Rule) + return convert((Rule) step); + else if (step 
instanceof Program) + return convert((Program) step); + throw new UnsupportedOperationException(); - + } /** @@ -73,12 +90,71 @@ * * @return */ - public static BOp convert(final Rule rule) { + public static BindingSetPipelineOp convert(final Rule rule) { - throw new UnsupportedOperationException(); + int bopId = 1; + + BindingSetPipelineOp left = new CopyBindingSetOp(new BOp[] {}, + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, bopId++),// + })); + + Iterator<Predicate> tails = rule.getTail(); + + while (tails.hasNext()) { + + final int joinId = bopId++; + + final Predicate<?> pred = tails.next().setBOpId(bopId++); + + System.err.println(pred); + + final BindingSetPipelineOp joinOp = new PipelineJoin<E>(// + left, pred,// + NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, joinId),// + })); + + left = joinOp; + + } + + System.err.println(toString(left)); + + return left; + + } + + private static String toString(BOp bop) { + + StringBuilder sb = new StringBuilder(); + + toString(bop, sb, 0); + + // chop off the last \n + sb.setLength(sb.length()-1); + + return sb.toString(); + + } + + private static void toString(final BOp bop, final StringBuilder sb, + final int indent) { + + for (int i = 0; i < indent; i++) { + sb.append(' '); + } + sb.append(bop).append('\n'); + if (bop != null) { + List<BOp> args = bop.args(); + for (BOp arg : args) { + toString(arg, sb, indent+4); + } + } + } - + /** * Convert a program into an operator tree. 
* @@ -86,7 +162,7 @@ * * @return */ - public static BOp convert(final Program program) { + public static BindingSetPipelineOp convert(final Program program) { throw new UnsupportedOperationException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/DefaultEvaluationPlan2.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/DefaultEvaluationPlan2.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/rule/eval/DefaultEvaluationPlan2.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -31,9 +31,7 @@ import java.util.Arrays; import java.util.HashSet; import java.util.Set; - import org.apache.log4j.Logger; - import com.bigdata.bop.IPredicate; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.journal.ITx; @@ -64,7 +62,7 @@ * @todo not serializable but used by {@link #rangeCount(int)}, which is a * problem. */ - private final IJoinNexus joinNexus; + private final IRangeCountFactory rangeCountFactory; private final IRule rule; @@ -145,15 +143,31 @@ * @param rule * The rule. */ - public DefaultEvaluationPlan2(IJoinNexus joinNexus, IRule rule) { + public DefaultEvaluationPlan2(final IJoinNexus joinNexus, + final IRule rule) { - if (joinNexus == null) + this(joinNexus.getRangeCountFactory(), rule); + + } + + /** + * Computes an evaluation plan for the rule. + * + * @param rangeCountFactory + * The range count factory. + * @param rule + * The rule. 
+ */ + public DefaultEvaluationPlan2(final IRangeCountFactory rangeCountFactory, + final IRule rule) { + + if (rangeCountFactory == null) throw new IllegalArgumentException(); if (rule == null) throw new IllegalArgumentException(); - this.joinNexus = joinNexus; + this.rangeCountFactory = rangeCountFactory; this.rule = rule; @@ -439,7 +453,7 @@ } - final long rangeCount = joinNexus.getRangeCountFactory() + final long rangeCount = rangeCountFactory .rangeCount(rule.getTail(tailIndex)); this.rangeCount[tailIndex] = rangeCount; Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -0,0 +1,309 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Oct 24, 2007 + */ + +package com.bigdata.striterator; + +import java.util.Arrays; +import java.util.NoSuchElementException; + +/** + * Fully buffered iterator. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: ChunkedArrayIterator.java 2265 2009-10-26 12:51:06Z thompsonbry $ + */ +public class ChunkedArraysIterator<E> implements IChunkedOrderedIterator<E> { + + private boolean open = true; + + /** buffer iterator. */ + private ICloseableIterator<E[]> bufferIt; + + /** current buffer. */ + private E[] buffer; + + /** The order of the elements in the buffer or <code>null</code> iff not known. */ + private final IKeyOrder<E> keyOrder; + + /** + * The index of the next entry in {@link #buffer} that will be returned by + * {@link #next()}. + */ + private int i = 0; + +// /** +// * The element most recently returned by {@link #next()}. +// */ +// private E current = null; + +// /** +// * The #of elements that this iterator buffered. +// */ +// public int getBufferCount() { +// +// return bufferCount; +// +// } + + /** + * An iterator that visits the elements in the given iterator of arrays. + * + * @param a + * The iterator of arrays of elements. + */ + public ChunkedArraysIterator(final ICloseableIterator<E[]> a) { + + this(a, null); + + } + + /** + * An iterator that visits the elements in the given iterator of arrays. + * + * @param a + * The iterator of arrays of elements. + * @param keyOrder + * The order of the elements in the buffer or <code>null</code> + * iff not known. 
+ */ + public ChunkedArraysIterator(final ICloseableIterator<E[]> a, + final IKeyOrder<E> keyOrder) { + + if (a == null) + throw new IllegalArgumentException(); + + this.bufferIt = a; + + this.keyOrder = keyOrder; + + } + + public boolean hasNext() { + + if(!open) return false; + + if (buffer == null) { + + return bufferIt.hasNext(); + + } +// else { +// +// assert i <= buffer.length; +// +// if (i == buffer.length) { +// +// return false; +// +// } +// +// } + + return true; + + } + + public E next() { + + if (!hasNext()) { + + throw new NoSuchElementException(); + + } + + if (buffer == null) { + + buffer = bufferIt.next(); + + } + + E e = buffer[i++]; + + if (i == buffer.length) { + + buffer = null; + + i = 0; + + } + + return e; + +// current = buffer[i++]; +// +// return current; + + } + + /** + * @throws UnsupportedOperationException + */ + public void remove() { + + throw new UnsupportedOperationException(); + + } + +// /** +// * Return the backing array. +// * +// * @see #getBufferCount() +// */ +// public E[] array() { +// +// assertOpen(); +// +// return buffer; +// +// } + + /** + * Returns the remaining statements. + * + * @throws NoSuchElementException + * if {@link #hasNext()} returns false. + */ + @SuppressWarnings("unchecked") + public E[] nextChunk() { + + if (!hasNext()) { + + throw new NoSuchElementException(); + + } + + final E[] ret; + + if (buffer == null) { + + /* + * We need to fetch the next buffer from the source iterator, and + * then we can just return it directly. + */ + buffer = bufferIt.next(); + + ret = buffer; + + } else if (i == 0) { + + /* + * Nothing has been returned to the caller by next() so we can just + * return the current buffer in this case. + */ + ret = buffer; + + } else { + + /* + * We have a buffer but we've already started return elements from + * it via next(), so we need to create a new buffer to return. 
+ */ + final int remaining = buffer.length - i; + + /* + * Dynamically instantiation an array of the same component type + * as the objects that we are visiting. + */ + + ret = (E[]) java.lang.reflect.Array.newInstance(buffer.getClass() + .getComponentType(), remaining); + + + System.arraycopy(buffer, i, ret, 0, remaining); + + } + + // reset the current buffer + + buffer = null; + + i = 0; + + return ret; + + } + + public IKeyOrder<E> getKeyOrder() { + + return keyOrder; + + } + + public E[] nextChunk(IKeyOrder<E> keyOrder) { + + if (keyOrder == null) + throw new IllegalArgumentException(); + + final E[] chunk = nextChunk(); + + if (!keyOrder.equals(getKeyOrder())) { + + // sort into the required order. + + Arrays.sort(chunk, 0, chunk.length, keyOrder.getComparator()); + + } + + return chunk; + + } + + /* + * Note: Do NOT eagerly close the iterator since the makes it impossible to + * implement {@link #remove()}. + */ + public void close() { + + if (!open) { + + // already closed. + + return; + + } + + bufferIt.close(); + + open = false; + + buffer = null; + + i = 0; + + } + +// private final void assertOpen() { +// +// if (!open) { +// +// throw new IllegalStateException(); +// +// } +// +// } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/internal/IVUtility.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -136,10 +136,8 @@ return iv1.compareTo(iv2); // otherwise we need to try to convert them into comparable numbers - final AbstractLiteralIV num1 = - (AbstractLiteralIV) iv1; - final AbstractLiteralIV num2 = - (AbstractLiteralIV) iv2; + final AbstractLiteralIV num1 = (AbstractLiteralIV) iv1; + final AbstractLiteralIV num2 = (AbstractLiteralIV) iv2; // 
if one's a BigDecimal we should use the BigDecimal comparator for both if (dte1 == DTE.XSDDecimal || dte2 == DTE.XSDDecimal) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/rules/RDFJoinNexus.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -605,7 +605,7 @@ } - final SPORelation r = (SPORelation) (IMutableRelation<?>) getRelation(); + final SPORelation r = (SPORelation) (IMutableRelation) getRelation(); /* * Use a thread pool to write out the statement and the Added: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/BigdataBindingSetResolverator.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -0,0 +1,222 @@ +package com.bigdata.rdf.store; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import org.openrdf.model.Value; + +import com.bigdata.bop.Constant; +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstant; +import com.bigdata.bop.IVariable; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.model.BigdataValue; +import com.bigdata.relation.accesspath.BlockingBuffer; +import com.bigdata.relation.rule.eval.ISolution; +import com.bigdata.striterator.AbstractChunkedResolverator; +import com.bigdata.striterator.IChunkedOrderedIterator; + +/** + * Efficiently resolve term identifiers in 
Bigdata {@link ISolution}s to RDF + * {@link BigdataValue}s. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: BigdataSolutionResolverator.java 3448 2010-08-18 20:55:58Z thompsonbry $ + */ +public class BigdataBindingSetResolverator + extends + AbstractChunkedResolverator<IBindingSet, IBindingSet, AbstractTripleStore> { + + /** + * + * @param db + * Used to resolve term identifiers to {@link Value} objects. + * @param src + * The source iterator (will be closed when this iterator is + * closed). + * + * FIXME must accept reverse bnodes map (from term identifier to + * blank nodes) for resolution of blank nodes within a Sesame + * connection context. + */ + public BigdataBindingSetResolverator(final AbstractTripleStore db, + final IChunkedOrderedIterator<IBindingSet> src) { + + super(db, src, new BlockingBuffer<IBindingSet[]>( + db.getChunkOfChunksCapacity(), + db.getChunkCapacity(), + db.getChunkTimeout(), + TimeUnit.MILLISECONDS)); + + } + + /** + * Strengthens the return type. + */ + public BigdataBindingSetResolverator start(ExecutorService service) { + + return (BigdataBindingSetResolverator) super.start(service); + + } + + /** + * Resolve a chunk of {@link ISolution}s into a chunk of + * {@link IBindingSet}s in which term identifiers have been resolved to + * {@link BigdataValue}s. + */ + protected IBindingSet[] resolveChunk(final IBindingSet[] chunk) { + + if (log.isInfoEnabled()) + log.info("Fetched chunk: size=" + chunk.length); + + /* + * Create a collection of the distinct term identifiers used in this + * chunk. 
+ */ + + final Collection<IV> ids = new HashSet<IV>(chunk.length + * state.getSPOKeyArity()); + + for (IBindingSet solution : chunk) { + + final IBindingSet bindingSet = solution; + + assert bindingSet != null; + + final Iterator<Map.Entry<IVariable, IConstant>> itr = bindingSet + .iterator(); + + while (itr.hasNext()) { + + final Map.Entry<IVariable, IConstant> entry = itr.next(); + + final IV iv = (IV) entry.getValue().get(); + + if (iv == null) { + + throw new RuntimeException("NULL? : var=" + entry.getKey() + + ", " + bindingSet); + + } + + ids.add(iv); + + } + + } + + if (log.isInfoEnabled()) + log.info("Resolving " + ids.size() + " term identifiers"); + + // batch resolve term identifiers to terms. + final Map<IV, BigdataValue> terms = state.getLexiconRelation() + .getTerms(ids); + + /* + * Assemble a chunk of resolved elements. + */ + { + + final IBindingSet[] chunk2 = new IBindingSet[chunk.length]; + int i = 0; + for (IBindingSet e : chunk) { + + final IBindingSet f = getBindingSet(e, terms); + + chunk2[i++] = f; + + } + + // return the chunk of resolved elements. + return chunk2; + + } + + } + + /** + * Resolve the term identifiers in the {@link ISolution} using the map + * populated when we fetched the current chunk and return the + * {@link IBindingSet} for that solution in which term identifiers have been + * resolved to their corresponding {@link BigdataValue}s. + * + * @param solution + * A solution whose {@link Long}s will be interpreted as term + * identifiers and resolved to the corresponding + * {@link BigdataValue}s. + * + * @return The corresponding {@link IBindingSet} in which the term + * identifiers have been resolved to {@link BigdataValue}s. + * + * @throws IllegalStateException + * if the {@link IBindingSet} was not materialized with the + * {@link ISolution}. 
+ * + * @todo this points out a problem where we would be better off strongly + * typing the term identifiers with their own class rather than using + * {@link Long} since we can not distinguish a {@link Long} + * materialized by a join against some non-RDF relation from a + * {@link Long} that is a term identifier. + */ + private IBindingSet getBindingSet(final IBindingSet solution, + final Map<IV, BigdataValue> terms) { + + if (solution == null) + throw new IllegalArgumentException(); + + if (terms == null) + throw new IllegalArgumentException(); + + final IBindingSet bindingSet = solution; + + if(bindingSet == null) { + + throw new IllegalStateException("BindingSet was not materialized"); + + } + + final Iterator<Map.Entry<IVariable, IConstant>> itr = bindingSet + .iterator(); + + while (itr.hasNext()) { + + final Map.Entry<IVariable, IConstant> entry = itr.next(); + + final Object boundValue = entry.getValue().get(); + + if (!(boundValue instanceof IV)) { + + continue; + + } + + final IV iv = (IV) boundValue; + + final BigdataValue value = terms.get(iv); + + if (value == null) { + + throw new RuntimeException("Could not resolve termId=" + + iv); + } + + /* + * Replace the binding. + * + * FIXME This probably needs to strip out the BigdataSail#NULL_GRAPH + * since that should not become bound. 
+ */ + bindingSet.set(entry.getKey(), new Constant<BigdataValue>( + value)); + + } + + return bindingSet; + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -13,6 +13,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; import org.openrdf.model.Literal; @@ -48,7 +49,9 @@ import org.openrdf.query.algebra.evaluation.iterator.FilterIterator; import org.openrdf.query.algebra.helpers.QueryModelVisitorBase; import com.bigdata.BigdataStatics; +import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.Constant; +import com.bigdata.bop.HashBindingSet; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; @@ -61,6 +64,10 @@ import com.bigdata.bop.constraint.NE; import com.bigdata.bop.constraint.NEConstant; import com.bigdata.bop.constraint.OR; +import com.bigdata.bop.engine.LocalChunkMessage; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.engine.Rule2BOpUtility; +import com.bigdata.bop.engine.RunningQuery; import com.bigdata.bop.solutions.ISortOrder; import com.bigdata.btree.keys.IKeyBuilderFactory; import com.bigdata.rdf.internal.DummyIV; @@ -74,7 +81,6 @@ import com.bigdata.rdf.internal.constraints.InlineNE; import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.model.BigdataValue; -import com.bigdata.rdf.rules.RuleContextEnum; import com.bigdata.rdf.sail.BigdataSail.Options; import 
com.bigdata.rdf.spo.DefaultGraphSolutionExpander; import com.bigdata.rdf.spo.ExplicitSPOFilter; @@ -84,11 +90,13 @@ import com.bigdata.rdf.spo.SPOStarJoin; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.rdf.store.BD; -import com.bigdata.rdf.store.BigdataSolutionResolverator; +import com.bigdata.rdf.store.BigdataBindingSetResolverator; import com.bigdata.rdf.store.IRawTripleStore; import com.bigdata.relation.accesspath.IAccessPath; +import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBuffer; import com.bigdata.relation.accesspath.IElementFilter; +import com.bigdata.relation.accesspath.ThickAsynchronousIterator; import com.bigdata.relation.rule.IProgram; import com.bigdata.relation.rule.IQueryOptions; import com.bigdata.relation.rule.IRule; @@ -97,17 +105,13 @@ import com.bigdata.relation.rule.Program; import com.bigdata.relation.rule.QueryOptions; import com.bigdata.relation.rule.Rule; -import com.bigdata.relation.rule.eval.ActionEnum; -import com.bigdata.relation.rule.eval.DefaultEvaluationPlanFactory2; -import com.bigdata.relation.rule.eval.IEvaluationPlanFactory; -import com.bigdata.relation.rule.eval.IJoinNexus; -import com.bigdata.relation.rule.eval.IJoinNexusFactory; import com.bigdata.relation.rule.eval.IRuleTaskFactory; import com.bigdata.relation.rule.eval.ISolution; import com.bigdata.relation.rule.eval.NestedSubqueryWithJoinThreadsTask; import com.bigdata.relation.rule.eval.RuleStats; import com.bigdata.search.FullTextIndex; import com.bigdata.search.IHit; +import com.bigdata.striterator.ChunkedArraysIterator; import com.bigdata.striterator.DistinctFilter; import com.bigdata.striterator.IChunkedOrderedIterator; @@ -473,6 +477,18 @@ return super.evaluate(union, bindings); + } catch (Exception ex) { + + // Use Sesame 2 evaluation + + ex.printStackTrace(); + + if (log.isInfoEnabled()) { + log.info("could not evaluate natively, punting to Sesame"); + } + + return super.evaluate(union, 
bindings); + } } @@ -590,6 +606,18 @@ return super.evaluate(join, bindings); + } catch (Exception ex) { + + // Use Sesame 2 evaluation + + ex.printStackTrace(); + + if (log.isInfoEnabled()) { + log.info("could not evaluate natively, punting to Sesame"); + } + + return super.evaluate(join, bindings); + } } @@ -682,6 +710,18 @@ return super.evaluate(join, bindings); + } catch (Exception ex) { + + // Use Sesame 2 evaluation + + ex.printStackTrace(); + + if (log.isInfoEnabled()) { + log.info("could not evaluate natively, punting to Sesame"); + } + + return super.evaluate(join, bindings); + } } @@ -1598,64 +1638,91 @@ */ protected CloseableIteration<BindingSet, QueryEvaluationException> execute( final IStep step) - throws QueryEvaluationException { + throws Exception { - final boolean backchain = // - tripleSource.getDatabase().getAxioms().isRdfSchema() - && tripleSource.includeInferred - && tripleSource.conn.isQueryTimeExpander(); + final BindingSetPipelineOp query = Rule2BOpUtility.convert(step); - if (log.isDebugEnabled()) { - log.debug("Running tupleExpr as native rule:\n" + step); - log.debug("backchain: " + backchain); + if (log.isInfoEnabled()) { + log.info(query); } - // run the query as a native rule. - final IChunkedOrderedIterator<ISolution> itr1; - try { - final IEvaluationPlanFactory planFactory = - DefaultEvaluationPlanFactory2.INSTANCE; - - /* - * alternative evaluation orders for LUBM Q9 (default is 1 4, 2, 3, - * 0, 5). All three evaluation orders are roughly as good as one - * another. Note that tail[2] (z rdf:type ...) is entailed by the - * ontology and could be dropped from evaluation. 
- */ - // final IEvaluationPlanFactory planFactory = new - // FixedEvaluationPlanFactory( - // // new int[] { 1, 4, 3, 0, 5, 2 } good - // // new int[] { 1, 3, 0, 4, 5, 2 } good - // ); - - final IJoinNexusFactory joinNexusFactory = database - .newJoinNexusFactory(RuleContextEnum.HighLevelQuery, - ActionEnum.Query, IJoinNexus.BINDINGS, - null, // filter - false, // justify - backchain, // - planFactory, // - queryHints - ); - - final IJoinNexus joinNexus = joinNexusFactory.newInstance(database - .getIndexManager()); - itr1 = joinNexus.runQuery(step); - - } catch (Exception ex) { - throw new QueryEvaluationException(ex); - } + final int startId = query.getProperty(Predicate.Annotations.BOP_ID); - /* - * Efficiently resolve term identifiers in Bigdata ISolutions to RDF - * Values in Sesame 2 BindingSets and align the resulting iterator with - * the Sesame 2 API. - */ + final QueryEngine queryEngine = tripleSource.getSail().getQueryEngine(); + + final UUID queryId = UUID.randomUUID(); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1/* partitionId */, + newBindingSetIterator(new HashBindingSet()))); + + final IAsynchronousIterator<IBindingSet[]> it1 = + runningQuery.iterator(); + + final IChunkedOrderedIterator<IBindingSet> it2 = + new ChunkedArraysIterator<IBindingSet>(it1); + CloseableIteration<BindingSet, QueryEvaluationException> result = new Bigdata2Sesame2BindingSetIterator<QueryEvaluationException>( - new BigdataSolutionResolverator(database, itr1).start(database + new BigdataBindingSetResolverator(database, it2).start(database .getExecutorService())); +// final boolean backchain = // +// tripleSource.getDatabase().getAxioms().isRdfSchema() +// && tripleSource.includeInferred +// && tripleSource.conn.isQueryTimeExpander(); +// +// if (log.isDebugEnabled()) { +// log.debug("Running tupleExpr as native rule:\n" + step); +// log.debug("backchain: " + backchain); +// } +// 
+// // run the query as a native rule. +// final IChunkedOrderedIterator<ISolution> itr1; +// try { +// final IEvaluationPlanFactory planFactory = +// DefaultEvaluationPlanFactory2.INSTANCE; +// +// /* +// * alternative evaluation orders for LUBM Q9 (default is 1 4, 2, 3, +// * 0, 5). All three evaluation orders are roughly as good as one +// * another. Note that tail[2] (z rdf:type ...) is entailed by the +// * ontology and could be dropped from evaluation. +// */ +// // final IEvaluationPlanFactory planFactory = new +// // FixedEvaluationPlanFactory( +// // // new int[] { 1, 4, 3, 0, 5, 2 } good +// // // new int[] { 1, 3, 0, 4, 5, 2 } good +// // ); +// +// final IJoinNexusFactory joinNexusFactory = database +// .newJoinNexusFactory(RuleContextEnum.HighLevelQuery, +// ActionEnum.Query, IJoinNexus.BINDINGS, +// null, // filter +// false, // justify +// backchain, // +// planFactory, // +// queryHints +// ); +// +// final IJoinNexus joinNexus = joinNexusFactory.newInstance(database +// .getIndexManager()); +// itr1 = joinNexus.runQuery(step); +// +// } catch (Exception ex) { +// throw new QueryEvaluationException(ex); +// } +// +// /* +// * Efficiently resolve term identifiers in Bigdata ISolutions to RDF +// * Values in Sesame 2 BindingSets and align the resulting iterator with +// * the Sesame 2 API. +// */ +// CloseableIteration<BindingSet, QueryEvaluationException> result = +// new Bigdata2Sesame2BindingSetIterator<QueryEvaluationException>( +// new BigdataSolutionResolverator(database, itr1).start(database +// .getExecutorService())); + // use the basic filter iterator for remaining filters if (step instanceof ProxyRuleWithSesameFilters) { Collection<Filter> filters = @@ -1675,6 +1742,21 @@ } + /** + * Return an {@link IAsynchronousIterator} that will read a single, + * empty {@link IBindingSet}. + * + * @param bindingSet + * the binding set. 
+ */ + protected ThickAsynchronousIterator<IBindingSet[]> newBindingSetIterator( + final IBindingSet bindingSet) { + + return new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { new IBindingSet[] { bindingSet } }); + + } + @SuppressWarnings("serial") private class UnknownOperatorException extends RuntimeException { private TupleExpr operator; Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -112,6 +112,7 @@ import org.openrdf.sail.SailConnectionListener; import org.openrdf.sail.SailException; +import com.bigdata.bop.engine.QueryEngine; import com.bigdata.journal.IIndexManager; import com.bigdata.journal.ITransactionService; import com.bigdata.journal.ITx; @@ -513,6 +514,11 @@ * {@link BigdataSailConnection} instances and across all transactions. */ private Map<String, String> namespaces; + + /** + * The query engine. + */ + final private QueryEngine queryEngine; /** * When true, the RDFS closure will be maintained by the <em>SAIL</em> @@ -915,6 +921,10 @@ namespaces = Collections.synchronizedMap(new LinkedHashMap<String, String>()); + queryEngine = new QueryEngine(database.getIndexManager()); + + queryEngine.init(); + } /** @@ -1332,7 +1342,13 @@ } + public QueryEngine getQueryEngine() { + + return queryEngine; + + } + /** * Inner class implements the {@link SailConnection}. Some additional * functionality is available on this class, including @@ -1406,6 +1422,13 @@ */ private Lock lock; + + public BigdataSail getBigdataSail() { + + return BigdataSail.this; + + } + /** * Return the assertion buffer. 
* <p> Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataTripleSource.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataTripleSource.java 2010-09-15 20:45:14 UTC (rev 3560) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataTripleSource.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -42,6 +42,12 @@ } + public BigdataSail getSail() { + + return conn.getBigdataSail(); + + } + /** * This wraps * {@link BigdataSailConnection#getStatements(Resource, URI, Value, boolean, Resource[])}. Added: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java 2010-09-15 21:43:28 UTC (rev 3561) @@ -0,0 +1,170 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 16, 2009 + */ + +package com.bigdata.rdf.sail; + +import java.util.Collection; +import java.util.LinkedList; +import java.util.Properties; +import org.apache.log4j.Logger; +import org.openrdf.model.Literal; +import org.openrdf.model.URI; +import org.openrdf.model.ValueFactory; +import org.openrdf.model.impl.LiteralImpl; +import org.openrdf.model.impl.URIImpl; +import org.openrdf.model.vocabulary.RDF; +import org.openrdf.model.vocabulary.RDFS; +import org.openrdf.query.Binding; +import org.openrdf.query.BindingSet; +import org.openrdf.query.QueryLanguage; +import org.openrdf.query.TupleQuery; +import org.openrdf.query.TupleQueryResult; +import org.openrdf.query.impl.BindingImpl; +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.store.BD; +import com.bigdata.rdf.vocab.NoVocabulary; + +/** + * @author <a href="mailto:mrp...@us...">Mike Personick</a> + * @version $Id$ + */ +public class TestBOps extends ProxyBigdataSailTestCase { + + protected static final Logger log = Logger.getLogger(TestBOps.class); + + @Override + public Properties getProperties() { + + Properties props = super.getProperties(); + + props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false"); + props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName()); + props.setProperty(BigdataSail.Options.VOCABULARY_CLASS, NoVocabulary.class.getName()); + props.setProperty(BigdataSail.Options.JUSTIFY, "false"); + props.setProperty(BigdataSail.Options.TEXT_INDEX, "false"); + + return props; + + } + + /** + * + */ + public TestBOps() { + } + + /** + * @param arg0 + */ + public TestBOps(String arg0) { + super(arg0); + } + + public void testSimpleJoin() throws Exception { + + final BigdataSail sail = getSail(); + sail.initialize(); + final 
BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + try { + + final ValueFactory vf = sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + URI mike = new URIImpl(ns+"Mike"); + URI bryan = new URIImpl(ns+"Bryan"); + URI person = new URIImpl(ns+"Person"); + URI likes = new URIImpl(ns+"likes"); + URI rdf = new URIImpl(ns+"RDF"); + Literal l1 = new LiteralImpl("Mike"); + Literal l2 = new LiteralImpl("Bryan"); +/**/ + cxn.setNamespace("ns", ns); + + cxn.add(mike, RDF.TYPE, person); + cxn.add(mike, likes, rdf); + cxn.add(mike, RDFS.LABEL, l1); + cxn.add(bryan, RDF.TYPE, person); + cxn.add(bryan, likes, rdf); + cxn.add(bryan, RDFS.LABEL, l2); + + /* + * Note: The either flush() or commit() is required to flush the + * statement buffers to the database before executing any operations + * that go around the sail. + */ + cxn.flush();//commit(); + + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore()); + } + + { + + String query = + "PREFIX rdf: <"+RDF.NAMESPACE+"> " + + "PREFIX rdfs: <"+RDFS.NAMESPACE+"> " + + "PREFIX ns: <"+ns+"> " + + + "select * " + + "WHERE { " + + " ?s rdf:type ns:Person . " + + " ?s ns:likes ?likes . " + + " ?s rdfs:label ?label . 
" + + "}"; + + final TupleQuery tupleQuery = + cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + TupleQueryResult result = tupleQuery.evaluate(); + + Collection<BindingSet> solution = new LinkedList<BindingSet>(); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", mike), + new BindingImpl("likes", rdf), + new BindingImpl("label", l1) + })); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", bryan), + new BindingImpl("likes", rdf), + new BindingImpl("label", l2) + })); + + compare(result, solution); + + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <sgo...@us...> - 2010-09-15 20:45:22
|
Revision: 3560 http://bigdata.svn.sourceforge.net/bigdata/?rev=3560&view=rev Author: sgossard Date: 2010-09-15 20:45:14 +0000 (Wed, 15 Sep 2010) Log Message: ----------- [maven_scaleout] : Breaking all direct dependency cycles with package 'com.bigdata.util.concurrent', but still appears to have transitive cycles via package 'com.bigdata.counters'. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/ActiveProcess.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/ConcurrencyManager.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/AbstractHaltableProcess.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/ThreadPoolExecutorStatisticsTask.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WriteExecutorServiceStatisticsTask.java Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/ActiveProcess.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/ActiveProcess.java 2010-09-15 20:04:06 UTC (rev 3559) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/ActiveProcess.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -34,10 +34,11 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.log4j.Logger; -import com.bigdata.util.concurrent.DaemonThreadFactory; /** * Command manages the execution and termination of a native process and an @@ -55,9 +56,19 @@ * by the {@link #process}. 
*/ protected final ExecutorService readService = Executors - .newSingleThreadExecutor(new DaemonThreadFactory(getClass() - .getName() - + ".readService")); + .newSingleThreadExecutor( + //Don't use com.bigdata.util.concurrent.DaemonThreadFactory here, trying to break a package loop. -gossard + new ThreadFactory() { + String prefix = getClass().getName()+ ".readService"; + AtomicInteger count = new AtomicInteger(0); + public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setName( prefix + count.incrementAndGet() ); + t.setDaemon(true); + return t; + } + } + ); protected Process process = null; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/ConcurrencyManager.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/ConcurrencyManager.java 2010-09-15 20:04:06 UTC (rev 3559) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/ConcurrencyManager.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -879,7 +879,7 @@ final long delay = 1000; // delay in ms. final TimeUnit unit = TimeUnit.MILLISECONDS; - writeServiceQueueStatisticsTask = new ThreadPoolExecutorStatisticsTask("writeService", + writeServiceQueueStatisticsTask = new WriteExecutorServiceStatisticsTask("writeService", writeService, countersUN, w); txWriteServiceQueueStatisticsTask = new ThreadPoolExecutorStatisticsTask("txWriteService", @@ -934,7 +934,7 @@ * Sampling instruments for the various queues giving us the moving average * of the queue length. 
*/ - private final ThreadPoolExecutorStatisticsTask writeServiceQueueStatisticsTask; + private final WriteExecutorServiceStatisticsTask writeServiceQueueStatisticsTask; private final ThreadPoolExecutorStatisticsTask txWriteServiceQueueStatisticsTask; private final ThreadPoolExecutorStatisticsTask readServiceQueueStatisticsTask; @@ -1066,7 +1066,7 @@ * (exceptions may be thrown if the task fails or the commit fails). The * purpose of group commits is to provide higher throughput for writes on * the store by only syncing the data to disk periodically rather than after - * every write. Group commits are scheduled by the {@link #commitService}. + * every write. Group commits are scheduled by the commitService. * The trigger conditions for group commits may be configured using * {@link ConcurrencyManager.Options}. If you are using the store in a * single threaded context then you may set Added: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WriteExecutorServiceStatisticsTask.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WriteExecutorServiceStatisticsTask.java (rev 0) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WriteExecutorServiceStatisticsTask.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -0,0 +1,326 @@ +/* + * User: gossard + * Date: Sep 15, 2010 + * Time: 1:44:48 PM + */ +package com.bigdata.journal; + +import com.bigdata.counters.CounterSet; +import com.bigdata.counters.Instrument; +import com.bigdata.util.concurrent.IQueueCounters; +import com.bigdata.util.concurrent.ThreadPoolExecutorStatisticsTask; +import com.bigdata.util.concurrent.WriteTaskCounters; + +/** + * Extension of {@link com.bigdata.util.concurrent.ThreadPoolExecutorStatisticsTask} that will collect additional + * information about {@link WriteExecutorService} pools. 
+ * + */ +public class WriteExecutorServiceStatisticsTask extends ThreadPoolExecutorStatisticsTask<WriteExecutorService,WriteTaskCounters> { + //TODO: The chaining on the calculateXXX methods is probably too fragile. In order to guarantee the exact same behavior, + //TODO: I chained the calls off methods that executed directly before them. If there is no ordering dependency, the calculations + //TODO: could be done after the parents calculateAll(). -gossard + + protected double averageActiveCountWithLocksHeld = 0d; + /** time waiting for resource locks. */ + protected double averageLockWaitingTime = 0d; + protected double averageCommitWaitingTime = 0d; + protected double averageCommitServiceTime = 0d; + protected double averageCommitGroupSize = 0d; + protected double averageByteCountPerCommit = 0d; + protected long lockWaitingTime = 0L; + protected long commitWaitingTime = 0L; + protected long commitServiceTime = 0L; + protected double averageReadyCount; + + + public WriteExecutorServiceStatisticsTask(String serviceName, WriteExecutorService service) { + super(serviceName, service); + } + + public WriteExecutorServiceStatisticsTask(String serviceName, WriteExecutorService service, WriteTaskCounters taskCounters) { + super(serviceName, service, taskCounters); + } + + public WriteExecutorServiceStatisticsTask(String serviceName, WriteExecutorService service, WriteTaskCounters taskCounters, double w) { + super(serviceName, service, taskCounters, w); + } + + + @Override + protected void calculateBasicQueueInfo() { + super.calculateBasicQueueInfo(); + calculateActiveCountWithLocks(); + } + + @Override + protected void calculateAverageQueueWait() { + super.calculateAverageQueueWait(); + calculateAverageLockWaitTime(); + } + + @Override + protected void calculateAverageCheckpointTime() { + super.calculateAverageCheckpointTime(); + calculateWriteSpecificValues(); + } + + protected void calculateWriteSpecificValues() { + + final WriteExecutorService tmp = service; + + final 
WriteTaskCounters finalWriteTaskCounters = taskCounters; + + final long groupCommitCount = tmp.getGroupCommitCount(); + + if (groupCommitCount > 0) { + + // Time waiting for the commit. + { + + final long newValue = finalWriteTaskCounters.commitWaitingNanoTime + .get(); + + final long delta = newValue - commitWaitingTime; + + assert delta >= 0 : "" + delta; + + commitWaitingTime = newValue; + + averageCommitWaitingTime = getMovingAverage( + averageCommitWaitingTime, + (delta * scalingFactor / groupCommitCount), + w); + + } + + // Time servicing the commit. + { + + final long newValue = finalWriteTaskCounters.commitServiceNanoTime + .get(); + + final long delta = newValue - commitServiceTime; + + assert delta >= 0 : "" + delta; + + commitServiceTime = newValue; + + averageCommitServiceTime = getMovingAverage( + averageCommitServiceTime, + (delta * scalingFactor / groupCommitCount), + w); + + } + + } + + // moving average of the size nready. + averageReadyCount = getMovingAverage( + averageReadyCount, tmp.getReadyCount(), w); + + // moving average of the size of the commit groups. + averageCommitGroupSize = getMovingAverage( + averageCommitGroupSize, tmp.getCommitGroupSize(), w); + + // moving average of the #of bytes written since the + // previous commit. + averageByteCountPerCommit = getMovingAverage( + averageByteCountPerCommit, tmp + .getByteCountPerCommit(), w); + + } + + protected void calculateAverageLockWaitTime() { + /* + * Time waiting on resource lock(s). 
+ */ + + final long newValue = taskCounters.lockWaitingNanoTime.get(); + + final long delta = newValue - lockWaitingTime; + + assert delta >= 0 : "" + delta; + + lockWaitingTime = newValue; + + averageLockWaitingTime = getMovingAverage( + averageLockWaitingTime, + (delta * scalingFactor / taskCounters.taskCompleteCount.get()), + w); + + } + + protected void calculateActiveCountWithLocks() { + /* + * Note: For the WriteExecutorService we compute a variant of + * [activeCount] the which only counts tasks that are currently + * holding their exclusive resource lock(s). This is the real + * concurrency of the write service since tasks without locks + * are waiting on other tasks so that they can obtain their + * lock(s) and "run". + */ + + final int activeCountWithLocksHeld = service.getActiveTaskCountWithLocksHeld(); + + averageActiveCountWithLocksHeld = getMovingAverage( + averageActiveCountWithLocksHeld, activeCountWithLocksHeld, w); + + } + + + + + @Override + protected void fillCounterSet(CounterSet counterSet) { + super.fillCounterSet(counterSet);//fills out all the basic executor queue info. + + final WriteExecutorService writeService = service; + + /* + * Simple counters. 
+ */ + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.CommitCount, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getGroupCommitCount()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.AbortCount, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getAbortCount()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.OverflowCount, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getOverflowCount()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.RejectedExecutionCount, + new Instrument<Long>() { + public void sample() { + setValue(writeService + .getRejectedExecutionCount()); + } + }); + + /* + * Maximum observed values. + */ + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.MaxCommitWaitingTime, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getMaxCommitWaitingTime()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.MaxCommitServiceTime, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getMaxCommitServiceTime()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.MaxCommitGroupSize, + new Instrument<Long>() { + public void sample() { + setValue((long) writeService + .getMaxCommitGroupSize()); + } + }); + + counterSet.addCounter(IQueueCounters.IWriteServiceExecutorCounters.MaxRunning, + new Instrument<Long>() { + public void sample() { + setValue(writeService.getMaxRunning()); + } + }); + + /* + * Moving averages available only for the write executor + * service. 
+ */ + + counterSet + .addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageActiveCountWithLocksHeld, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageActiveCountWithLocksHeld); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageReadyCount, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageReadyCount); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageCommitGroupSize, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageCommitGroupSize); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageLockWaitingTime, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageLockWaitingTime); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageCheckpointTime, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageCheckpointTime); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageCommitWaitingTime, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageCommitWaitingTime); + } + }); + + counterSet.addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageCommitServiceTime, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageCommitServiceTime); + } + }); + + counterSet + .addCounter( + IQueueCounters.IWriteServiceExecutorCounters.AverageByteCountPerCommit, + new Instrument<Double>() { + @Override + protected void sample() { + setValue(averageByteCountPerCommit); + } + }); + + } +} Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WriteExecutorServiceStatisticsTask.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL 
Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java 2010-09-15 20:04:06 UTC (rev 3559) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -40,6 +40,7 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; +import com.bigdata.util.InnerCause; import org.apache.log4j.Logger; import com.bigdata.btree.keys.KVO; @@ -1222,6 +1223,13 @@ } + @Override + protected <T extends Throwable> void logInnerCause(T cause) { + //avoid logging warnings on BufferClosedException. Not done in AbstractHaltableProcess to avoid introducing a package cycle. -gossard + if (!InnerCause.isInnerCause(cause, BufferClosedException.class)) + super.logInnerCause(cause); + } + /** * This timeout is used to log warning messages when a sink is slow. 
*/ Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/AbstractHaltableProcess.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/AbstractHaltableProcess.java 2010-09-15 20:04:06 UTC (rev 3559) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/AbstractHaltableProcess.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -36,7 +36,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import com.bigdata.relation.accesspath.BufferClosedException; import com.bigdata.util.InnerCause; /** @@ -101,36 +100,14 @@ halt = true; - final boolean isFirstCause = firstCause.compareAndSet( - null/* expect */, cause); + firstCause.compareAndSet( null/* expect */, cause); if (log.isEnabledFor(Level.WARN)) try { - if (!InnerCause.isInnerCause(cause, InterruptedException.class) - && !InnerCause.isInnerCause(cause, - CancellationException.class) - && !InnerCause.isInnerCause(cause, - ClosedByInterruptException.class) - && !InnerCause.isInnerCause(cause, - RejectedExecutionException.class) - && !InnerCause.isInnerCause(cause, - BufferClosedException.class)) { + logInnerCause(cause); - /* - * This logs all unexpected causes, not just the first one - * to be reported for this join task. - * - * Note: The master will log the firstCause that it receives - * as an error. - */ - - log.warn(this + " : isFirstCause=" + isFirstCause + " : " - + cause, cause); - - } - } catch (Throwable ex) { // error in logging system - ignore. 
@@ -141,4 +118,27 @@ } + protected <T extends Throwable> void logInnerCause(T cause) { + if (!InnerCause.isInnerCause(cause, InterruptedException.class) + && !InnerCause.isInnerCause(cause, + CancellationException.class) + && !InnerCause.isInnerCause(cause, + ClosedByInterruptException.class) + && !InnerCause.isInnerCause(cause, + RejectedExecutionException.class) ) + { + /* + * This logs all unexpected causes, not just the first one + * to be reported for this join task. + * + * Note: The master will log the firstCause that it receives + * as an error. + */ + + log.warn(this + " : isFirstCause=" + (firstCause.get() == cause) + " : " + + cause, cause); + + } + } + } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/ThreadPoolExecutorStatisticsTask.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/ThreadPoolExecutorStatisticsTask.java 2010-09-15 20:04:06 UTC (rev 3559) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/concurrent/ThreadPoolExecutorStatisticsTask.java 2010-09-15 20:45:14 UTC (rev 3560) @@ -20,25 +20,24 @@ * including the moving average of its queue length, queuing times, etc. * * @todo refactor to layer {@link QueueSizeMovingAverageTask} then - * {@link ThreadPoolExecutorBaseStatisticsTask}, then this class, then a - * derived class for the {@link WriteServiceExecutor}. + * {@link ThreadPoolExecutorBaseStatisticsTask}, then this class, then sub-classes. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -public class ThreadPoolExecutorStatisticsTask implements Runnable { +public class ThreadPoolExecutorStatisticsTask<EXEC extends ThreadPoolExecutor,COUNTERS extends TaskCounters> implements Runnable { protected static final Logger log = Logger.getLogger(ThreadPoolExecutorStatisticsTask.class); /** * The label for the executor service (used in log messages). 
*/ - private final String serviceName; + protected final String serviceName; /** * The executor service that is being monitored. */ - private final ThreadPoolExecutor service; + protected final EXEC service; // /** // * The time when we started to collect data about the {@link #service} (set by the ctor). @@ -48,26 +47,25 @@ /** * The weight used to compute the moving average. */ - private final double w; + protected final double w; /** * #of samples taken so far. */ - private long nsamples = 0; + protected long nsamples = 0; /* * There are several different moving averages which are computed. */ // private double averageQueueSize = 0d; - private double averageActiveCount = 0d; - private double averageQueueLength = 0d; - private double averageActiveCountWithLocksHeld = 0d; + protected double averageActiveCount = 0d; + protected double averageQueueLength = 0d; /** * Data collected about {@link AbstractTask}s run on a service (optional). */ - private final TaskCounters taskCounters; + protected final COUNTERS taskCounters; /* * These are moving averages based on the optional TaskCounters. @@ -80,40 +78,29 @@ */ /** time waiting on the queue until the task begins to execute. */ - private double averageQueueWaitingTime = 0d; - /** time waiting for resource locks. */ - private double averageLockWaitingTime = 0d; + protected double averageQueueWaitingTime = 0d; + /** time doing work (does not include time to acquire resources locks or commit time). */ - private double averageServiceTime = 0d; + protected double averageServiceTime = 0d; /** time checkpointing indices (included in the {@link #averageServiceTime}). */ - private double averageCheckpointTime = 0d; + protected double averageCheckpointTime = 0d; + /** total time from submit to completion. 
*/ - private double averageQueuingTime = 0d; + protected double averageQueuingTime = 0d; - private double averageCommitWaitingTime = 0d; - private double averageCommitServiceTime = 0d; - private double averageCommitGroupSize = 0d; - private double averageByteCountPerCommit = 0d; - /* * private variables used to compute the delta in various counters since * they were last sampled. */ - private long queueWaitingTime = 0L; - private long lockWaitingTime = 0L; - private long serviceTime = 0L; - private long checkpointTime = 0L; // Note: checkpointTime is included in the serviceTime. - private long queuingTime = 0L; + protected long queueWaitingTime = 0L; + protected long serviceTime = 0L; + protected long checkpointTime = 0L; // Note: checkpointTime is included in the serviceTime. + protected long queuingTime = 0L; - private long commitWaitingTime = 0L; - private long commitServiceTime = 0L; - - private double averageReadyCount; - /** * Scaling factor converts nanoseconds to milliseconds. */ - static final double scalingFactor = 1d / TimeUnit.NANOSECONDS.convert(1, + static final protected double scalingFactor = 1d / TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); /** @@ -150,7 +137,7 @@ * @param service * The service to be monitored. */ - public ThreadPoolExecutorStatisticsTask(String serviceName, ThreadPoolExecutor service) { + public ThreadPoolExecutorStatisticsTask(String serviceName, EXEC service) { this(serviceName, service, null/* taskCounters */, DEFAULT_WEIGHT); @@ -168,8 +155,8 @@ * The per-task counters used to compute the latency data for * tasks run on that service. 
*/ - public ThreadPoolExecutorStatisticsTask(String serviceName, ThreadPoolExecutor service, - TaskCounters taskCounters) { + public ThreadPoolExecutorStatisticsTask(String serviceName, EXEC service, + COUNTERS taskCounters) { this(serviceName, service, taskCounters, DEFAULT_WEIGHT); @@ -189,8 +176,8 @@ * The weight to be used by * {@link #getMovingAverage(double, double, double)} */ - public ThreadPoolExecutorStatisticsTask(String serviceName, ThreadPoolExecutor service, - TaskCounters taskCounters, double w) { + public ThreadPoolExecutorStatisticsTask(String serviceName, EXEC service, + COUNTERS taskCounters, double w) { if (serviceName == null) throw new IllegalArgumentException(); @@ -240,7 +227,7 @@ /** * The moving average of the queue size. */ - private final MovingAverageTask queueSizeTask = new MovingAverageTask( + protected final MovingAverageTask queueSizeTask = new MovingAverageTask( "queueSize", new Callable<Integer>() { public Integer call() { return service.getQueue().size(); @@ -252,7 +239,7 @@ * * @see TaskCounters#interArrivalNanoTime */ - private DeltaMovingAverageTask interArrivalNanoTimeTask = new DeltaMovingAverageTask( + protected DeltaMovingAverageTask interArrivalNanoTimeTask = new DeltaMovingAverageTask( "interArrivalTime", new Callable<Long>() { public Long call() { return taskCounters.interArrivalNanoTime.get(); @@ -264,7 +251,7 @@ * * @see TaskCounters#serviceNanoTime */ - private DeltaMovingAverageTask serviceNanoTimeTask = new DeltaMovingAverageTask( + protected DeltaMovingAverageTask serviceNanoTimeTask = new DeltaMovingAverageTask( "serviceNanoTime", new Callable<Long>() { public Long call() { return taskCounters.serviceNanoTime.get(); @@ -282,281 +269,213 @@ try { - { + calculateAll(); + + nsamples++; - queueSizeTask.run(); - - // queueSize := #of tasks in the queue. 
- final int queueSize = service.getQueue().size(); + } catch (Exception ex) { - // activeCount := #of tasks assigned a worker thread - final int activeCount = service.getActiveCount(); + log.warn(serviceName, ex); -//// This is just the tasks that are currently waiting to run (not -//// assigned to any thread). -// averageQueueSize = getMovingAverage(averageQueueSize, -// queueSize, w); + } + + } - // This is just the tasks that are currently running (assigned - // to a worker thread). - averageActiveCount = getMovingAverage(averageActiveCount, - activeCount, w); + /** + * + * Calculates all averages and updates the task counters if provided. + * Currently this method calculates the basic executor queue info, and if task counters are present, + * will also calculate queue wait time and checkpoint times. All of these calculations are done in seperate methods + * so that subclasses can override them if needed. + * + */ + protected void calculateAll() { + calculateBasicQueueInfo(); + calculateTaskCountersIfPresent(); + } - /* - * Note: this is the primary average of interest - it includes - * both the tasks waiting to be run and those that are currently - * running in the definition of the "queue length". - */ - averageQueueLength = getMovingAverage(averageQueueLength, - (activeCount + queueSize), w); + /** + * Calculates average queue wait and checkpoint times if a task counter was provided. Sub-classes may override this + * method to introduce additional task counter calculations. + */ + protected void calculateTaskCountersIfPresent() { + if (taskCounters != null) { - } + /* + * Compute some latency data that relies on the task counters. + */ - if (service instanceof WriteExecutorService) { + // #of tasks that have been submitted so far. 
+ final long taskCount = taskCounters.taskCompleteCount.get(); - /* - * Note: For the WriteExecutorService we compute a variant of - * [activeCount] the which only counts tasks that are currently - * holding their exclusive resource lock(s). This is the real - * concurrency of the write service since tasks without locks - * are waiting on other tasks so that they can obtain their - * lock(s) and "run". - */ - - final int activeCountWithLocksHeld = ((WriteExecutorService) service) - .getActiveTaskCountWithLocksHeld(); + if (taskCount > 0) { - averageActiveCountWithLocksHeld = getMovingAverage( - averageActiveCountWithLocksHeld, activeCountWithLocksHeld, w); + calculateAverageQueueWait(); + calculateAverageCheckpointTime(); + } - if (taskCounters != null) { - - /* - * Compute some latency data that relies on the task counters. - */ + } + } - // #of tasks that have been submitted so far. - final long taskCount = taskCounters.taskCompleteCount.get(); + protected void calculateAverageCheckpointTime() { + /* + * Time that the task is being serviced (after its obtained + * any locks). + */ + { - if (taskCount > 0) { + final long newValue = taskCounters.serviceNanoTime.get(); - /* - * Time waiting on the queue to begin execution. - */ - { + final long delta = newValue - serviceTime; - final long newValue = taskCounters.queueWaitingNanoTime.get(); + assert delta >= 0 : "" + delta; - final long delta = newValue - queueWaitingTime; + serviceTime = newValue; - assert delta >= 0 : "" + delta; + averageServiceTime = getMovingAverage( + averageServiceTime, + (delta * scalingFactor / taskCounters.taskCompleteCount.get()), + w); - queueWaitingTime = newValue; + } - averageQueueWaitingTime = getMovingAverage( - averageQueueWaitingTime, - (delta * scalingFactor / taskCounters.taskCompleteCount.get()), - w); + /* + * The moving average of the change in the cumulative + * inter-arrival time. 
+ */ + interArrivalNanoTimeTask.run(); - } + /* + * The moving average of the change in the total task + * service time. + */ + serviceNanoTimeTask.run(); - /* - * Time waiting on resource lock(s). - */ - if(service instanceof WriteExecutorService) { - - final long newValue = ((WriteTaskCounters) taskCounters).lockWaitingNanoTime - .get(); + /* + * Time that the task is busy checkpoint its indices (this + * is already reported as part of the service time but which + * is broken out here as a detail). + */ + { - final long delta = newValue - lockWaitingTime; + final long newValue = taskCounters.checkpointNanoTime.get(); - assert delta >= 0 : "" + delta; + final long delta = newValue - checkpointTime; - lockWaitingTime = newValue; + assert delta >= 0 : "" + delta; - averageLockWaitingTime = getMovingAverage( - averageLockWaitingTime, - (delta * scalingFactor / taskCounters.taskCompleteCount.get()), - w); + checkpointTime = newValue; - } - - /* - * Time that the task is being serviced (after its obtained - * any locks). - */ - { + averageCheckpointTime = getMovingAverage( + averageCheckpointTime, + (delta * scalingFactor / taskCounters.taskCompleteCount.get()), + w); - final long newValue = taskCounters.serviceNanoTime.get(); + } - final long delta = newValue - serviceTime; + /* + * Queuing time (elapsed time from submit until completion). + */ + { - assert delta >= 0 : "" + delta; + final long newValue = taskCounters.queuingNanoTime.get(); - serviceTime = newValue; + final long delta = newValue - queuingTime; - averageServiceTime = getMovingAverage( - averageServiceTime, - (delta * scalingFactor / taskCounters.taskCompleteCount.get()), - w); + assert delta >= 0 : "" + delta; - } + queuingTime = newValue; - /* - * The moving average of the change in the cumulative - * inter-arrival time. 
- */ - interArrivalNanoTimeTask.run(); + averageQueuingTime = getMovingAverage( + averageQueuingTime, + (delta * scalingFactor / taskCounters.taskCompleteCount.get()), + w); - /* - * The moving average of the change in the total task - * service time. - */ - serviceNanoTimeTask.run(); - - /* - * Time that the task is busy checkpoint its indices (this - * is already reported as part of the service time but which - * is broken out here as a detail). - */ - { + } + } - final long newValue = taskCounters.checkpointNanoTime.get(); - final long delta = newValue - checkpointTime; - assert delta >= 0 : "" + delta; + protected void calculateAverageQueueWait() { + /* + * Time waiting on the queue to begin execution. + */ + { - checkpointTime = newValue; + final long newValue = taskCounters.queueWaitingNanoTime.get(); - averageCheckpointTime = getMovingAverage( - averageCheckpointTime, - (delta * scalingFactor / taskCounters.taskCompleteCount.get()), - w); + final long delta = newValue - queueWaitingTime; - } + assert delta >= 0 : "" + delta; - /* - * Queuing time (elapsed time from submit until completion). - */ - { + queueWaitingTime = newValue; - final long newValue = taskCounters.queuingNanoTime.get(); + averageQueueWaitingTime = getMovingAverage( + averageQueueWaitingTime, + (delta * scalingFactor / taskCounters.taskCompleteCount.get()), + w); - final long delta = newValue - queuingTime; + } + } - assert delta >= 0 : "" + delta; + protected void calculateBasicQueueInfo() { + queueSizeTask.run(); - queuingTime = newValue; + // queueSize := #of tasks in the queue. + final int queueSize = service.getQueue().size(); - averageQueuingTime = getMovingAverage( - averageQueuingTime, - (delta * scalingFactor / taskCounters.taskCompleteCount.get()), - w); + // activeCount := #of tasks assigned a worker thread + final int activeCount = service.getActiveCount(); - } +//// This is just the tasks that are currently waiting to run (not +//// assigned to any thread). 
+// averageQueueSize = getMovingAverage(averageQueueSize, +// queueSize, w); - } + // This is just the tasks that are currently running (assigned + // to a worker thread). + averageActiveCount = getMovingAverage(averageActiveCount, + activeCount, w); - if (service instanceof WriteExecutorService) { + /* + * Note: this is the primary average of interest - it includes + * both the tasks waiting to be run and those that are currently + * running in the definition of the "queue length". + */ + averageQueueLength = getMovingAverage(averageQueueLength, + (activeCount + queueSize), w); + } - final WriteExecutorService tmp = (WriteExecutorService) service; + /** + * Convenience call to generate a counter set. Currently creates a new CounterSet and calls fillCounterSet + * to populate the data. + * + * @return A newly created and filled <i>counterSet</i> + */ + public CounterSet getCounters() { - final WriteTaskCounters writeTaskCounters = (WriteTaskCounters) taskCounters; - - final long groupCommitCount = tmp.getGroupCommitCount(); + final CounterSet counterSet = new CounterSet(); + fillCounterSet(counterSet); - if (groupCommitCount > 0) { - // Time waiting for the commit. - { + return counterSet; - final long newValue = writeTaskCounters.commitWaitingNanoTime - .get(); +} - final long delta = newValue - commitWaitingTime; - - assert delta >= 0 : "" + delta; - - commitWaitingTime = newValue; - - averageCommitWaitingTime = getMovingAverage( - averageCommitWaitingTime, - (delta * scalingFactor / groupCommitCount), - w); - - } - - // Time servicing the commit. - { - - final long newValue = writeTaskCounters.commitServiceNanoTime - .get(); - - final long delta = newValue - commitServiceTime; - - assert delta >= 0 : "" + delta; - - commitServiceTime = newValue; - - averageCommitServiceTime = getMovingAverage( - averageCommitServiceTime, - (delta * scalingFactor / groupCommitCount), - w); - - } - - } - - // moving average of the size nready. 
- averageReadyCount = getMovingAverage( - averageReadyCount, tmp.getReadyCount(), w); - - // moving average of the size of the commit groups. - averageCommitGroupSize = getMovingAverage( - averageCommitGroupSize, tmp.getCommitGroupSize(), w); - - // moving average of the #of bytes written since the - // previous commit. - averageByteCountPerCommit = getMovingAverage( - averageByteCountPerCommit, tmp - .getByteCountPerCommit(), w); - - } // end (if service instanceof WriteExecutorService ) - - } - - nsamples++; - - } catch (Exception ex) { - - log.warn(serviceName, ex); - - } - - } - /** * Adds counters for all innate variables defined for a * {@link ThreadPoolExecutor} and for each of the variables computed by this - * class. Note that some variables (e.g., the lock waiting time) are only - * available when the <i>service</i> specified to the ctor is a - * {@link WriteExecutorService}. - * - * @param counterSet - * The counters will be added to this {@link CounterSet}. - * - * @return The caller's <i>counterSet</i> + * class. Sub-classes can override this method to fill in additional counters in the provided counter set. + * + * @param counterSet the set that will have the counters added to it. + * */ - public CounterSet getCounters() { - - final CounterSet counterSet = new CounterSet(); - + protected void fillCounterSet(CounterSet counterSet) { /* - * Defined for ThreadPoolExecutor. - */ + * Defined for ThreadPoolExecutor. + */ // Note: reported as moving average instead. // counterSet.addCounter("#active", @@ -565,7 +484,7 @@ // setValue(service.getActiveCount()); // } // }); -// +// // Note: reported as moving average instead. // counterSet.addCounter("#queued", // new Instrument<Integer>() { @@ -783,7 +702,7 @@ setValue(1d / t); } }); - + counterSet .addCounter( IThreadPoolExecutorTaskCounters.AverageQueueWaitingTime, @@ -813,163 +732,7 @@ }); } + } - /* - * These data are available only for the write service. 
- */ - if (service instanceof WriteExecutorService) { - final WriteExecutorService writeService = (WriteExecutorService) service; - - /* - * Simple counters. - */ - - counterSet.addCounter(IWriteServiceExecutorCounters.CommitCount, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getGroupCommitCount()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.AbortCount, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getAbortCount()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.OverflowCount, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getOverflowCount()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.RejectedExecutionCount, - new Instrument<Long>() { - public void sample() { - setValue(writeService - .getRejectedExecutionCount()); - } - }); - - /* - * Maximum observed values. - */ - - counterSet.addCounter(IWriteServiceExecutorCounters.MaxCommitWaitingTime, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getMaxCommitWaitingTime()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.MaxCommitServiceTime, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getMaxCommitServiceTime()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.MaxCommitGroupSize, - new Instrument<Long>() { - public void sample() { - setValue((long) writeService - .getMaxCommitGroupSize()); - } - }); - - counterSet.addCounter(IWriteServiceExecutorCounters.MaxRunning, - new Instrument<Long>() { - public void sample() { - setValue(writeService.getMaxRunning()); - } - }); - - /* - * Moving averages available only for the write executor - * service. 
- */ - - counterSet - .addCounter( - IWriteServiceExecutorCounters.AverageActiveCountWithLocksHeld, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageActiveCountWithLocksHeld); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageReadyCount, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageReadyCount); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageCommitGroupSize, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageCommitGroupSize); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageLockWaitingTime, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageLockWaitingTime); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageCheckpointTime, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageCheckpointTime); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageCommitWaitingTime, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageCommitWaitingTime); - } - }); - - counterSet.addCounter( - IWriteServiceExecutorCounters.AverageCommitServiceTime, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageCommitServiceTime); - } - }); - - counterSet - .addCounter( - IWriteServiceExecutorCounters.AverageByteCountPerCommit, - new Instrument<Double>() { - @Override - protected void sample() { - setValue(averageByteCountPerCommit); - } - }); - - } - - return counterSet; - } - -} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-15 20:04:12
|
Revision: 3559 http://bigdata.svn.sourceforge.net/bigdata/?rev=3559&view=rev Author: thompsonbry Date: 2010-09-15 20:04:06 +0000 (Wed, 15 Sep 2010) Log Message: ----------- Cleaning up System.err usage. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 19:52:00 UTC (rev 3558) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 20:04:06 UTC (rev 3559) @@ -538,8 +538,8 @@ */ protected void lifeCycleSetUpOperator(final int bopId) { - System.err.println("lifeCycleSetUpOperator: queryId=" + queryId - + ", bopId=" + bopId); + if (log.isTraceEnabled()) + log.trace("queryId=" + queryId + ", bopId=" + bopId); } @@ -556,8 +556,8 @@ */ protected void lifeCycleTearDownOperator(final int bopId) { - System.err.println("lifeCycleTearDownOperator: queryId=" + queryId - + ", bopId=" + bopId); + if (log.isTraceEnabled()) + log.trace("queryId=" + queryId + ", bopId=" + bopId); } @@ -567,7 +567,8 @@ */ protected void lifeCycleSetUpQuery() { - System.err.println("lifeCycleSetUpQuery: queryId=" + queryId); + if (log.isTraceEnabled()) + log.trace("queryId=" + queryId); } @@ -577,7 +578,8 @@ */ protected void lifeCycleTearDownQuery() { - System.err.println("lifeCycleTearDownQuery: queryId=" + queryId); + if (log.isTraceEnabled()) + log.trace("queryId=" + queryId); } Modified: 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 19:52:00 UTC (rev 3558) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 20:04:06 UTC (rev 3559) @@ -224,14 +224,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(1, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the query solution stats. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println(stats.toString()); + if (log.isInfoEnabled()) + log.info(stats.toString()); // query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -314,14 +316,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(2, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: "+stats.toString()); + if (log.isInfoEnabled()) + log.info("start: "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -330,24 +334,12 @@ assertEquals(1L, stats.chunksOut.get()); } -// // validate the stats for the access path. -// { -// final BOpStats stats = statsMap.get(predId); -// assertNotNull(stats); -// System.err.println("pred : "+stats.toString()); -// -// // verify query solution stats details. -// assertEquals(1L, stats.chunksIn.get()); -// assertEquals(1L, stats.unitsIn.get()); -// assertEquals(1L, stats.unitsOut.get()); -// assertEquals(1L, stats.chunksOut.get()); -// } - // validate the stats for the join operator. 
{ final BOpStats stats = statsMap.get(joinId); assertNotNull(stats); - System.err.println("join : "+stats.toString()); + if (log.isInfoEnabled()) + log.info("join : "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -525,14 +517,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(3, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("start: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -541,24 +535,12 @@ assertEquals(1L, stats.chunksOut.get()); } - // // validate the stats for the access path. - // { - // final BOpStats stats = statsMap.get(predId); - // assertNotNull(stats); - // System.err.println("pred : "+stats.toString()); - // - // // verify query solution stats details. - // assertEquals(1L, stats.chunksIn.get()); - // assertEquals(1L, stats.unitsIn.get()); - // assertEquals(1L, stats.unitsOut.get()); - // assertEquals(1L, stats.chunksOut.get()); - // } - // validate the stats for the 1st join operator. { final BOpStats stats = statsMap.get(joinId1); assertNotNull(stats); - System.err.println("join1: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join1: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -571,7 +553,8 @@ { final BOpStats stats = statsMap.get(joinId2); assertNotNull(stats); - System.err.println("join2: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join2: " + stats.toString()); // verify query solution stats details. 
assertEquals(1L, stats.chunksIn.get()); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-15 19:52:00 UTC (rev 3558) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-15 20:04:06 UTC (rev 3559) @@ -368,14 +368,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(1, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the query solution stats. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println(stats.toString()); + if (log.isInfoEnabled()) + log.info(stats.toString()); // query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -475,14 +477,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(2, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: "+stats.toString()); + if (log.isInfoEnabled()) + log.info("start: "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -491,24 +495,12 @@ assertEquals(1L, stats.chunksOut.get()); } -// // validate the stats for the access path. -// { -// final BOpStats stats = statsMap.get(predId); -// assertNotNull(stats); -// System.err.println("pred : "+stats.toString()); -// -// // verify query solution stats details. 
-// assertEquals(1L, stats.chunksIn.get()); -// assertEquals(1L, stats.unitsIn.get()); -// assertEquals(1L, stats.unitsOut.get()); -// assertEquals(1L, stats.chunksOut.get()); -// } - // validate the stats for the join operator. { final BOpStats stats = statsMap.get(joinId); assertNotNull(stats); - System.err.println("join : "+stats.toString()); + if (log.isInfoEnabled()) + log.info("join : "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -698,14 +690,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(3, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("start: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -714,24 +708,12 @@ assertEquals(1L, stats.chunksOut.get()); } - // // validate the stats for the access path. - // { - // final BOpStats stats = statsMap.get(predId); - // assertNotNull(stats); - // System.err.println("pred : "+stats.toString()); - // - // // verify query solution stats details. - // assertEquals(1L, stats.chunksIn.get()); - // assertEquals(1L, stats.unitsIn.get()); - // assertEquals(1L, stats.unitsOut.get()); - // assertEquals(1L, stats.chunksOut.get()); - // } - // validate the stats for the 1st join operator. { final BOpStats stats = statsMap.get(joinId1); assertNotNull(stats); - System.err.println("join1: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join1: " + stats.toString()); // verify query solution stats details. 
assertEquals(1L, stats.chunksIn.get()); @@ -744,7 +726,8 @@ { final BOpStats stats = statsMap.get(joinId2); assertNotNull(stats); - System.err.println("join2: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join2: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); // @todo depends on where the shards are. @@ -757,7 +740,8 @@ { final BOpStats stats = statsMap.get(sliceId); assertNotNull(stats); - System.err.println("slice: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("slice: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); // @todo? Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java 2010-09-15 19:52:00 UTC (rev 3558) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java 2010-09-15 20:04:06 UTC (rev 3559) @@ -407,14 +407,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(1, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the query solution stats. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println(stats.toString()); + if (log.isInfoEnabled()) + log.info(stats.toString()); // query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -514,14 +516,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(2, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. 
{ final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: "+stats.toString()); + if (log.isInfoEnabled()) + log.info("start: "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -530,24 +534,12 @@ assertEquals(1L, stats.chunksOut.get()); } -// // validate the stats for the access path. -// { -// final BOpStats stats = statsMap.get(predId); -// assertNotNull(stats); -// System.err.println("pred : "+stats.toString()); -// -// // verify query solution stats details. -// assertEquals(1L, stats.chunksIn.get()); -// assertEquals(1L, stats.unitsIn.get()); -// assertEquals(1L, stats.unitsOut.get()); -// assertEquals(1L, stats.chunksOut.get()); -// } - // validate the stats for the join operator. { final BOpStats stats = statsMap.get(joinId); assertNotNull(stats); - System.err.println("join : "+stats.toString()); + if (log.isInfoEnabled()) + log.info("join : "+stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -737,14 +729,16 @@ // validate the stats map. assertNotNull(statsMap); assertEquals(3, statsMap.size()); - System.err.println(statsMap.toString()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); } // validate the stats for the start operator. { final BOpStats stats = statsMap.get(startId); assertNotNull(stats); - System.err.println("start: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("start: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -753,24 +747,12 @@ assertEquals(1L, stats.chunksOut.get()); } - // // validate the stats for the access path. - // { - // final BOpStats stats = statsMap.get(predId); - // assertNotNull(stats); - // System.err.println("pred : "+stats.toString()); - // - // // verify query solution stats details. 
- // assertEquals(1L, stats.chunksIn.get()); - // assertEquals(1L, stats.unitsIn.get()); - // assertEquals(1L, stats.unitsOut.get()); - // assertEquals(1L, stats.chunksOut.get()); - // } - // validate the stats for the 1st join operator. { final BOpStats stats = statsMap.get(joinId1); assertNotNull(stats); - System.err.println("join1: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join1: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); @@ -783,7 +765,8 @@ { final BOpStats stats = statsMap.get(joinId2); assertNotNull(stats); - System.err.println("join2: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("join2: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); // @todo depends on where the shards are. @@ -796,7 +779,8 @@ { final BOpStats stats = statsMap.get(sliceId); assertNotNull(stats); - System.err.println("slice: " + stats.toString()); + if (log.isInfoEnabled()) + log.info("slice: " + stats.toString()); // verify query solution stats details. assertEquals(1L, stats.chunksIn.get()); // @todo? This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-15 19:52:07
|
Revision: 3558 http://bigdata.svn.sourceforge.net/bigdata/?rev=3558&view=rev Author: thompsonbry Date: 2010-09-15 19:52:00 +0000 (Wed, 15 Sep 2010) Log Message: ----------- Added stress tests and tracked down the query termination problem. In fact, it was an assertion on totalAvailableChunkCount in RunState. The total #of available chunks can transiently become negative during query evaluation due to the interleaving of operators for a given query. The assertion has been removed (conditionally disabled). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-15 19:52:00 UTC (rev 3558) @@ -72,7 +72,7 @@ * operator. */ final public int altSinkChunksOut; - + /** * The statistics for the execution of the bop against the partition on the * service. 
@@ -119,4 +119,21 @@ this.taskStats = taskStats; } + public String toString() { + final StringBuilder sb = new StringBuilder(getClass().getName()); + sb.append("{queryId=" + queryId); + sb.append(",bopId=" + bopId); + sb.append(",partitionId=" + partitionId); + sb.append(",serviceId=" + serviceId); + if (cause != null) + sb.append(",cause=" + cause); + sb.append(",sinkId=" + sinkId); + sb.append(",sinkChunksOut=" + sinkChunksOut); + sb.append(",altSinkId=" + altSinkId); + sb.append(",altSinkChunksOut=" + altSinkChunksOut); + sb.append(",stats=" + taskStats); + sb.append("}"); + return sb.toString(); + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-15 19:52:00 UTC (rev 3558) @@ -42,7 +42,9 @@ import com.bigdata.bop.BOp; /** - * The run state for a {@link RunningQuery}. + * The run state for a {@link RunningQuery}. This class is NOT thread-safe. + * {@link RunningQuery} uses an internal lock to serialize requests against the + * public methods of this class. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ @@ -61,6 +63,16 @@ } /** + * Note: Due to concurrency, it is possible for an {@link IChunkMessage} to + * be accepted and the corresponding chunk task started, before a + * {@link RunState#startOp(StartOpMessage)} transition has been fully + * processed. This means that the {@link RunState#totalAvailableChunkCount} + * can become transiently negative. This flag disables asserts which would + * otherwise fail on legal transient negatives. + */ + static private boolean availableChunkCountMayBeNegative = true; + + /** * The query. 
*/ private final RunningQuery query; @@ -76,19 +88,13 @@ private long nsteps = 0; /** - * The #of tasks for this query which have started but not yet halted and - * ZERO (0) if this is not the query coordinator. - * <p> - * This is guarded by the {@link #runningStateLock}. + * The #of tasks for this query which have started but not yet halted. */ private long totalRunningTaskCount = 0; /** * The #of chunks for this query of which a running task has made available - * but which have not yet been accepted for processing by another task and - * ZERO (0) if this is not the query coordinator. - * <p> - * This is guarded by the {@link #runningStateLock}. + * but which have not yet been accepted for processing by another task. */ private long totalAvailableChunkCount = 0; @@ -100,27 +106,22 @@ * <p> * The movement of the intermediate binding set chunks forms an acyclic * directed graph. This map is used to track the #of chunks available for - * each bop in the pipeline. When a bop has no more incoming chunks, we send - * an asynchronous message to all nodes on which that bop had executed - * informing the {@link QueryEngine} on that node that it should immediately - * release all resources associated with that bop. - * <p> - * This is guarded by the {@link #runningStateLock}. + * each {@link BOp} in the pipeline. When a {@link BOp} has no more incoming + * chunks, we send an asynchronous message to all nodes on which that + * {@link BOp} had executed informing the {@link QueryEngine} on that node + * that it should immediately release all resources associated with that + * {@link BOp}. */ private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); /** * A collection reporting on the #of instances of a given {@link BOp} which * are concurrently executing. - * <p> - * This is guarded by the {@link #runningStateLock}. 
*/ private final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningTaskCountMap = new LinkedHashMap<Integer, AtomicLong>(); /** * A collection of the operators which have executed at least once. - * <p> - * This is guarded by the {@link #runningStateLock}. */ private final Set<Integer/* bopId */> startedSet = new LinkedHashSet<Integer>(); @@ -140,6 +141,9 @@ // query.lifeCycleSetUpQuery(); + if (log.isInfoEnabled()) + log.info(msg.toString()); + final Integer bopId = Integer.valueOf(msg.getBOpId()); totalAvailableChunkCount++; @@ -161,11 +165,6 @@ } - if (log.isInfoEnabled()) - log.info("queryId=" + queryId + ",totalRunningTaskCount=" - + totalRunningTaskCount + ",totalAvailableChunkCount=" - + totalAvailableChunkCount); - if (TableLog.tableLog.isInfoEnabled()) { /* * Note: RunState is only used by the query controller so this will @@ -180,13 +179,14 @@ TableLog.tableLog.info("\n\nqueryId=" + queryId + "\n"); // TableLog.tableLog.info(query.getQuery().toString()+"\n"); TableLog.tableLog.info(getTableHeader()); - TableLog.tableLog.info(getTableRow("startQ", serviceId, - -1/* shardId */, 1/* fanIn */)); + TableLog.tableLog + .info(getTableRow("startQ", serviceId, msg.getBOpId(), + -1/* shardId */, 1/* fanIn */, null/* stats */)); } - System.err.println("startQ : nstep="+nsteps+", bopId=" + bopId - + ",totalRunningTaskCount=" + totalRunningTaskCount - + ",totalAvailableTaskCount=" + totalAvailableChunkCount); +// System.err.println("startQ : nstep="+nsteps+", bopId=" + bopId +// + ",totalRunningTaskCount=" + totalRunningTaskCount +// + ",totalAvailableTaskCount=" + totalAvailableChunkCount); } @@ -206,7 +206,9 @@ totalRunningTaskCount++; assert totalRunningTaskCount >= 1 : "runningTaskCount=" - + totalRunningTaskCount + " :: msg=" + msg; + + totalRunningTaskCount + " :: runState=" + this + ", msg=" + + msg; + final boolean firstTime; { @@ -218,7 +220,7 @@ final long tmp = n.incrementAndGet(); assert tmp >= 0 : "runningTaskCount=" + tmp + " for bopId=" - + 
msg.bopId + " :: msg=" + msg; + + msg.bopId + " :: runState=" + this + ", msg=" + msg; firstTime = startedSet.add(bopId); // @@ -231,35 +233,38 @@ totalAvailableChunkCount -= msg.nchunks; - assert totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" - + totalAvailableChunkCount + " :: msg=" + msg; + assert availableChunkCountMayBeNegative || totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" + + totalAvailableChunkCount + " :: runState=" + this + ", msg=" + + msg; { AtomicLong n = availableChunkCountMap.get(bopId); if (n == null) - throw new AssertionError(); + availableChunkCountMap.put(bopId, n = new AtomicLong()); final long tmp = n.addAndGet(-msg.nchunks); - assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" - + msg.bopId + " :: msg=" + msg; + assert availableChunkCountMayBeNegative || tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.bopId + " :: runState=" + this + ", msg=" + msg; } - System.err.println("startOp: nstep="+nsteps+", bopId=" + bopId - + ",totalRunningTaskCount=" + totalRunningTaskCount - + ",totalAvailableChunkCount=" + totalAvailableChunkCount - + ",fanIn=" + msg.nchunks); +// System.err.println("startOp: nstep=" + nsteps + ", bopId=" + bopId +// + ",totalRunningTaskCount=" + totalRunningTaskCount +// + ",totalAvailableChunkCount=" + totalAvailableChunkCount +// + ",fanIn=" + msg.nchunks); if (TableLog.tableLog.isInfoEnabled()) { - TableLog.tableLog.info(getTableRow("startOp", msg.serviceId, - msg.partitionId, msg.nchunks/* fanIn */)); + TableLog.tableLog + .info(getTableRow("startOp", msg.serviceId, msg.bopId, + msg.partitionId, msg.nchunks/* fanIn */, null/* stats */)); } // check deadline. 
final long deadline = query.getDeadline(); + if (deadline < System.currentTimeMillis()) { if (log.isTraceEnabled()) @@ -271,6 +276,7 @@ query.cancel(true/* mayInterruptIfRunning */); } + return firstTime; } @@ -291,8 +297,9 @@ totalAvailableChunkCount += fanOut; - assert totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" - + totalAvailableChunkCount + " :: msg=" + msg; + assert availableChunkCountMayBeNegative || totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" + + totalAvailableChunkCount + " :: runState=" + this + + ", msg=" + msg; if (msg.sinkId != null) { AtomicLong n = availableChunkCountMap.get(msg.sinkId); @@ -302,8 +309,8 @@ final long tmp = n.addAndGet(msg.sinkChunksOut); - assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" - + msg.sinkId + " :: msg=" + msg; + assert availableChunkCountMayBeNegative || tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.sinkId + " :: runState=" + this + ", msg=" + msg; } @@ -317,8 +324,9 @@ final long tmp = n.addAndGet(msg.altSinkChunksOut); - assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" - + msg.altSinkId + " :: msg=" + msg; + assert availableChunkCountMayBeNegative || tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.altSinkId + " :: runState=" + this + ", msg=" + + msg; } @@ -328,7 +336,8 @@ totalRunningTaskCount--; assert totalRunningTaskCount >= 0 : "runningTaskCount=" - + totalRunningTaskCount + " :: msg=" + msg; + + totalRunningTaskCount + " :: runState=" + this + ", msg=" + + msg; { @@ -340,32 +349,35 @@ final long tmp = n.decrementAndGet(); assert tmp >= 0 : "runningTaskCount=" + tmp + " for bopId=" - + msg.bopId + " :: msg=" + msg; + + msg.bopId + " :: runState=" + this + ", msg=" + msg; } // Figure out if this operator is done. 
final boolean isDone = isOperatorDone(msg.bopId); - System.err.println("haltOp : nstep=" + nsteps + ", bopId=" + msg.bopId - + ",totalRunningTaskCount=" + totalRunningTaskCount - + ",totalAvailableTaskCount=" + totalAvailableChunkCount - + ",fanOut=" + fanOut); +// System.err.println("haltOp : nstep=" + nsteps + ", bopId=" + msg.bopId +// + ",totalRunningTaskCount=" + totalRunningTaskCount +// + ",totalAvailableTaskCount=" + totalAvailableChunkCount +// + ",fanOut=" + fanOut); if (TableLog.tableLog.isInfoEnabled()) { TableLog.tableLog.info(getTableRow("haltOp", msg.serviceId, - msg.partitionId, fanOut)); + msg.bopId, msg.partitionId, fanOut, msg.taskStats)); } - if (log.isTraceEnabled()) - log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId - + ",serviceId=" + query.getQueryEngine().getServiceUUID() - + ", nchunks=" + fanOut + " : totalRunningTaskCount=" - + totalRunningTaskCount + ", totalAvailableChunkCount=" - + totalAvailableChunkCount); +// if (log.isTraceEnabled()) +// log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId +// + ",serviceId=" + query.getQueryEngine().getServiceUUID() +// + ", nchunks=" + fanOut + " : totalRunningTaskCount=" +// + totalRunningTaskCount + ", totalAvailableChunkCount=" +// + totalAvailableChunkCount); - // test termination criteria + /* + * Test termination criteria + */ final long deadline = query.getDeadline(); + if (msg.cause != null) { // operator failed on this chunk. @@ -458,7 +470,7 @@ Arrays.sort(bopIds); // header 2. - sb.append("step\tlabel\tshardId\tfanIO\tavail\trun"); + sb.append("step\tlabel\tbopId\tshardId\tfanIO\tavail\trun"); for (int i = 0; i < bopIds.length; i++) { @@ -470,6 +482,10 @@ sb.append("\tserviceId"); + sb.append("\tbop"); + + sb.append("\tstats"); + sb.append('\n'); return sb.toString(); @@ -485,17 +501,23 @@ * @param label * The state change level (startQ, startOp, haltOp). * @param serviceId - * The node on which the operator is/was executed. 
+ * The node on which the operator will be / was executed. + * @param bopId + * The identifier for the bop which will be / was executed. * @param shardId * The index partition against which the operator was running and * <code>-1</code> if the operator was not evaluated against a * specific index partition. - * @param * @param fanIO * The fanIn (startQ,startOp) or fanOut (haltOp). + * @param stats + * The statistics from the operator evaluation and + * <code>null</code> unless {@link #haltOp(HaltOpMessage)} is + * the invoker. */ private String getTableRow(final String label, final UUID serviceId, - final int shardId, final int fanIO) { + final int bopId, final int shardId, final int fanIO, + final BOpStats stats) { final StringBuilder sb = new StringBuilder(); @@ -503,6 +525,8 @@ sb.append('\t'); sb.append(label); sb.append('\t'); + sb.append(Integer.toString(bopId)); + sb.append('\t'); sb.append(Integer.toString(shardId)); sb.append('\t'); sb.append(Integer.toString(fanIO)); @@ -534,6 +558,15 @@ sb.append('\t'); sb.append(serviceId == null ? "N/A" : serviceId.toString()); + sb.append('\t'); + sb.append(query.bopIndex.get(bopId)); + + if (stats != null) { + // @todo use a multi-column version of stats. + sb.append('\t'); + sb.append(stats.toString()); + } + sb.append('\n'); return sb.toString(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 19:52:00 UTC (rev 3558) @@ -776,7 +776,7 @@ if (altSink != null && altSink != queryBuffer && !altSink.isEmpty()) { /* - * Handle alt sink output , sending appropriate chunk + * Handle alt sink output, sending appropriate chunk * message(s). 
* * Note: This maps output over shards/nodes in s/o. Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java 2010-09-15 19:52:00 UTC (rev 3558) @@ -48,4 +48,10 @@ this.nchunks = nchunks; } + public String toString() { + return getClass().getName() + "{queryId=" + queryId + ",bopId=" + bopId + + ",partitionId=" + partitionId + ",serviceId=" + serviceId + + ",nchunks=" + nchunks + "}"; + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-15 19:52:00 UTC (rev 3558) @@ -211,8 +211,8 @@ log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n ## -# Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +# BOp run state trace (tab delimited file). Uncomment the next line to enable. 
+#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender log4j.appender.queryRunStateLog.Threshold=ALL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 15:54:52 UTC (rev 3557) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 19:52:00 UTC (rev 3558) @@ -38,6 +38,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import junit.framework.TestCase2; @@ -639,6 +640,12 @@ protected int doStressTest(final long timeout, final int ntrials, final int poolSize) throws Exception { + // start time in nanos. + final long begin = System.nanoTime(); + + // timeout in nanos. + final long nanos = TimeUnit.MILLISECONDS.toNanos(timeout); + final Executor service = new LatchedExecutor(jnl.getExecutorService(), poolSize); @@ -646,9 +653,12 @@ for (int i = 0; i < ntrials; i++) { + final int trial = i; final FutureTask<Void> ft = new FutureTask<Void>(new Runnable() { public void run() { try { + if (log.isInfoEnabled()) + log.info("trial=" + trial); test_query_join2(); } catch (Exception e) { // wrap exception. @@ -662,26 +672,30 @@ service.execute(ft); } - - Thread.sleep(timeout); - + int nerror = 0; int ncancel = 0; + int ntimeout = 0; int nsuccess = 0; for (FutureTask<Void> ft : futures) { - ft.cancel(true/* mayInterruptIfRunning */); + // remaining nanoseconds. 
+ final long remaining = nanos - (System.nanoTime() - begin); + if (remaining <= 0) + ft.cancel(true/* mayInterruptIfRunning */); try { - ft.get(); + ft.get(remaining, TimeUnit.NANOSECONDS); nsuccess++; } catch (CancellationException ex) { ncancel++; + } catch (TimeoutException ex) { + ntimeout++; } catch (ExecutionException ex) { nerror++; } } final String msg = "nerror=" + nerror + ", ncancel=" + ncancel - + ", nsuccess=" + nsuccess; + + ", ntimeout=" + ntimeout + ", nsuccess=" + nsuccess; if(log.isInfoEnabled()) log.info(msg); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-15 15:55:04
|
Revision: 3557 http://bigdata.svn.sourceforge.net/bigdata/?rev=3557&view=rev Author: thompsonbry Date: 2010-09-15 15:54:52 +0000 (Wed, 15 Sep 2010) Log Message: ----------- Changed queryId from 'long' to UUID, which is what the existing scale-out query code is using. Moved the 2DS distributed query test suite into the bigdata-jini module since it has a dependency on JiniClient. Extracted RunState from RunningQuery into its own class. Provided a logger for a table view of the RunState of a query as it evolves. Added some stress tests for concurrent query. Currently working through a concurrency issue in com.bigdata.bop.engine.RunState. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/QueryContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ServiceContext.java
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ShardContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata-jini/src/test/com/bigdata/bop/fed/jini/TestJiniFederatedQueryEngine.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -18,7 +18,7 @@ private static final long serialVersionUID = 1L; /** The identifier of the query. */ - final public long queryId; + final public UUID queryId; /** The identifier of the operator. 
*/ final public int bopId; @@ -101,7 +101,7 @@ */ public HaltOpMessage( // - final long queryId, final int bopId, final int partitionId, + final UUID queryId, final int bopId, final int partitionId, final UUID serviceId, Throwable cause, // final Integer sinkId, final int sinkChunksOut,// final Integer altSinkId, final int altSinkChunksOut,// Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IChunkMessage.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -1,13 +1,10 @@ package com.bigdata.bop.engine; -import java.nio.ByteBuffer; -import java.util.concurrent.BlockingQueue; +import java.util.UUID; import com.bigdata.bop.BOp; -import com.bigdata.bop.IBindingSet; import com.bigdata.bop.fed.FederatedRunningQuery; -import com.bigdata.relation.accesspath.BlockingBuffer; -import com.bigdata.relation.accesspath.IAsynchronousIterator; +import com.bigdata.btree.raba.IRaba; import com.bigdata.service.ResourceService; /** @@ -32,7 +29,7 @@ IQueryClient getQueryController(); /** The query identifier. */ - long getQueryId(); + UUID getQueryId(); /** The identifier for the target {@link BOp}. 
*/ int getBOpId(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -1,6 +1,7 @@ package com.bigdata.bop.engine; import java.rmi.RemoteException; +import java.util.UUID; import com.bigdata.bop.BindingSetPipelineOp; @@ -19,7 +20,7 @@ * @throws IllegalArgumentException * if there is no such query. */ - BindingSetPipelineOp getQuery(long queryId) throws RemoteException; + BindingSetPipelineOp getQuery(UUID queryId) throws RemoteException; /** * Notify the client that execution has started for some query, operator, Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryDecl.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -1,5 +1,7 @@ package com.bigdata.bop.engine; +import java.util.UUID; + import com.bigdata.bop.BindingSetPipelineOp; /** @@ -15,7 +17,7 @@ /** * The query identifier. */ - long getQueryId(); + UUID getQueryId(); /** * The query. 
Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -55,24 +55,6 @@ */ IIndexManager getIndexManager(); -// /** -// * The timestamp or transaction identifier against which the query is -// * reading. -// * -// * @deprecated move into the individual operator. See -// * {@link BOp.Annotations#TIMESTAMP} -// */ -// long getReadTimestamp(); -// -// /** -// * The timestamp or transaction identifier against which the query is -// * writing. -// * -// * @deprecated moved into the individual operator. See -// * {@link BOp.Annotations#TIMESTAMP} -// */ -// long getWriteTimestamp(); - /** * Terminate query evaluation */ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -1,6 +1,7 @@ package com.bigdata.bop.engine; import java.io.Serializable; +import java.util.UUID; import com.bigdata.bop.BOp; import com.bigdata.bop.fed.FederatedRunningQuery; @@ -21,7 +22,7 @@ /** * The query identifier. */ - private final long queryId; + private final UUID queryId; /** * The target {@link BOp}. 
@@ -42,7 +43,7 @@ return queryController; } - public long getQueryId() { + public UUID getQueryId() { return queryId; } @@ -59,12 +60,15 @@ } public LocalChunkMessage(final IQueryClient queryController, - final long queryId, final int bopId, final int partitionId, + final UUID queryId, final int bopId, final int partitionId, final IAsynchronousIterator<E[]> source) { if (queryController == null) throw new IllegalArgumentException(); + if (queryId == null) + throw new IllegalArgumentException(); + if (source == null) throw new IllegalArgumentException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryDecl.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -28,6 +28,7 @@ package com.bigdata.bop.engine; import java.io.Serializable; +import java.util.UUID; import com.bigdata.bop.BindingSetPipelineOp; @@ -44,18 +45,21 @@ */ private static final long serialVersionUID = 1L; - private final long queryId; + private final UUID queryId; private final IQueryClient clientProxy; private final BindingSetPipelineOp query; - public QueryDecl(final IQueryClient clientProxy, final long queryId, + public QueryDecl(final IQueryClient clientProxy, final UUID queryId, final BindingSetPipelineOp query) { if (clientProxy == null) throw new IllegalArgumentException(); + if (queryId == null) + throw new IllegalArgumentException(); + if (query == null) throw new IllegalArgumentException(); @@ -75,7 +79,7 @@ return clientProxy; } - public long getQueryId() { + public UUID getQueryId() { return queryId; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -28,10 +28,13 @@ package com.bigdata.bop.engine; import java.rmi.RemoteException; +import java.util.Comparator; import java.util.UUID; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.atomic.AtomicReference; @@ -412,13 +415,23 @@ /** * The currently executing queries. */ - final protected ConcurrentHashMap<Long/* queryId */, RunningQuery> runningQueries = new ConcurrentHashMap<Long, RunningQuery>(); + final protected ConcurrentHashMap<UUID/* queryId */, RunningQuery> runningQueries = new ConcurrentHashMap<UUID, RunningQuery>(); /** - * A priority queue of {@link RunningQuery}s having binding set chunks - * available for consumption. + * A queue of {@link RunningQuery}s having binding set chunks available for + * consumption. + * + * @todo Be careful when testing out a {@link PriorityBlockingQueue} here. + * First, that collection is intrinsically bounded (it is backed by an + * array) so it will BLOCK under heavy load and could be expected to + * have some resize costs if the queue size becomes too large. Second, + * either {@link RunningQuery} needs to implement an appropriate + * {@link Comparator} or we need to pass one into the constructor for + * the queue. 
*/ - final private PriorityBlockingQueue<RunningQuery> priorityQueue = new PriorityBlockingQueue<RunningQuery>(); + final private BlockingQueue<RunningQuery> priorityQueue = new LinkedBlockingQueue<RunningQuery>(); +// final private BlockingQueue<RunningQuery> priorityQueue = new PriorityBlockingQueue<RunningQuery>( +// ); /** * @@ -480,7 +493,10 @@ * if the query engine is shutting down. */ protected void assertRunning() { - + + if (engineFuture.get() == null) + throw new IllegalStateException("Not initialized."); + if (shutdown) throw new IllegalStateException("Shutting down."); @@ -517,7 +533,7 @@ while (true) { try { final RunningQuery q = priorityQueue.take(); - final long queryId = q.getQueryId(); + final UUID queryId = q.getQueryId(); if (q.isCancelled()) continue; final IChunkMessage<IBindingSet> chunk = q.chunksIn.poll(); @@ -553,7 +569,7 @@ * chunk will be attached to the query and the query will be scheduled for * execution. * - * @param chunk + * @param msg * A chunk of intermediate results. * * @throws IllegalArgumentException @@ -561,25 +577,27 @@ * @throws IllegalStateException * if the chunk is not materialized. */ - void acceptChunk(final IChunkMessage<IBindingSet> chunk) { + protected void acceptChunk(final IChunkMessage<IBindingSet> msg) { - if (chunk == null) + if (msg == null) throw new IllegalArgumentException(); - if (!chunk.isMaterialized()) + if (!msg.isMaterialized()) throw new IllegalStateException(); - final RunningQuery q = runningQueries.get(chunk.getQueryId()); + final RunningQuery q = runningQueries.get(msg.getQueryId()); if(q == null) throw new IllegalStateException(); // add chunk to the query's input queue on this node. - q.acceptChunk(chunk); + q.acceptChunk(msg); + + assertRunning(); // add query to the engine's task queue. 
priorityQueue.add(q); - + } /** @@ -697,20 +715,6 @@ * IQueryClient */ -// public BOp getQuery(final long queryId) throws RemoteException { -// -// final RunningQuery q = runningQueries.get(queryId); -// -// if (q != null) { -// -// return q.getQuery(); -// -// } -// -// return null; -// -// } - public void startOp(final StartOpMessage msg) throws RemoteException { final RunningQuery q = runningQueries.get(msg.queryId); @@ -770,17 +774,20 @@ * needs to talk to a federation. There should be nothing DS * specific about the {@link FederatedQueryEngine}. */ - public RunningQuery eval(final long queryId, + public RunningQuery eval(final UUID queryId, final BindingSetPipelineOp query, final IChunkMessage<IBindingSet> msg) throws Exception { + if (queryId == null) + throw new IllegalArgumentException(); + if (query == null) throw new IllegalArgumentException(); if (msg == null) throw new IllegalArgumentException(); - if (queryId != msg.getQueryId()) // @todo use equals() to compare UUIDs. + if (!queryId.equals(msg.getQueryId())) throw new IllegalArgumentException(); final RunningQuery runningQuery = newRunningQuery(this, queryId, @@ -813,6 +820,8 @@ runningQuery.startQuery(msg); + acceptChunk(msg); + return runningQuery; } @@ -826,13 +835,13 @@ * @return The {@link RunningQuery} -or- <code>null</code> if there is no * query associated with that query identifier. */ - protected RunningQuery getRunningQuery(final long queryId) { + protected RunningQuery getRunningQuery(final UUID queryId) { return runningQueries.get(queryId); } - public BindingSetPipelineOp getQuery(final long queryId) { + public BindingSetPipelineOp getQuery(final UUID queryId) { final RunningQuery q = getRunningQuery(queryId); @@ -851,9 +860,12 @@ * @param runningQuery * The {@link RunningQuery}. 
*/ - protected void putRunningQuery(final long queryId, + protected void putRunningQuery(final UUID queryId, final RunningQuery runningQuery) { + if (queryId == null) + throw new IllegalArgumentException(); + if (runningQuery == null) throw new IllegalArgumentException(); @@ -865,7 +877,7 @@ * Factory for {@link RunningQuery}s. */ protected RunningQuery newRunningQuery(final QueryEngine queryEngine, - final long queryId, final boolean controller, + final UUID queryId, final boolean controller, final IQueryClient clientProxy, final BindingSetPipelineOp query) { return new RunningQuery(this, queryId, true/* controller */, Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -0,0 +1,543 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 15, 2010 + */ + +package com.bigdata.bop.engine; + +import java.rmi.RemoteException; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.log4j.Logger; + +import com.bigdata.bop.BOp; + +/** + * The run state for a {@link RunningQuery}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +class RunState { + + static private final Logger log = Logger.getLogger(RunState.class); + + /** + * Inner class provides a 2nd logger used for tabular representations. + */ + static private class TableLog { + + static private final Logger tableLog = Logger.getLogger(TableLog.class); + + } + + /** + * The query. + */ + private final RunningQuery query; + + /** + * The query identifier. + */ + private final UUID queryId; + + /** + * The #of run state transitions which have occurred for this query. + */ + private long nsteps = 0; + + /** + * The #of tasks for this query which have started but not yet halted and + * ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private long totalRunningTaskCount = 0; + + /** + * The #of chunks for this query of which a running task has made available + * but which have not yet been accepted for processing by another task and + * ZERO (0) if this is not the query coordinator. + * <p> + * This is guarded by the {@link #runningStateLock}. 
+ */ + private long totalAvailableChunkCount = 0; + + /** + * A map reporting the #of chunks available for each operator in the + * pipeline (we only report chunks for pipeline operators). The total #of + * chunks available across all operators in the pipeline is reported by + * {@link #totalAvailableChunkCount}. + * <p> + * The movement of the intermediate binding set chunks forms an acyclic + * directed graph. This map is used to track the #of chunks available for + * each bop in the pipeline. When a bop has no more incoming chunks, we send + * an asynchronous message to all nodes on which that bop had executed + * informing the {@link QueryEngine} on that node that it should immediately + * release all resources associated with that bop. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection reporting on the #of instances of a given {@link BOp} which + * are concurrently executing. + * <p> + * This is guarded by the {@link #runningStateLock}. + */ + private final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningTaskCountMap = new LinkedHashMap<Integer, AtomicLong>(); + + /** + * A collection of the operators which have executed at least once. + * <p> + * This is guarded by the {@link #runningStateLock}. 
+ */ + private final Set<Integer/* bopId */> startedSet = new LinkedHashSet<Integer>(); + + public RunState(final RunningQuery query) { + + this.query = query; + + this.queryId = query.getQueryId(); + + // this.nops = query.bopIndex.size(); + + } + + public void startQuery(final IChunkMessage<?> msg) { + + nsteps++; + + // query.lifeCycleSetUpQuery(); + + final Integer bopId = Integer.valueOf(msg.getBOpId()); + + totalAvailableChunkCount++; + + assert totalAvailableChunkCount == 1 : "totalAvailableChunkCount=" + + totalAvailableChunkCount + " :: msg=" + msg; + + { + + AtomicLong n = availableChunkCountMap.get(bopId); + + if (n == null) + availableChunkCountMap.put(bopId, n = new AtomicLong()); + + final long tmp = n.incrementAndGet(); + + assert tmp == 1 : "availableChunkCount=" + tmp + " for bopId=" + + msg.getBOpId() + " :: msg=" + msg; + + } + + if (log.isInfoEnabled()) + log.info("queryId=" + queryId + ",totalRunningTaskCount=" + + totalRunningTaskCount + ",totalAvailableChunkCount=" + + totalAvailableChunkCount); + + if (TableLog.tableLog.isInfoEnabled()) { + /* + * Note: RunState is only used by the query controller so this will + * not do an RMI and the RemoteException will not be thrown. + */ + final UUID serviceId; + try { + serviceId = msg.getQueryController().getServiceUUID(); + } catch (RemoteException ex) { + throw new AssertionError(ex); + } + TableLog.tableLog.info("\n\nqueryId=" + queryId + "\n"); + // TableLog.tableLog.info(query.getQuery().toString()+"\n"); + TableLog.tableLog.info(getTableHeader()); + TableLog.tableLog.info(getTableRow("startQ", serviceId, + -1/* shardId */, 1/* fanIn */)); + } + + System.err.println("startQ : nstep="+nsteps+", bopId=" + bopId + + ",totalRunningTaskCount=" + totalRunningTaskCount + + ",totalAvailableTaskCount=" + totalAvailableChunkCount); + + } + + /** + * @return <code>true</code> if this is the first time we will evaluate the + * op. 
+ */ + public boolean startOp(final StartOpMessage msg) { + + nsteps++; + + if (log.isTraceEnabled()) + log.trace(msg.toString()); + + final Integer bopId = Integer.valueOf(msg.bopId); + + totalRunningTaskCount++; + + assert totalRunningTaskCount >= 1 : "runningTaskCount=" + + totalRunningTaskCount + " :: msg=" + msg; + final boolean firstTime; + { + + AtomicLong n = runningTaskCountMap.get(bopId); + + if (n == null) + runningTaskCountMap.put(bopId, n = new AtomicLong()); + + final long tmp = n.incrementAndGet(); + + assert tmp >= 0 : "runningTaskCount=" + tmp + " for bopId=" + + msg.bopId + " :: msg=" + msg; + + firstTime = startedSet.add(bopId); + // + // // first evaluation pass for this operator. + // query.lifeCycleSetUpOperator(bopId); + // + // } + + } + + totalAvailableChunkCount -= msg.nchunks; + + assert totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" + + totalAvailableChunkCount + " :: msg=" + msg; + + { + + AtomicLong n = availableChunkCountMap.get(bopId); + + if (n == null) + throw new AssertionError(); + + final long tmp = n.addAndGet(-msg.nchunks); + + assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.bopId + " :: msg=" + msg; + + } + + System.err.println("startOp: nstep="+nsteps+", bopId=" + bopId + + ",totalRunningTaskCount=" + totalRunningTaskCount + + ",totalAvailableChunkCount=" + totalAvailableChunkCount + + ",fanIn=" + msg.nchunks); + + if (TableLog.tableLog.isInfoEnabled()) { + TableLog.tableLog.info(getTableRow("startOp", msg.serviceId, + msg.partitionId, msg.nchunks/* fanIn */)); + } + + // check deadline. + final long deadline = query.getDeadline(); + if (deadline < System.currentTimeMillis()) { + + if (log.isTraceEnabled()) + log.trace("expired: queryId=" + queryId + ", deadline=" + + deadline); + + query.future.halt(new TimeoutException()); + + query.cancel(true/* mayInterruptIfRunning */); + + } + return firstTime; + } + + /** + * Update termination criteria counters. 
@return <code>true</code> if the + * operator life cycle is over. + */ + public boolean haltOp(final HaltOpMessage msg) { + + nsteps++; + + if (log.isTraceEnabled()) + log.trace(msg.toString()); + + // chunks generated by this task. + final int fanOut = msg.sinkChunksOut + msg.altSinkChunksOut; + { + + totalAvailableChunkCount += fanOut; + + assert totalAvailableChunkCount >= 0 : "totalAvailableChunkCount=" + + totalAvailableChunkCount + " :: msg=" + msg; + + if (msg.sinkId != null) { + AtomicLong n = availableChunkCountMap.get(msg.sinkId); + if (n == null) + availableChunkCountMap + .put(msg.sinkId, n = new AtomicLong()); + + final long tmp = n.addAndGet(msg.sinkChunksOut); + + assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.sinkId + " :: msg=" + msg; + + } + + if (msg.altSinkId != null) { + + AtomicLong n = availableChunkCountMap.get(msg.altSinkId); + + if (n == null) + availableChunkCountMap.put(msg.altSinkId, + n = new AtomicLong()); + + final long tmp = n.addAndGet(msg.altSinkChunksOut); + + assert tmp >= 0 : "availableChunkCount=" + tmp + " for bopId=" + + msg.altSinkId + " :: msg=" + msg; + + } + + } + + // one less task is running. + totalRunningTaskCount--; + + assert totalRunningTaskCount >= 0 : "runningTaskCount=" + + totalRunningTaskCount + " :: msg=" + msg; + + { + + final AtomicLong n = runningTaskCountMap.get(msg.bopId); + + if (n == null) + throw new AssertionError(); + + final long tmp = n.decrementAndGet(); + + assert tmp >= 0 : "runningTaskCount=" + tmp + " for bopId=" + + msg.bopId + " :: msg=" + msg; + + } + + // Figure out if this operator is done. 
+ final boolean isDone = isOperatorDone(msg.bopId); + + System.err.println("haltOp : nstep=" + nsteps + ", bopId=" + msg.bopId + + ",totalRunningTaskCount=" + totalRunningTaskCount + + ",totalAvailableTaskCount=" + totalAvailableChunkCount + + ",fanOut=" + fanOut); + + if (TableLog.tableLog.isInfoEnabled()) { + TableLog.tableLog.info(getTableRow("haltOp", msg.serviceId, + msg.partitionId, fanOut)); + } + + if (log.isTraceEnabled()) + log.trace("bopId=" + msg.bopId + ",partitionId=" + msg.partitionId + + ",serviceId=" + query.getQueryEngine().getServiceUUID() + + ", nchunks=" + fanOut + " : totalRunningTaskCount=" + + totalRunningTaskCount + ", totalAvailableChunkCount=" + + totalAvailableChunkCount); + + // test termination criteria + final long deadline = query.getDeadline(); + if (msg.cause != null) { + + // operator failed on this chunk. + log.error("Error: Canceling query: queryId=" + queryId + ",bopId=" + + msg.bopId + ",partitionId=" + msg.partitionId, msg.cause); + + query.future.halt(msg.cause); + + query.cancel(true/* mayInterruptIfRunning */); + + } else if (totalRunningTaskCount == 0 && totalAvailableChunkCount == 0) { + + // success (all done). + if (log.isTraceEnabled()) + log.trace("success: queryId=" + queryId); + + query.future.halt(query.getStats()); + + query.cancel(true/* mayInterruptIfRunning */); + + } else if (deadline < System.currentTimeMillis()) { + + if (log.isTraceEnabled()) + log.trace("expired: queryId=" + queryId + ", deadline=" + + deadline); + + query.future.halt(new TimeoutException()); + + query.cancel(true/* mayInterruptIfRunning */); + + } + return isDone; + } + + /** + * Return <code>true</code> the specified operator can no longer be + * triggered by the query. The specific criteria are that no operators which + * are descendants of the specified operator are running or have chunks + * available against which they could run. 
Under those conditions it is not + * possible for a chunk to show up which would cause the operator to be + * executed. + * + * @param bopId + * Some operator identifier. + * + * @return <code>true</code> if the operator can not be triggered given the + * current query activity. + * + * @throws IllegalMonitorStateException + * unless the {@link #runStateLock} is held by the caller. + */ + protected boolean isOperatorDone(final int bopId) { + + return PipelineUtility.isDone(bopId, query.getQuery(), query.bopIndex, + runningTaskCountMap, availableChunkCountMap); + + } + + /* + * Human readable representations of the query run state. + */ + + /** + * Human readable summary of the current {@link RunState}. + *<p> + * Note: You must holding the lock guarding the {@link RunState} to + * guarantee that will return a consistent representation. + */ + public String toString() { + + final StringBuilder sb = new StringBuilder(); + + sb.append(getClass().getName()); + sb.append("{nsteps=" + nsteps); + sb.append(",totalRunningTaskCount=" + totalRunningTaskCount); + sb.append(",totalAvailableTaskCount=" + totalAvailableChunkCount); + sb.append("}"); + + return sb.toString(); + + } + + private String getTableHeader() { + + final StringBuilder sb = new StringBuilder(); + + final Integer[] bopIds = query.bopIndex.keySet() + .toArray(new Integer[0]); + + Arrays.sort(bopIds); + + // header 2. + sb.append("step\tlabel\tshardId\tfanIO\tavail\trun"); + + for (int i = 0; i < bopIds.length; i++) { + + final Integer id = bopIds[i]; + + sb.append("\trun#" + id + "\tavail#" + id); + + } + + sb.append("\tserviceId"); + + sb.append('\n'); + + return sb.toString(); + + } + + /** + * Return a tabular representation of the query {@link RunState}. + *<p> + * Note: You must holding the lock guarding the {@link RunState} to + * guarantee that will return a consistent representation. + * + * @param label + * The state change level (startQ, startOp, haltOp). 
+ * @param serviceId + * The node on which the operator is/was executed. + * @param shardId + * The index partition against which the operator was running and + * <code>-1</code> if the operator was not evaluated against a + * specific index partition. + * @param + * @param fanIO + * The fanIn (startQ,startOp) or fanOut (haltOp). + */ + private String getTableRow(final String label, final UUID serviceId, + final int shardId, final int fanIO) { + + final StringBuilder sb = new StringBuilder(); + + sb.append(Long.toString(nsteps)); + sb.append('\t'); + sb.append(label); + sb.append('\t'); + sb.append(Integer.toString(shardId)); + sb.append('\t'); + sb.append(Integer.toString(fanIO)); + sb.append('\t'); + sb.append(Long.toString(totalAvailableChunkCount)); + sb.append('\t'); + sb.append(Long.toString(totalRunningTaskCount)); + + final Integer[] bopIds = query.bopIndex.keySet() + .toArray(new Integer[0]); + + Arrays.sort(bopIds); + + for (int i = 0; i < bopIds.length; i++) { + + final Integer id = bopIds[i]; + + final AtomicLong nrunning = runningTaskCountMap.get(id); + + final AtomicLong navailable = availableChunkCountMap.get(id); + + sb.append("\t" + (navailable == null ? "N/A" : navailable.get())); + + sb.append("\t" + (nrunning == null ? "N/A" : nrunning.get())); + + } + + // Note: At the end to keep the table pretty. Will be null unless s/o. + sb.append('\t'); + sb.append(serviceId == null ? 
"N/A" : serviceId.toString()); + + sb.append('\n'); + + return sb.toString(); + + } + +} // class RunState Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -28,10 +28,7 @@ package com.bigdata.bop.engine; import java.rmi.RemoteException; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.Map; -import java.util.Set; import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; @@ -81,8 +78,10 @@ /** * The run state of the query and the result of the computation iff it * completes execution normally (without being interrupted, cancelled, etc). + * <p> + * Note: Package private in order to expose this field to {@link RunState}. */ - final private Haltable<Map<Integer,BOpStats>> future = new Haltable<Map<Integer,BOpStats>>(); + final /*private*/ Haltable<Map<Integer,BOpStats>> future = new Haltable<Map<Integer,BOpStats>>(); /** * The runtime statistics for each {@link BOp} in the query and @@ -96,7 +95,7 @@ final private QueryEngine queryEngine; /** The unique identifier for this query. */ - final private long queryId; + final private UUID queryId; /** * The query deadline. 
The value is the system clock time in milliseconds @@ -149,8 +148,8 @@ private final ConcurrentHashMap<BSBundle, Future<?>> operatorFutures = new ConcurrentHashMap<BSBundle, Future<?>>(); /** - * A lock guarding {@link RunState#runningTaskCount}, - * {@link RunState#availableChunkCount}, + * A lock guarding {@link RunState#totalRunningTaskCount}, + * {@link RunState#totalAvailableChunkCount}, * {@link RunState#availableChunkCountMap}. This is <code>null</code> unless * this is the query controller. * @@ -209,6 +208,19 @@ } /** + * Return the query deadline (the time at which it will terminate regardless + * of its run state). + * + * @return The query deadline (milliseconds since the epoch) and + * {@link Long#MAX_VALUE} if no explicit deadline was specified. + */ + public long getDeadline() { + + return deadline.get(); + + } + + /** * The class executing the query on this node. */ public QueryEngine getQueryEngine() { @@ -233,7 +245,7 @@ /** * The unique identifier for this query. */ - public long getQueryId() { + public UUID getQueryId() { return queryId; @@ -283,15 +295,16 @@ * {@link ITx#UNISOLATED} nor a read-write transaction * identifier. */ - public RunningQuery(final QueryEngine queryEngine, final long queryId, -// final long begin, - final boolean controller, - final IQueryClient clientProxy, final BindingSetPipelineOp query - ) { + public RunningQuery(final QueryEngine queryEngine, final UUID queryId, + final boolean controller, final IQueryClient clientProxy, + final BindingSetPipelineOp query) { if (queryEngine == null) throw new IllegalArgumentException(); + if (queryId == null) + throw new IllegalArgumentException(); + if (clientProxy == null) throw new IllegalArgumentException(); @@ -392,6 +405,12 @@ if (!msg.isMaterialized()) throw new IllegalStateException(); + if (isCancelled()) + throw new IllegalStateException("Cancelled"); + + if (isDone()) + throw new IllegalStateException("Done"); + // verify still running. 
future.halted(); @@ -399,252 +418,11 @@ chunksIn.add(msg); if (log.isDebugEnabled()) - log.debug("queryId=" + queryId + ", chunksIn.size()=" - + chunksIn.size() + ", msg=" + msg); + log.debug("chunksIn.size()=" + chunksIn.size() + ", msg=" + msg); } /** - * The run state for the query. - */ - static private class RunState { - - /** - * The query. - */ - private final RunningQuery query; - - /** - * The query identifier. - */ - private final long queryId; - - /** - * The #of tasks for this query which have started but not yet halted - * and ZERO (0) if this is not the query coordinator. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private long runningTaskCount = 0; - - /** - * The #of chunks for this query of which a running task has made - * available but which have not yet been accepted for processing by - * another task and ZERO (0) if this is not the query coordinator. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private long availableChunkCount = 0; - - /** - * A map reporting the #of chunks available for each operator in the - * pipeline (we only report chunks for pipeline operators). The total - * #of chunks available across all operators in the pipeline is reported - * by {@link #availableChunkCount}. - * <p> - * The movement of the intermediate binding set chunks forms an acyclic - * directed graph. This map is used to track the #of chunks available - * for each bop in the pipeline. When a bop has no more incoming chunks, - * we send an asynchronous message to all nodes on which that bop had - * executed informing the {@link QueryEngine} on that node that it - * should immediately release all resources associated with that bop. - * <p> - * This is guarded by the {@link #runningStateLock}. 
- */ - private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); - - /** - * A collection reporting on the #of instances of a given {@link BOp} - * which are concurrently executing. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningCountMap = new LinkedHashMap<Integer, AtomicLong>(); - - /** - * A collection of the operators which have executed at least once. - * <p> - * This is guarded by the {@link #runningStateLock}. - */ - private final Set<Integer/* bopId */> startedSet = new LinkedHashSet<Integer>(); - - public RunState(final RunningQuery query) { - - this.query = query; - - this.queryId = query.queryId; - - } - - public void startQuery(final IChunkMessage<?> msg) { - - query.lifeCycleSetUpQuery(); - - final Integer bopId = Integer.valueOf(msg.getBOpId()); - - availableChunkCount++; - { - AtomicLong n = availableChunkCountMap.get(bopId); - if (n == null) - availableChunkCountMap.put(bopId, n = new AtomicLong()); - n.incrementAndGet(); - } - - if (log.isInfoEnabled()) - log.info("queryId=" + queryId + ",runningTaskCount=" - + runningTaskCount + ",availableChunks=" - + availableChunkCount); - - System.err.println("startQ : bopId=" + bopId + ",running=" - + runningTaskCount + ",available=" + availableChunkCount); - - } - - public void startOp(final StartOpMessage msg) { - - final Integer bopId = Integer.valueOf(msg.bopId); - - runningTaskCount++; - { - AtomicLong n = runningCountMap.get(bopId); - if (n == null) - runningCountMap.put(bopId, n = new AtomicLong()); - n.incrementAndGet(); - if (startedSet.add(bopId)) { - // first evaluation pass for this operator. 
- query.lifeCycleSetUpOperator(bopId); - } - } - - availableChunkCount -= msg.nchunks; - - { - AtomicLong n = availableChunkCountMap.get(bopId); - if (n == null) - throw new AssertionError(); - n.addAndGet(-msg.nchunks); - } - - System.err.println("startOp: bopId=" + bopId + ",running=" - + runningTaskCount + ",available=" + availableChunkCount - + ",fanIn=" + msg.nchunks); - - // check deadline. - if (query.deadline.get() < System.currentTimeMillis()) { - - if (log.isTraceEnabled()) - log.trace("expired: queryId=" + queryId + ", deadline=" - + query.deadline); - - query.future.halt(new TimeoutException()); - - query.cancel(true/* mayInterruptIfRunning */); - - } - - } - - /** - * Update termination criteria counters. - */ - public void haltOp(final HaltOpMessage msg) { - - // chunks generated by this task. - final int fanOut = msg.sinkChunksOut + msg.altSinkChunksOut; - availableChunkCount += fanOut; - if (msg.sinkId != null) { - AtomicLong n = availableChunkCountMap.get(msg.sinkId); - if (n == null) - availableChunkCountMap - .put(msg.sinkId, n = new AtomicLong()); - n.addAndGet(msg.sinkChunksOut); - } - if (msg.altSinkId != null) { - AtomicLong n = availableChunkCountMap.get(msg.altSinkId); - if (n == null) - availableChunkCountMap.put(msg.altSinkId, - n = new AtomicLong()); - n.addAndGet(msg.altSinkChunksOut); - } - // one less task is running. - runningTaskCount--; - { - final AtomicLong n = runningCountMap.get(msg.bopId); - if (n == null) - throw new AssertionError(); - n.decrementAndGet(); - } - // Figure out if this operator is done. - if (isOperatorDone(msg.bopId)) { - /* - * No more chunks can appear for this operator so invoke its end - * of life cycle hook. 
- */ - query.lifeCycleTearDownOperator(msg.bopId); - } - System.err.println("haltOp : bopId=" + msg.bopId + ",running=" - + runningTaskCount + ",available=" + availableChunkCount - + ",fanOut=" + fanOut); - assert runningTaskCount >= 0 : "runningTaskCount=" - + runningTaskCount; - assert availableChunkCount >= 0 : "availableChunkCount=" - + availableChunkCount; - if (log.isTraceEnabled()) - log.trace("bopId=" + msg.bopId + ",partitionId=" - + msg.partitionId + ",serviceId=" - + query.queryEngine.getServiceUUID() + ", nchunks=" - + fanOut + " : runningTaskCount=" + runningTaskCount - + ", availableChunkCount=" + availableChunkCount); - // test termination criteria - if (msg.cause != null) { - // operator failed on this chunk. - log.error("Error: Canceling query: queryId=" + queryId - + ",bopId=" + msg.bopId + ",partitionId=" - + msg.partitionId, msg.cause); - query.future.halt(msg.cause); - query.cancel(true/* mayInterruptIfRunning */); - } else if (runningTaskCount == 0 && availableChunkCount == 0) { - // success (all done). - if (log.isTraceEnabled()) - log.trace("success: queryId=" + queryId); - query.future.halt(query.getStats()); - query.cancel(true/* mayInterruptIfRunning */); - } else if (query.deadline.get() < System.currentTimeMillis()) { - if (log.isTraceEnabled()) - log.trace("expired: queryId=" + queryId + ", deadline=" - + query.deadline); - query.future.halt(new TimeoutException()); - query.cancel(true/* mayInterruptIfRunning */); - } - } - - /** - * Return <code>true</code> the specified operator can no longer be - * triggered by the query. The specific criteria are that no operators - * which are descendants of the specified operator are running or have - * chunks available against which they could run. Under those conditions - * it is not possible for a chunk to show up which would cause the - * operator to be executed. - * - * @param bopId - * Some operator identifier. 
- * - * @return <code>true</code> if the operator can not be triggered given - * the current query activity. - * - * @throws IllegalMonitorStateException - * unless the {@link #runStateLock} is held by the caller. - */ - protected boolean isOperatorDone(final int bopId) { - - return PipelineUtility.isDone(bopId, query.getQuery(), - query.bopIndex, runningCountMap, availableChunkCountMap); - - } - - } // class RunState - - /** * Invoked once by the query controller with the initial * {@link IChunkMessage} which gets the query moving. */ @@ -656,17 +434,17 @@ if (msg == null) throw new IllegalArgumentException(); - if (msg.getQueryId() != queryId) // @todo equals() if queryId is UUID. + if (!queryId.equals(msg.getQueryId())) throw new IllegalArgumentException(); runStateLock.lock(); try { - + + lifeCycleSetUpQuery(); + runState.startQuery(msg); - queryEngine.acceptChunk(msg); - } finally { runStateLock.unlock(); @@ -693,8 +471,9 @@ try { - runState.startOp(msg); - + if (runState.startOp(msg)) + lifeCycleSetUpOperator(msg.bopId); + } finally { runStateLock.unlock(); @@ -729,7 +508,16 @@ try { - runState.haltOp(msg); + if (runState.haltOp(msg)) { + + /* + * No more chunks can appear for this operator so invoke its end + * of life cycle hook. 
+ */ + + lifeCycleTearDownOperator(msg.bopId); + + } } finally { @@ -996,19 +784,49 @@ altSinkChunksOut += handleOutputChunk(altSinkId, altSink); } - clientProxy.haltOp(new HaltOpMessage(queryId, bopId, - partitionId, serviceId, null/* cause */, - sinkId, sinkChunksOut, altSinkId, - altSinkChunksOut, context.getStats())); + final HaltOpMessage msg = new HaltOpMessage(queryId, bopId, + partitionId, serviceId, null/* cause */, sinkId, + sinkChunksOut, altSinkId, altSinkChunksOut, context + .getStats()); + clientProxy.haltOp(msg); } catch (Throwable t) { - try { - clientProxy.haltOp(new HaltOpMessage(queryId, - bopId, partitionId, serviceId, - t/* cause */, sinkId, sinkChunksOut, altSinkId, - altSinkChunksOut, context.getStats())); - } catch (RemoteException e) { - cancel(true/* mayInterruptIfRunning */); - log.error("queryId=" + queryId + ", bopId=" + bopId, e); + /* + * Mark the query as halted on this node regardless of whether + * we are able to communicate with the query controller. + * + * Note: Invoking halt(t) here will log an error. This logged + * error message is necessary in order to catch errors in + * clientProxy.haltOp() (above and below). + */ + // Note: uncomment if paranoid about masked errors after the 1st reported error. +// log.error("queryId=" + queryId + ", bopId=" + bopId, t); + + if (t == future.halt(t)) { + /* + * Send the halt message to the query controller. + * + * Note: Since the exception return from halt(t) is our + * exception, we are responsible for communicating this + * exception to the query controller. If that message does + * not arrive then the query controller will not know that + * we have terminated the query. This can result in a long + * running query which must be explicitly cancelled on the + * query controller. + * + * @todo if we are unable to send the message to the query + * controller then we could retry each time an error is + * thrown for this query. 
+ */ + final HaltOpMessage msg = new HaltOpMessage(queryId, bopId, + partitionId, serviceId, t/* cause */, sinkId, + sinkChunksOut, altSinkId, altSinkChunksOut, context + .getStats()); + try { + clientProxy.haltOp(msg); + } catch (RemoteException e) { + cancel(true/* mayInterruptIfRunning */); + log.error("queryId=" + queryId + ", bopId=" + bopId, e); + } } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -22,7 +22,7 @@ private static final long serialVersionUID = 1L; /** The query identifier. */ - final public long queryId; + final public UUID queryId; /** The operator identifier. */ final public int bopId; @@ -39,7 +39,7 @@ */ final public int nchunks; - public StartOpMessage(final long queryId, final int opId, + public StartOpMessage(final UUID queryId, final int opId, final int partitionId, final UUID serviceId, final int nchunks) { this.queryId = queryId; this.bopId = opId; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-15 14:30:14 UTC (rev 3556) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-15 15:54:52 UTC (rev 3557) @@ -70,6 +70,11 @@ .getLogger(FederatedQueryEngine.class); /** + * The {@link UUID} associated with this service. + */ + private final UUID serviceUUID; + + /** * The {@link IBigdataFederation} iff running in scale-out. 
* <p> * Note: The {@link IBigdataFederation} is required in scale-out in order to @@ -99,7 +104,7 @@ @Override public UUID getServiceUUID() { - return fed.getServiceUUID(); + return serviceUUID; } @@ -127,7 +132,7 @@ * {@inheritDoc} */ @Override - protected FederatedRunningQuery getRunningQuery(final long queryId) { + protected FederatedRunningQuery getRunningQuery(final UUID queryId) { return (FederatedRunningQuery) super.getRunningQuery(queryId); @@ -147,10 +152,10 @@ */ public FederatedQueryEngine(final DataService dataService) { - this(dataService.getFederation(), + this(dataService.getServiceUUID(), dataService.getFederation(), new DelegateIndexManager(dataService), dataService .getResourceManager().getResourceService()); - + } /** @@ -164,6 +169,7 @@ * @param resourceService */ public FederatedQueryEngine(// + final UUID thisService, final IBigdataFederation<?> fed,// final IIndexManager indexManager,// final ManagedResourceService resourceService// @@ -179,6 +185,8 @@ this.fed = fed; + this.serviceUUID = thisService; + this.resourceService = resourceService; } @@ -277,6 +285,7 @@ if(!accept(msg)) { if(log.isDebugEnabled()) log.debug("dropping: " + msg); + continue; } if(log.isDebugEnabled()) log.debug("accepted: " + msg); @@ -287,7 +296,7 @@ * etc. */ FederatedQueryEngine.this - .bufferReady((IChunkMessage) msg); + .acceptChunk((IChunkMessage) msg); } catch(Throwable t) { if(InnerCause.isInnerCause(t, InterruptedException.class)) { log.warn("Interrupted."); @@ -318,7 +327,7 @@ */ private boolean accept(final IChunkMessage<?> msg) throws RemoteException { - final long queryId = msg.getQueryId(); + final UUID queryId = msg.getQueryId(); // lookup query by id. 
FederatedRunningQuery q = getRunningQuery(queryId); @@ -385,7 +394,7 @@ public void declareQuery(final IQueryDecl queryDecl) { - final long queryId = queryDecl.getQueryId(); + final UUID queryId = queryDecl.getQueryId(); putRunningQuery(queryId, newRunningQuery(this, queryId, false/* controller */, queryDecl.getQueryController(), @@ -411,7 +420,7 @@ */ @Override protected FederatedRunningQuery newRunningQuery( - final QueryEngine queryEngine, final long queryId, + final QueryEngine queryEngine, final UUID queryId, final boolean controller, final IQueryC... [truncated message content] |
From: <sgo...@us...> - 2010-09-15 14:30:23
|
Revision: 3556 http://bigdata.svn.sourceforge.net/bigdata/?rev=3556&view=rev Author: sgossard Date: 2010-09-15 14:30:14 +0000 (Wed, 15 Sep 2010) Log Message: ----------- [maven_scaleout] : Finished breaking dependency cycles with 'com.bigdata.util' by moving Util class to 'com.bigdata.jini' package. Also moved unit test TestChecksumUtility to 'com.bigdata.io', should have been moved in r3547 with ChecksumUtility class. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/DiscoveryTool.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/process/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/quorum/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/shard/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/transaction/ServiceImpl.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestAll.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestAll.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/Util.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/io/TestChecksumUtility.java Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/Util.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/util/TestChecksumUtility.java Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/DiscoveryTool.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/DiscoveryTool.java 2010-09-15 10:22:34 
UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/disco/DiscoveryTool.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -24,7 +24,7 @@ */ package com.bigdata.disco; -import com.bigdata.util.Util; +import com.bigdata.jini.Util; import com.sun.jini.config.Config; import net.jini.config.Configuration; @@ -34,7 +34,6 @@ import net.jini.core.discovery.LookupLocator; import net.jini.core.entry.Entry; import net.jini.core.lookup.ServiceItem; -import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.ConstrainableLookupLocator; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/executor/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -28,8 +28,8 @@ import static com.bigdata.executor.Constants.*; import com.bigdata.attr.ServiceInfo; +import com.bigdata.jini.Util; import com.bigdata.util.BootStateUtil; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -37,26 +37,20 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import com.sun.jini.config.Config; import com.sun.jini.start.LifeCycle; import com.sun.jini.thread.ReadyState; import net.jini.config.Configuration; import net.jini.config.ConfigurationProvider; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceItem; -import 
net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.jeri.BasicILFactory; @@ -65,7 +59,6 @@ import net.jini.jeri.ServerEndpoint; import net.jini.jeri.tcp.TcpServerEndpoint; import net.jini.lookup.JoinManager; -import net.jini.lookup.ServiceDiscoveryManager; import java.io.IOException; import java.rmi.RemoteException; Copied: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/Util.java (from rev 3547, branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/Util.java) =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/Util.java (rev 0) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/Util.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -0,0 +1,527 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ + +package com.bigdata.jini; + +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.LogUtil; +import com.bigdata.service.proxy.ClientFuture; +import com.bigdata.service.proxy.RemoteFuture; +import com.bigdata.service.proxy.RemoteFutureImpl; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +import com.sun.jini.config.Config; +import com.sun.jini.thread.InterruptedStatusThread; +import net.jini.config.Configuration; +import net.jini.config.ConfigurationException; +import net.jini.config.NoSuchEntryException; +import net.jini.core.lookup.ServiceID; +import net.jini.discovery.DiscoveryManagement; +import net.jini.discovery.DiscoveryGroupManagement; +import net.jini.discovery.DiscoveryLocatorManagement; +import net.jini.discovery.LookupDiscoveryManager; +import net.jini.export.Exporter; +import net.jini.jeri.BasicILFactory; +import net.jini.jeri.BasicJeriExporter; +import net.jini.jeri.InvocationLayerFactory; +import net.jini.jeri.ServerEndpoint; +import net.jini.jeri.tcp.TcpServerEndpoint; + +import net.jini.lookup.JoinManager; +import net.jini.lookup.ServiceDiscoveryEvent; +import net.jini.lookup.ServiceDiscoveryManager; + +import java.io.IOException; +import java.rmi.server.ExportException; +import java.util.Collection; +import java.util.Iterator; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Future; + +/** + * Miscellaneous, convenient utility methods. + * + */ + +//TODO: Not the best home for this class, but having it in 'com.bigdata.util' caused circular package dependencies that were giving me issues. -gossard +public class Util { + + public static <T extends Comparable<T>> T max(final T... 
elements){ + T max = elements[0]; + for (final T element : elements) { + if(0 < element.compareTo(max)) { + max = element; + } + } + return max; + } + + public static <T extends Comparable<T>> T min(final T... elements){ + T min = elements[0]; + for (final T element : elements) { + if(0 > element.compareTo(min)) { + min = element; + } + } + return min; + } + + /* Convenience method that can be called when a service exits, or + * when failure occurs during the service's initialization process. + * This method un-does any work that may have already been completed; + * for example, un-exports the service if it has already been + * exported, closes any open sockets or file descriptors, terminates + * threads that may have been started, etc. + * <p> + * Note that multiple versions of this method are provided. One version + * is intended to be used by entities that act only as a service (that + * is, entities that export a proxy and use a <code>JoinManager</code>). + * One version is intended to be used by entities that act only as a client + * (that is, entites that use a <code>ServiceDiscoveryManager</code>). + * And the final version can be used by entities that act as both a + * service and as a client. 
+ */ + public static void cleanupOnExit + (Object innerProxy, + Exporter serverExporter, + JoinManager joinManager, + DiscoveryManagement discoveryManager) + { + cleanupOnExit(innerProxy, serverExporter, null, joinManager, + null, discoveryManager); + + } + + public static void cleanupOnExit + (ServiceDiscoveryManager serviceDiscoveryManager, + DiscoveryManagement discoveryManager) + { + cleanupOnExit(null, null, null, null, + serviceDiscoveryManager, discoveryManager); + } + + + public static void cleanupOnExit + (Object innerProxy, + Exporter serverExporter, + JoinManager joinManager, + ServiceDiscoveryManager serviceDiscoveryManager, + DiscoveryManagement discoveryManager) + { + cleanupOnExit(innerProxy, serverExporter, null, joinManager, + serviceDiscoveryManager, discoveryManager); + } + + public static void cleanupOnExit + (Object innerProxy, + Exporter serverExporter, + Set<Exporter> futureExporters, + JoinManager joinManager, + ServiceDiscoveryManager serviceDiscoveryManager, + DiscoveryManagement discoveryManager) + { + if(innerProxy != null) { + try { + if(serverExporter != null) serverExporter.unexport(true); + } catch(Throwable t) { } + } + + if(futureExporters != null) { + for(Exporter exporter : futureExporters) { + if(exporter != null) { + try { + exporter.unexport(true); + exporter = null; + } catch(Throwable t) { } + } + } + synchronized(futureExporters) { + for(Iterator<Exporter> itr = futureExporters.iterator(); + itr.hasNext(); ) + { + itr.next(); + itr.remove(); + } + } + } + + if(joinManager != null) { + try { + joinManager.terminate(); + } catch(Throwable t) { } + } + + if(serviceDiscoveryManager != null) { + try { + serviceDiscoveryManager.terminate(); + } catch(Throwable t) { } + } + + if(discoveryManager != null) { + try { + discoveryManager.terminate(); + } catch(Throwable t) { } + } + } + + + /** + * Unexports the remote object that was exported by the given + * <code>Exporter</code> parameter; which removes the object + * from the RMI 
runtime so that the object can no longer accept + * incoming remote calls.er accept incoming RMI calls. + * <P> + * This method first makes an attempt to unexport the object + * 'gracefully'. That is, for a finite period of time, an attempt + * is made to allow all calls to the object that are in progress, + * or pending, to complete before the object is actually unexported. + * If, after that finite period of time, the object has not been + * successfully unexported, the object is then 'forcibly' unexported; + * that is, the object is unexported even if there are calls to + * the object that are in progress or still pending. + * <P> + * Upon successfully unexporting the given <code>Exporter</code>, + * <code>true</code> is returned. If the given <code>Exporter</code> + * cannot be unexported, or if the value input for that parameter + * is <code>null</code> or has not exported any interfaces, then + * <code>false</code> is returned. + */ + public static boolean unexportRemoteObject(Exporter exporter) { + if (exporter == null) return false; + + // delay no more than 1 minute + final long endTime = System.currentTimeMillis() + (1L*60L*1000L); + boolean unexported = false; + try { + // Unexport only if there are no pending or in-progress calls + while (!unexported && System.currentTimeMillis() < endTime) { + unexported = exporter.unexport(false);//do not force + if (!unexported) Thread.yield(); + }//end loop + if (!unexported) unexported = exporter.unexport(true);//force + } catch ( IllegalStateException e ) { + // Thrown if no object has been exported with the + // Exporter instance + return false; + } + return unexported; + } + + + /** + * Convenience method that can be called in an entity's constructor + * when failure occurs during the initialization process. This + * method simply rethrows the given <code>Throwable</code> so the + * constructor doesn't have to. 
+ */ + public static void handleInitThrowable(Throwable t, Logger logger) + throws IOException, + ConfigurationException + { + if( logger != null ) { + logger.log(Level.FATAL, "initialization failure ... ", t); + } else { + System.err.println("FATAL: initialization failure ... "+t); + }//endif + if (t instanceof IOException) { + throw (IOException)t; + } else if (t instanceof ConfigurationException) { + throw (ConfigurationException)t; + } else if (t instanceof RuntimeException) { + throw (RuntimeException)t; + } else if (t instanceof Error) { + throw (Error)t; + }//endif + } + + /** + * Convenience method that returns a <code>String</code> containing + * a common-separated list the elements (group names) of the given + * array. + */ + public static String writeGroupArrayToString(String[] groups) { + if(groups == null) { + return new String("[ALL_GROUPS]"); + }//endif + if(groups.length <= 0) { + return new String("[]"); + }//endif + StringBuffer strBuf = null; + if(groups[0].compareTo("") == 0) { + strBuf = new StringBuffer("[The PUBLIC Group"); + } else { + strBuf = new StringBuffer("["+groups[0]); + }//endif + for(int i=1;i<groups.length;i++) { + if(groups[i].compareTo("") == 0) { + strBuf.append(", The PUBLIC Group"); + } else { + strBuf.append(", ").append(groups[i]); + }//endif + }//end loop + strBuf.append("]"); + return strBuf.toString(); + } + + /** + * Convenience method that returns a <code>String</code> containing + * a common-separated list the elements (locators) of the given + * array. 
+ */ + public static String writeArrayElementsToString(Object[] arr) { + if(arr == null) return new String("[]"); + if(arr.length <= 0) { + return new String("[]"); + }//endif + StringBuffer strBuf = new StringBuffer("["+arr[0]); + for(int i=1;i<arr.length;i++){ + strBuf.append(", ").append(arr[i]); + }//end loop + strBuf.append("]"); + return strBuf.toString(); + } + + /** + * Convenience method to simplify the throwing of exceptions with embedded + * causes (avoids having to cast the return value of Throwable.initCause + * back to the exception's type). Use as follows: + * <pre> + * throw Util.initCause(new SomeException("foo"), cause); + * </pre> + */ + public static <T extends Throwable> T initCause(T t, Throwable cause) { + t.initCause(cause); + return t; + } + + /** + * Verifies that all non-<code>null</code> elements of the given + * <code>Collection</code> are assignable to the specified type, + * throwing a <code>ClassCastException</code> if any are not. + */ + public static void checkElementTypes(Collection<?> c, Class<?> type) { + for (Object elt : c) { + if (!type.isInstance(elt)) { + throw new ClassCastException( + elt + " not assignable to " + type); + } + } + } + + /** + * Returns a UUID with the same bit value as the given + * <code>ServiceID</code>. + */ + public static UUID toUUID(ServiceID serviceId) { + return new UUID( serviceId.getMostSignificantBits(), + serviceId.getLeastSignificantBits() ); + } + + /** + * Returns a string representation of the given + * <code>ServiceDiscoveryEvent</code> (since + * <code>ServiceDiscoveryEvent</code> doesn't define + * its own <code>toString</code> method). 
+ */ + public static String eventToString(ServiceDiscoveryEvent event) { + return "ServiceDiscoveryEvent[source=" + event.getSource() + + ",preEventItem=" + event.getPreEventServiceItem() + + ",postEventItem=" + event.getPostEventServiceItem() + "]"; + } + + /** + * Convenience method that encapsulates common functions that services + * or clients may wish to perform to be able to discover lookup services + * in the system. + * <p> + * This method retrieves and returns a lookup discovery manager from + * the given <code>Configuration</code>. If no lookup discovery manager + * has been configured, this method will return an instance of the + * <code>LookupDiscoveryManager</code> helper utility class, + * initialized to discover NO_GROUPS and no locators. When such a + * discovery manager is returned, the calling entity can call the + * <code>setGroups</code> and/or </code>setLocators</code> method + * to initiate the lookup discovery process. + * <p> + * Note that this method expects that the discovery manager + * that has been configured is an instance of both + * <code>DiscoveryGroupManagement</code> and + * <code>DiscoveryLocatorManagement</code>. + * + * @param config The calling service's <code>Configuration</code> + * from which this method will retrieve the items + * needed to perform the desired initialization. + * + * @param componentName <code>String</code> whose value is the name of + * the <i>component</i> used to index the calling + * service's configuration <i>entries</i>. + * + * @param entryName <code>String</code> whose value is the name of + * the configuration entry that references the + * the desired lookup discovery manager instance + * specified in the configuration. + * + * @return An instance of <code>DiscoveryManagement</code> that supports + * both group and locator discovery; where the instance returned + * is either retrieved from the given <code>Configuration</code>, + * or is a default instance of <code>LookupDiscoveryManager</code>. 
+ * + * @throws <code>ConfigurationException</code> when there is a problem + * retrieving the desired entry from the configuration. + * + * @throws IOException when there is a problem with multicast discovery. + */ + public static DiscoveryManagement getDiscoveryManager + (Configuration config, + String componentName, + String entryName ) + throws ConfigurationException, + IOException + { + // The discovery manager must be an instance of both + // DiscoveryGroupManagement and DiscoveryLocatorManagement, so that + // the groupsToJoin and locatorsToJoin can both be retrieved from + //the discovery manager and displayed. + DiscoveryManagement dMgr; + try { + dMgr = (DiscoveryManagement)Config.getNonNullEntry + (config, + componentName, + entryName, + DiscoveryManagement.class); + if( !(dMgr instanceof DiscoveryGroupManagement) ) { + throw new ConfigurationException + (entryName + " entry must " + +"implement DiscoveryGroupManagment"); + } + if( !(dMgr instanceof DiscoveryLocatorManagement) ) { + throw new ConfigurationException + (entryName + " entry must " + +"implement DiscoveryLocatorManagement"); + } + } catch (NoSuchEntryException e) { + return ( new LookupDiscoveryManager + (ConfigDeployUtil.getGroupsToDiscover(), + ConfigDeployUtil.getLocatorsToDiscover(), + null, config) ); + } + return dMgr; + } + + /** + * Retrieves and returns a lookup discovery manager from the given + * <code>Configuration</code>, using a default entry name of + * <i>discoveryManager</i>. 
+ */ + public static DiscoveryManagement getDiscoveryManager + (Configuration config, + String componentName) + throws ConfigurationException, + IOException + { + return getDiscoveryManager(config, componentName, "discoveryManager"); + } + + public static Exporter getExporter(Configuration config, + String componentName, + String entryName, + boolean defaultEnableDgc, + boolean defaultKeepAlive) + throws ConfigurationException + { + if(config == null) { + throw new NullPointerException("null config"); + } + if(componentName == null) { + throw new NullPointerException("null componentName"); + } + if(entryName == null) { + throw new NullPointerException("null entryName"); + } + Exporter exporter = null; + ServerEndpoint endpoint = TcpServerEndpoint.getInstance(0); + InvocationLayerFactory ilFactory = new BasicILFactory(); + Exporter defaultExporter = + new BasicJeriExporter + (endpoint, ilFactory, defaultEnableDgc, defaultKeepAlive); + exporter = + (Exporter)config.getEntry + (componentName, entryName, Exporter.class, defaultExporter); + if(exporter == null) { + throw new ConfigurationException("null exporter"); + } + return exporter; + } + + public static <E> Future<E> wrapFuture(Exporter exporter, + Future<E> future) + throws ExportException + { + if(exporter == null) { + throw new NullPointerException("null exporter"); + } + if(future == null) { + throw new NullPointerException("null future"); + } + + // 1. Wrap the given future in a remote (proxyable) object + // 2. Export the remote object to produce a dynamic proxy (stub) + // 3. Return the proxied future (the stub) wrapped in a Serializable + // wrapper class implementing the Future interface. 
+ + final RemoteFuture<E> impl = new RemoteFutureImpl<E>(future); + + final RemoteFuture<E> stub = (RemoteFuture<E>)exporter.export(impl); + + return new ClientFuture<E>(stub); + } + + public static class WaitOnInterruptThread extends InterruptedStatusThread { + private Logger logger; + public WaitOnInterruptThread(final Logger logger) { + super("WaitOnInterruptThread"); + setDaemon(true); + this.logger = (logger == null ? + LogUtil.getLog4jLogger((this.getClass()).getName()) : + logger); + } + public void run() { + while (!hasBeenInterrupted()) { + try { + Thread.sleep(Long.MAX_VALUE); + } catch (InterruptedException e) { + if( logger.isDebugEnabled() ) { + logger.log(Level.DEBUG, + "Util.WaitOnInterruptThread: " + +"interrupt received"); + } + } + } + } + } +} Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/jini/Util.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -28,11 +28,11 @@ import static com.bigdata.loadbalancer.Constants.*; import com.bigdata.attr.ServiceInfo; +import com.bigdata.jini.Util; import com.bigdata.jini.start.BigdataZooDefs; import com.bigdata.service.Event; import com.bigdata.service.IServiceShutdown.ShutdownType; import com.bigdata.util.BootStateUtil; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -40,29 +40,22 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import 
org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; -import com.sun.jini.config.Config; import com.sun.jini.start.LifeCycle; import com.sun.jini.thread.ReadyState; import net.jini.config.Configuration; import net.jini.config.ConfigurationProvider; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceItem; -import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.jeri.BasicILFactory; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -32,12 +32,12 @@ import com.bigdata.btree.ResultSet; import com.bigdata.btree.filter.IFilterConstructor; import com.bigdata.btree.proc.IIndexProcedure; +import com.bigdata.jini.Util; import com.bigdata.jini.start.BigdataZooDefs; import com.bigdata.jini.util.ConfigMath; import com.bigdata.mdi.PartitionLocator; import com.bigdata.service.IServiceShutdown.ShutdownType; import com.bigdata.util.BootStateUtil; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -45,7 +45,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import 
org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; import com.sun.jini.config.Config; @@ -55,19 +54,14 @@ import net.jini.config.Configuration; import net.jini.config.ConfigurationProvider; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceItem; -import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.lookup.JoinManager; @@ -590,7 +584,7 @@ String[] groups = ((DiscoveryGroupManagement)ldm).getGroups(); LookupLocator[] locs = ((DiscoveryLocatorManagement)ldm).getLocators(); logger.log(Level.INFO, killStr+" [groups=" - +Util.writeGroupArrayToString(groupsToJoin) + + Util.writeGroupArrayToString(groupsToJoin) +", locators=" +Util.writeArrayElementsToString(locatorsToJoin)+"]"); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/process/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/process/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/process/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -31,9 +31,9 @@ import com.bigdata.boot.ProcessEventListener; import com.bigdata.boot.ProcessState; import com.bigdata.boot.ProcessStateChangeEvent; +import com.bigdata.jini.Util; import com.bigdata.util.BootStateUtil; import com.bigdata.util.Format; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import 
com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/quorum/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/quorum/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/quorum/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -29,9 +29,9 @@ import com.bigdata.attr.QuorumPeerAttr; import com.bigdata.attr.ServiceInfo; +import com.bigdata.jini.Util; import com.bigdata.service.QuorumPeerService; import com.bigdata.service.QuorumPeerService.QuorumPeerData; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -45,9 +45,7 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import com.sun.jini.config.Config; import com.sun.jini.start.LifeCycle; -import com.sun.jini.thread.InterruptedStatusThread; import com.sun.jini.thread.ReadyState; import net.jini.config.Configuration; @@ -57,7 +55,6 @@ import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; import net.jini.core.lookup.ServiceItem; import net.jini.core.lookup.ServiceTemplate; @@ -65,7 +62,6 @@ import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.jeri.BasicILFactory; @@ -88,18 +84,12 @@ import java.io.ObjectOutputStream; import java.io.ObjectStreamClass; import java.io.PrintWriter; -import java.io.Serializable; import java.net.InetAddress; -import java.net.Inet4Address; import java.net.InetSocketAddress; import java.net.InterfaceAddress; import 
java.net.NetworkInterface; -import java.net.SocketException; -import java.net.UnknownHostException; import java.rmi.RemoteException; -import java.rmi.server.ExportException; import java.util.ArrayList; -import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.Iterator; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/shard/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/shard/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/shard/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -32,10 +32,10 @@ import com.bigdata.btree.ResultSet; import com.bigdata.btree.filter.IFilterConstructor; import com.bigdata.btree.proc.IIndexProcedure; +import com.bigdata.jini.Util; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.IBlock; import com.bigdata.util.BootStateUtil; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -43,26 +43,20 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import com.sun.jini.config.Config; import com.sun.jini.start.LifeCycle; import com.sun.jini.thread.ReadyState; import net.jini.config.Configuration; import net.jini.config.ConfigurationProvider; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceItem; -import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import 
net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.jeri.BasicILFactory; @@ -71,7 +65,6 @@ import net.jini.jeri.ServerEndpoint; import net.jini.jeri.tcp.TcpServerEndpoint; import net.jini.lookup.JoinManager; -import net.jini.lookup.ServiceDiscoveryManager; import java.io.IOException; import java.rmi.RemoteException; @@ -426,7 +419,7 @@ String[] groups = ((DiscoveryGroupManagement)ldm).getGroups(); LookupLocator[] locs = ((DiscoveryLocatorManagement)ldm).getLocators(); logger.log(Level.INFO, killStr+" [groups=" - +Util.writeGroupArrayToString(groupsToJoin) + + Util.writeGroupArrayToString(groupsToJoin) +", locators=" +Util.writeArrayElementsToString(locatorsToJoin)+"]"); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/transaction/ServiceImpl.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/transaction/ServiceImpl.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/transaction/ServiceImpl.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -28,12 +28,11 @@ import static com.bigdata.transaction.Constants.*; import com.bigdata.attr.ServiceInfo; +import com.bigdata.jini.Util; import com.bigdata.jini.start.BigdataZooDefs; import com.bigdata.journal.ValidationError; -import com.bigdata.service.Event; import com.bigdata.service.IServiceShutdown.ShutdownType; import com.bigdata.util.BootStateUtil; -import com.bigdata.util.Util; import com.bigdata.util.config.ConfigDeployUtil; import com.bigdata.util.config.LogUtil; import com.bigdata.util.config.NicUtil; @@ -41,7 +40,6 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; -import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; import com.sun.jini.config.Config; @@ -51,19 +49,14 @@ import net.jini.config.Configuration; import 
net.jini.config.ConfigurationProvider; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.entry.Entry; import net.jini.core.discovery.LookupLocator; -import net.jini.core.lease.Lease; import net.jini.core.lookup.ServiceID; -import net.jini.core.lookup.ServiceItem; -import net.jini.core.lookup.ServiceTemplate; import net.jini.discovery.DiscoveryManagement; import net.jini.discovery.DiscoveryGroupManagement; import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; import net.jini.export.Exporter; import net.jini.jeri.BasicILFactory; Deleted: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/Util.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/Util.java 2010-09-15 10:22:34 UTC (rev 3555) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/util/Util.java 2010-09-15 14:30:14 UTC (rev 3556) @@ -1,524 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ - -package com.bigdata.util; - -import com.bigdata.util.config.ConfigDeployUtil; -import com.bigdata.util.config.LogUtil; -import com.bigdata.service.proxy.ClientFuture; -import com.bigdata.service.proxy.RemoteFuture; -import com.bigdata.service.proxy.RemoteFutureImpl; - -import org.apache.log4j.Level; -import org.apache.log4j.Logger; - -import com.sun.jini.config.Config; -import com.sun.jini.thread.InterruptedStatusThread; -import net.jini.config.Configuration; -import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; -import net.jini.core.lookup.ServiceID; -import net.jini.discovery.DiscoveryManagement; -import net.jini.discovery.DiscoveryGroupManagement; -import net.jini.discovery.DiscoveryLocatorManagement; -import net.jini.discovery.LookupDiscoveryManager; -import net.jini.export.Exporter; -import net.jini.jeri.BasicILFactory; -import net.jini.jeri.BasicJeriExporter; -import net.jini.jeri.InvocationLayerFactory; -import net.jini.jeri.ServerEndpoint; -import net.jini.jeri.tcp.TcpServerEndpoint; - -import net.jini.lookup.JoinManager; -import net.jini.lookup.ServiceDiscoveryEvent; -import net.jini.lookup.ServiceDiscoveryManager; - -import java.io.IOException; -import java.rmi.server.ExportException; -import java.util.Collection; -import java.util.Iterator; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Future; - -/** - * Miscellaneous, convenient utility methods. - */ -public class Util { - - public static <T extends Comparable<T>> T max(final T... elements){ - T max = elements[0]; - for (final T element : elements) { - if(0 < element.compareTo(max)) { - max = element; - } - } - return max; - } - - public static <T extends Comparable<T>> T min(final T... 
elements){ - T min = elements[0]; - for (final T element : elements) { - if(0 > element.compareTo(min)) { - min = element; - } - } - return min; - } - - /* Convenience method that can be called when a service exits, or - * when failure occurs during the service's initialization process. - * This method un-does any work that may have already been completed; - * for example, un-exports the service if it has already been - * exported, closes any open sockets or file descriptors, terminates - * threads that may have been started, etc. - * <p> - * Note that multiple versions of this method are provided. One version - * is intended to be used by entities that act only as a service (that - * is, entities that export a proxy and use a <code>JoinManager</code>). - * One version is intended to be used by entities that act only as a client - * (that is, entites that use a <code>ServiceDiscoveryManager</code>). - * And the final version can be used by entities that act as both a - * service and as a client. 
- */ - public static void cleanupOnExit - (Object innerProxy, - Exporter serverExporter, - JoinManager joinManager, - DiscoveryManagement discoveryManager) - { - cleanupOnExit(innerProxy, serverExporter, null, joinManager, - null, discoveryManager); - - } - - public static void cleanupOnExit - (ServiceDiscoveryManager serviceDiscoveryManager, - DiscoveryManagement discoveryManager) - { - cleanupOnExit(null, null, null, null, - serviceDiscoveryManager, discoveryManager); - } - - - public static void cleanupOnExit - (Object innerProxy, - Exporter serverExporter, - JoinManager joinManager, - ServiceDiscoveryManager serviceDiscoveryManager, - DiscoveryManagement discoveryManager) - { - cleanupOnExit(innerProxy, serverExporter, null, joinManager, - serviceDiscoveryManager, discoveryManager); - } - - public static void cleanupOnExit - (Object innerProxy, - Exporter serverExporter, - Set<Exporter> futureExporters, - JoinManager joinManager, - ServiceDiscoveryManager serviceDiscoveryManager, - DiscoveryManagement discoveryManager) - { - if(innerProxy != null) { - try { - if(serverExporter != null) serverExporter.unexport(true); - } catch(Throwable t) { } - } - - if(futureExporters != null) { - for(Exporter exporter : futureExporters) { - if(exporter != null) { - try { - exporter.unexport(true); - exporter = null; - } catch(Throwable t) { } - } - } - synchronized(futureExporters) { - for(Iterator<Exporter> itr = futureExporters.iterator(); - itr.hasNext(); ) - { - itr.next(); - itr.remove(); - } - } - } - - if(joinManager != null) { - try { - joinManager.terminate(); - } catch(Throwable t) { } - } - - if(serviceDiscoveryManager != null) { - try { - serviceDiscoveryManager.terminate(); - } catch(Throwable t) { } - } - - if(discoveryManager != null) { - try { - discoveryManager.terminate(); - } catch(Throwable t) { } - } - } - - - /** - * Unexports the remote object that was exported by the given - * <code>Exporter</code> parameter; which removes the object - * from the RMI 
runtime so that the object can no longer accept - * incoming remote calls.er accept incoming RMI calls. - * <P> - * This method first makes an attempt to unexport the object - * 'gracefully'. That is, for a finite period of time, an attempt - * is made to allow all calls to the object that are in progress, - * or pending, to complete before the object is actually unexported. - * If, after that finite period of time, the object has not been - * successfully unexported, the object is then 'forcibly' unexported; - * that is, the object is unexported even if there are calls to - * the object that are in progress or still pending. - * <P> - * Upon successfully unexporting the given <code>Exporter</code>, - * <code>true</code> is returned. If the given <code>Exporter</code> - * cannot be unexported, or if the value input for that parameter - * is <code>null</code> or has not exported any interfaces, then - * <code>false</code> is returned. - */ - public static boolean unexportRemoteObject(Exporter exporter) { - if (exporter == null) return false; - - // delay no more than 1 minute - final long endTime = System.currentTimeMillis() + (1L*60L*1000L); - boolean unexported = false; - try { - // Unexport only if there are no pending or in-progress calls - while (!unexported && System.currentTimeMillis() < endTime) { - unexported = exporter.unexport(false);//do not force - if (!unexported) Thread.yield(); - }//end loop - if (!unexported) unexported = exporter.unexport(true);//force - } catch ( IllegalStateException e ) { - // Thrown if no object has been exported with the - // Exporter instance - return false; - } - return unexported; - } - - - /** - * Convenience method that can be called in an entity's constructor - * when failure occurs during the initialization process. This - * method simply rethrows the given <code>Throwable</code> so the - * constructor doesn't have to. 
- */ - public static void handleInitThrowable(Throwable t, Logger logger) - throws IOException, - ConfigurationException - { - if( logger != null ) { - logger.log(Level.FATAL, "initialization failure ... ", t); - } else { - System.err.println("FATAL: initialization failure ... "+t); - }//endif - if (t instanceof IOException) { - throw (IOException)t; - } else if (t instanceof ConfigurationException) { - throw (ConfigurationException)t; - } else if (t instanceof RuntimeException) { - throw (RuntimeException)t; - } else if (t instanceof Error) { - throw (Error)t; - }//endif - } - - /** - * Convenience method that returns a <code>String</code> containing - * a common-separated list the elements (group names) of the given - * array. - */ - public static String writeGroupArrayToString(String[] groups) { - if(groups == null) { - return new String("[ALL_GROUPS]"); - }//endif - if(groups.length <= 0) { - return new String("[]"); - }//endif - StringBuffer strBuf = null; - if(groups[0].compareTo("") == 0) { - strBuf = new StringBuffer("[The PUBLIC Group"); - } else { - strBuf = new StringBuffer("["+groups[0]); - }//endif - for(int i=1;i<groups.length;i++) { - if(groups[i].compareTo("") == 0) { - strBuf.append(", The PUBLIC Group"); - } else { - strBuf.append(", ").append(groups[i]); - }//endif - }//end loop - strBuf.append("]"); - return strBuf.toString(); - } - - /** - * Convenience method that returns a <code>String</code> containing - * a common-separated list the elements (locators) of the given - * array. 
- */ - public static String writeArrayElementsToString(Object[] arr) { - if(arr == null) return new String("[]"); - if(arr.length <= 0) { - return new String("[]"); - }//endif - StringBuffer strBuf = new StringBuffer("["+arr[0]); - for(int i=1;i<arr.length;i++){ - strBuf.append(", ").append(arr[i]); - }//end loop - strBuf.append("]"); - return strBuf.toString(); - } - - /** - * Convenience method to simplify the throwing of exceptions with embedded - * causes (avoids having to cast the return value of Throwable.initCause - * back to the exception's type). Use as follows: - * <pre> - * throw Util.initCause(new SomeException("foo"), cause); - * </pre> - */ - public static <T extends Throwable> T initCause(T t, Throwable cause) { - t.initCause(cause); - return t; - } - - /** - * Verifies that all non-<code>null</code> elements of the given - * <code>Collection</code> are assignable to the specified type, - * throwing a <code>ClassCastException</code> if any are not. - */ - public static void checkElementTypes(Collection<?> c, Class<?> type) { - for (Object elt : c) { - if (!type.isInstance(elt)) { - throw new ClassCastException( - elt + " not assignable to " + type); - } - } - } - - /** - * Returns a UUID with the same bit value as the given - * <code>ServiceID</code>. - */ - public static UUID toUUID(ServiceID serviceId) { - return new UUID( serviceId.getMostSignificantBits(), - serviceId.getLeastSignificantBits() ); - } - - /** - * Returns a string representation of the given - * <code>ServiceDiscoveryEvent</code> (since - * <code>ServiceDiscoveryEvent</code> doesn't define - * its own <code>toString</code> method). 
- */ - public static String eventToString(ServiceDiscoveryEvent event) { - return "ServiceDiscoveryEvent[source=" + event.getSource() + - ",preEventItem=" + event.getPreEventServiceItem() + - ",postEventItem=" + event.getPostEventServiceItem() + "]"; - } - - /** - * Convenience method that encapsulates common functions that services - * or clients may wish to perform to be able to discover lookup services - * in the system. - * <p> - * This method retrieves and returns a lookup discovery manager from - * the given <code>Configuration</code>. If no lookup discovery manager - * has been configured, this method will return an instance of the - * <code>LookupDiscoveryManager</code> helper utility class, - * initialized to discover NO_GROUPS and no locators. When such a - * discovery manager is returned, the calling entity can call the - * <code>setGroups</code> and/or </code>setLocators</code> method - * to initiate the lookup discovery process. - * <p> - * Note that this method expects that the discovery manager - * that has been configured is an instance of both - * <code>DiscoveryGroupManagement</code> and - * <code>DiscoveryLocatorManagement</code>. - * - * @param config The calling service's <code>Configuration</code> - * from which this method will retrieve the items - * needed to perform the desired initialization. - * - * @param componentName <code>String</code> whose value is the name of - * the <i>component</i> used to index the calling - * service's configuration <i>entries</i>. - * - * @param entryName <code>String</code> whose value is the name of - * the configuration entry that references the - * the desired lookup discovery manager instance - * specified in the configuration. - * - * @return An instance of <code>DiscoveryManagement</code> that supports - * both group and locator discovery; where the instance returned - * is either retrieved from the given <code>Configuration</code>, - * or is a default instance of <code>LookupDiscoveryManager</code>. 
- * - * @throws <code>ConfigurationException</code> when there is a problem - * retrieving the desired entry from the configuration. - * - * @throws IOException when there is a problem with multicast discovery. - */ - public static DiscoveryManagement getDiscoveryManager - (Configuration config, - String componentName, - String entryName ) - throws ConfigurationException, - IOException - { - // The discovery manager must be an instance of both - // DiscoveryGroupManagement and DiscoveryLocatorManagement, so that - // the groupsToJoin and locatorsToJoin can both be retrieved from - //the discovery manager and displayed. - DiscoveryManagement dMgr; - try { - dMgr = (DiscoveryManagement)Config.getNonNullEntry - (config, - componentName, - entryName, - DiscoveryManagement.class); - if( !(dMgr instanceof DiscoveryGroupManagement) ) { - throw new ConfigurationException - (entryName + " entry must " - +"implement DiscoveryGroupManagment"); - } - if( !(dMgr instanceof DiscoveryLocatorManagement) ) { - throw new ConfigurationException - (entryName + " entry must " - +"implement DiscoveryLocatorManagement"); - } - } catch (NoSuchEntryException e) { - return ( new LookupDiscoveryManager - (ConfigDeployUtil.getGroupsToDiscover(), - ConfigDeployUtil.getLocatorsToDiscover(), - null, config) ); - } - return dMgr; - } - - /** - * Retrieves and returns a lookup discovery manager from the given - * <code>Configuration</code>, using a default entry name of - * <i>discoveryManager</i>. 
- */ - public static DiscoveryManagement getDiscoveryManager - (Configuration config, - String componentName) - throws ConfigurationException, - IOException - { - return getDiscoveryManager(config, componentName, "discoveryManager"); - } - - public static Exporter getExporter(Configuration config, - String componentName, - String entryName, - boolean defaultEnableDgc, - boolean defaultKeepAlive) - throws ConfigurationException - { - if(config == null) { - throw new NullPointerException("null config"); - } - if(componentName == null) { - throw new NullPointerException("null componentName"); - } - if(entryName == null) { - throw new NullPointerException("null entryName"); - } - Exporter exporter = null; - ServerEndpoint endpoint = TcpServerEndpoint.getInstance(0); - InvocationLayerFactory ilFactory = new BasicILFactory(); - Exporter defaultExporter = - new BasicJeriExporter - (endpoint, ilFactory, defaultEnableDgc, defaultKeepAlive); - exporter = - (Exporter)config.getEntry - (componentName, entryName, Exporter.class, defaultExporter); - if(exporter == null) { - throw new ConfigurationException("null exporter"); - } - return exporter; - } - - public static <E> Future<E> wrapFuture(Exporter exporter, - Future<E> future) - throws ExportException - { - if(exporter == null) { - throw new NullPointerException("null exporter"); - } - if(future == null) { - throw new NullPointerException("null future"); - } - - // 1. Wrap the given future in a remote (proxyable) object - // 2. Export the remote object to produce a dynamic proxy (stub) - // 3. Return the proxied future (the stub) wrapped in a Serializable - // wrapper class implementing the Future interface. 
- - final RemoteFuture<E> impl = new RemoteFutureImpl<E>(future); - - final RemoteFuture<E> stub = (RemoteFuture<E>)exporter.export(impl); - - return new ClientFuture<E>(stub); - } - - public static class WaitOnInterruptThread extends InterruptedStatusThread { - private Logger logger; - public WaitOnInterruptThread(final Logger logger) { - super("WaitOnInterruptThread"); - setDaemon(true); - this.logger = (logger == null ? - LogUtil.getLog4jLogger((this.getClass()).getName()) : - logger); - } - public void run() { - while (!hasBeenInterrupted()) { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException e) { - if( logger.isDebugEnabled() ) { - logger.log(Level.DEBUG, - "Util.WaitOnInterruptThread: " - +"interrupt received"); - } - } - } - } - } -} Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/... [truncated message content] |
From: <tho...@us...> - 2010-09-15 10:22:41
|
Revision: 3555 http://bigdata.svn.sourceforge.net/bigdata/?rev=3555&view=rev Author: thompsonbry Date: 2010-09-15 10:22:34 +0000 (Wed, 15 Sep 2010) Log Message: ----------- Modified QueryEngine#eval() to accept the initial binding set chunk. Modified TestFederatedQueryEngine to create a local query controller to which the queries are submitted. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java Removed Paths: ------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java Deleted: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -1,108 +0,0 @@ -package com.bigdata.bop.engine; - -import java.io.Serializable; - -import com.bigdata.bop.BOp; -import 
com.bigdata.bop.fed.FederatedRunningQuery; -import com.bigdata.relation.accesspath.IAsynchronousIterator; - -/** - * An non-{@link Serializable} chunk of intermediate results which are ready to - * be consumed by some {@link BOp} in a specific query (this is only used in - * query evaluation for the standalone database). - */ -public class BindingSetChunk<E> implements IChunkMessage<E> { - - /** The query controller. */ - private final IQueryClient queryController; - - /** - * The query identifier. - */ - private final long queryId; - - /** - * The target {@link BOp}. - */ - private final int bopId; - - /** - * The index partition which is being targeted for that {@link BOp}. - */ - private final int partitionId; - - /** - * The binding sets to be consumed by that {@link BOp}. - */ - private IAsynchronousIterator<E[]> source; - - public IQueryClient getQueryController() { - return queryController; - } - - public long getQueryId() { - return queryId; - } - - public int getBOpId() { - return bopId; - } - - public int getPartitionId() { - return partitionId; - } - - public boolean isMaterialized() { - return true; - } - - public BindingSetChunk(final IQueryClient queryController, - final long queryId, final int bopId, final int partitionId, - final IAsynchronousIterator<E[]> source) { - - if (queryController == null) - throw new IllegalArgumentException(); - - if (source == null) - throw new IllegalArgumentException(); - - this.queryController = queryController; - - this.queryId = queryId; - - this.bopId = bopId; - - this.partitionId = partitionId; - - this.source = source; - - } - - public String toString() { - - return getClass().getName() + "{queryId=" + queryId + ",bopId=" + bopId - + ",partitionId=" + partitionId + "}"; - - } - - public void materialize(FederatedRunningQuery runningQuery) { - // NOP - } - - public void release() { - // NOP - } - - public IChunkAccessor<E> getChunkAccessor() { - return new ChunkAccessor(); - } - - private class ChunkAccessor 
implements IChunkAccessor<E> { - - public IAsynchronousIterator<E[]> iterator() { - return source; - } - - } - -} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -3,7 +3,6 @@ import java.rmi.RemoteException; import com.bigdata.bop.BindingSetPipelineOp; -import com.bigdata.bop.IBindingSet; /** * Interface for a client executing queries (the query controller). @@ -11,25 +10,6 @@ public interface IQueryClient extends IQueryPeer { /** - * Evaluate a query which visits {@link IBindingSet}s, such as a join. This - * node will serve as the controller for the query. - * - * @param queryId - * The unique identifier for the query. - * @param query - * The query to evaluate. - * - * @return An iterator visiting {@link IBindingSet}s which result from - * evaluating the query. - * - * @throws IllegalStateException - * if the {@link QueryEngine} has been {@link #shutdown()}. - * @throws Exception - * @throws RemoteException - */ - RunningQuery eval(long queryId, BindingSetPipelineOp query) throws Exception, RemoteException; - - /** * Return the query. 
* * @param queryId Copied: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java (from rev 3554, branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BindingSetChunk.java) =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -0,0 +1,110 @@ +package com.bigdata.bop.engine; + +import java.io.Serializable; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.fed.FederatedRunningQuery; +import com.bigdata.relation.accesspath.IAsynchronousIterator; + +/** + * An non-{@link Serializable} chunk of intermediate results which are ready to + * be consumed by some {@link BOp} in a specific query (this is only used in + * query evaluation for the standalone database). + * + * @todo test suite + */ +public class LocalChunkMessage<E> implements IChunkMessage<E> { + + /** The query controller. */ + private final IQueryClient queryController; + + /** + * The query identifier. + */ + private final long queryId; + + /** + * The target {@link BOp}. + */ + private final int bopId; + + /** + * The index partition which is being targeted for that {@link BOp}. + */ + private final int partitionId; + + /** + * The binding sets to be consumed by that {@link BOp}. 
+ */ + private IAsynchronousIterator<E[]> source; + + public IQueryClient getQueryController() { + return queryController; + } + + public long getQueryId() { + return queryId; + } + + public int getBOpId() { + return bopId; + } + + public int getPartitionId() { + return partitionId; + } + + public boolean isMaterialized() { + return true; + } + + public LocalChunkMessage(final IQueryClient queryController, + final long queryId, final int bopId, final int partitionId, + final IAsynchronousIterator<E[]> source) { + + if (queryController == null) + throw new IllegalArgumentException(); + + if (source == null) + throw new IllegalArgumentException(); + + this.queryController = queryController; + + this.queryId = queryId; + + this.bopId = bopId; + + this.partitionId = partitionId; + + this.source = source; + + } + + public String toString() { + + return getClass().getName() + "{queryId=" + queryId + ",bopId=" + bopId + + ",partitionId=" + partitionId + "}"; + + } + + public void materialize(FederatedRunningQuery runningQuery) { + // NOP + } + + public void release() { + // NOP + } + + public IChunkAccessor<E> getChunkAccessor() { + return new ChunkAccessor(); + } + + private class ChunkAccessor implements IChunkAccessor<E> { + + public IAsynchronousIterator<E[]> iterator() { + return source; + } + + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -45,6 +45,7 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IPredicate; import com.bigdata.bop.bset.Union; +import com.bigdata.bop.fed.FederatedQueryEngine; import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import 
com.bigdata.btree.view.FusedView; @@ -500,7 +501,7 @@ * evaluation. * <p> * Chunk concatenation could be performed here if we (a) mark the - * {@link BindingSetChunk} with a flag to indicate when it has been + * {@link LocalChunkMessage} with a flag to indicate when it has been * accepted; and (b) rip through the incoming chunks for the query for * the target bop and combine them to feed the task. Chunks which have * already been assigned would be dropped when take() discovers them. @@ -734,18 +735,57 @@ } + /** + * Evaluate a query which visits {@link IBindingSet}s, such as a join. This + * node will serve as the controller for the query. + * + * @param queryId + * The unique identifier for the query. + * @param query + * The query to evaluate. + * + * @return An iterator visiting {@link IBindingSet}s which result from + * evaluating the query. + * @param msg + * A message providing access to the initial {@link IBindingSet + * binding set(s)} used to begin query evaluation. + * + * @throws IllegalStateException + * if the {@link QueryEngine} has been {@link #shutdown()}. + * @throws Exception + * @throws RemoteException + * + * FIXME The test suites need to be modified to create a local + * {@link FederatedQueryEngine} object which fronts for an + * {@link IIndexManager} which is local to the client - not on a + * data service at all. This is necessary in order for the unit + * test (or application code) to directly access the + * RunningQuery reference, which is needed to use get() (to wait + * for the query), iterator() (to drain the query), etc. + * <p> + * This will also give us a place to hang query-local resources + * on the client. + * <p> + * This has to be a {@link FederatedQueryEngine} because it + * needs to talk to a federation. There should be nothing DS + * specific about the {@link FederatedQueryEngine}. 
+ */ public RunningQuery eval(final long queryId, - final BindingSetPipelineOp query) throws Exception { + final BindingSetPipelineOp query, + final IChunkMessage<IBindingSet> msg) throws Exception { if (query == null) throw new IllegalArgumentException(); + if (msg == null) + throw new IllegalArgumentException(); + + if (queryId != msg.getQueryId()) // @todo use equals() to compare UUIDs. + throw new IllegalArgumentException(); + final RunningQuery runningQuery = newRunningQuery(this, queryId, -// System.currentTimeMillis()/* begin */, true/* controller */, this/* clientProxy */, query); - assertRunning(); - final long timeout = query.getProperty(BOp.Annotations.TIMEOUT, BOp.Annotations.DEFAULT_TIMEOUT); @@ -767,8 +807,12 @@ } + assertRunning(); + putRunningQuery(queryId, runningQuery); + runningQuery.startQuery(msg); + return runningQuery; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -366,7 +366,7 @@ /* * Note: The partitionId will always be -1 in scale-up. */ - final BindingSetChunk<IBindingSet> chunk = new BindingSetChunk<IBindingSet>( + final LocalChunkMessage<IBindingSet> chunk = new LocalChunkMessage<IBindingSet>( clientProxy, queryId, sinkId, -1/* partitionId */, sink .iterator()); @@ -646,11 +646,9 @@ /** * Invoked once by the query controller with the initial - * {@link BindingSetChunk} which gets the query moving. - * - * @todo this should reject multiple invocations for a given query instance. + * {@link IChunkMessage} which gets the query moving. 
*/ - public void startQuery(final IChunkMessage<IBindingSet> msg) { + void startQuery(final IChunkMessage<IBindingSet> msg) { if (!controller) throw new UnsupportedOperationException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -122,20 +122,6 @@ } /** - * Constructor used on a {@link DataService} (a query engine peer). - * - * @param dataService - * The data service. - */ - public FederatedQueryEngine(final DataService dataService) { - - this(dataService.getFederation(), - new DelegateIndexManager(dataService), dataService - .getResourceManager().getResourceService()); - - } - - /** * Overridden to strengthen the return type. * <p> * {@inheritDoc} @@ -152,6 +138,20 @@ return getClass().getName() + "{serviceUUID=" + getServiceUUID() + "}"; } + + /** + * Constructor used on a {@link DataService} (a query engine peer). + * + * @param dataService + * The data service. 
+ */ + public FederatedQueryEngine(final DataService dataService) { + + this(dataService.getFederation(), + new DelegateIndexManager(dataService), dataService + .getResourceManager().getResourceService()); + + } /** * Constructor used on a non-{@link DataService} node to expose a query Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -42,7 +42,7 @@ import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IPredicate; import com.bigdata.bop.IShardwisePipelineOp; -import com.bigdata.bop.engine.BindingSetChunk; +import com.bigdata.bop.engine.LocalChunkMessage; import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryPeer; @@ -94,7 +94,7 @@ * A map associating resources with running queries. When a query halts, the * resources listed in its resource map are released. Resources can include * {@link ByteBuffer}s backing either incoming or outgoing - * {@link BindingSetChunk}s, temporary files associated with the query, hash + * {@link LocalChunkMessage}s, temporary files associated with the query, hash * tables, etc. * * @todo This map will eventually need to be moved into {@link RunningQuery} @@ -604,7 +604,7 @@ * query engine. 
*/ - final IChunkMessage<IBindingSet> msg = new BindingSetChunk<IBindingSet>( + final IChunkMessage<IBindingSet> msg = new LocalChunkMessage<IBindingSet>( getQueryController(), getQueryId(), sinkId, partitionId, source.iterator()); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -198,16 +198,11 @@ })); final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId,query); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1/* partitionId */, + newBindingSetIterator(new HashBindingSet()))); - runningQuery.startQuery(new BindingSetChunk( - queryEngine, - queryId, - startId,// - -1, //partitionId - new ThickAsynchronousIterator<IBindingSet[]>( - new IBindingSet[][] { new IBindingSet[] { new HashBindingSet()} }))); - // Wait until the query is done. final Map<Integer, BOpStats> statsMap = runningQuery.get(); { @@ -290,13 +285,11 @@ ) }; final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId, query); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1 /* partitionId */, + newBindingSetIterator(new HashBindingSet()))); - runningQuery.startQuery(new BindingSetChunk(queryEngine, queryId, - startId,// - -1, // partitionId - newBindingSetIterator(new HashBindingSet()))); - // verify solutions. 
assertSameSolutions(expected, runningQuery.iterator()); @@ -478,21 +471,22 @@ final BindingSetPipelineOp query = join2Op; - final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId, query); - // start the query. + final long queryId = 1L; + final IChunkMessage<IBindingSet> initialChunkMessage; { - + final IBindingSet initialBindings = new HashBindingSet(); - + initialBindings.set(Var.var("x"), new Constant<String>("Mary")); - runningQuery.startQuery(new BindingSetChunk<IBindingSet>( - queryEngine, queryId, startId,// + initialChunkMessage = new LocalChunkMessage<IBindingSet>(queryEngine, + queryId, startId,// -1, // partitionId - newBindingSetIterator(initialBindings))); + newBindingSetIterator(initialBindings)); } + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + initialChunkMessage); // verify solutions. { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -27,10 +27,15 @@ package com.bigdata.bop.fed; +import java.io.File; import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; import java.util.Map; import java.util.Properties; import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import junit.framework.TestCase2; @@ -51,9 +56,8 @@ import com.bigdata.bop.ap.R; import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.engine.BOpStats; -import com.bigdata.bop.engine.BindingSetChunk; -import com.bigdata.bop.engine.IQueryClient; -import com.bigdata.bop.engine.IQueryPeer; +import com.bigdata.bop.engine.IChunkMessage; +import 
com.bigdata.bop.engine.LocalChunkMessage; import com.bigdata.bop.engine.PipelineDelayOp; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.RunningQuery; @@ -62,17 +66,21 @@ import com.bigdata.bop.solutions.SliceOp; import com.bigdata.bop.solutions.SortOp; import com.bigdata.btree.keys.KeyBuilder; -import com.bigdata.io.SerializerUtil; +import com.bigdata.journal.BufferMode; import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; import com.bigdata.service.EmbeddedFederation; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; +import com.bigdata.service.ManagedResourceService; +import com.bigdata.service.ResourceService; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.JiniFederation; import com.bigdata.striterator.ChunkedArrayIterator; import com.bigdata.striterator.Dechunkerator; +import com.bigdata.util.config.NicUtil; import com.ibm.icu.impl.ByteBuffer; /** @@ -133,71 +141,130 @@ // The separator key between the index partitions. private byte[] separatorKey; - private IQueryClient queryEngine; - private JiniClient<?> client; - private IDataService dataService0; - private IDataService dataService1; + /** The local persistence store for the {@link #queryEngine}. */ + private Journal queryEngineStore; + + /** The local {@link ResourceService} for the {@link #queryEngine}. */ + private ManagedResourceService queryEngineResourceService; + + /** The query controller. 
*/ + private FederatedQueryEngine queryEngine; + + private IDataService dataService0; + private IDataService dataService1; + protected void setUp() throws Exception { - client = new JiniClient(new String[]{"/nas/bigdata/bigdata-0.83.2/dist/bigdata/var/config/jini/bigdataStandalone.config"}); - - final IBigdataFederation<?> fed = client.connect(); - - final int maxCount = 2; - UUID[] dataServices = null; - while((dataServices = fed.getDataServiceUUIDs(maxCount)).length < maxCount) { - System.err.println("Waiting for "+maxCount+" data services. There are "+dataServices.length+" discovered."); - Thread.sleep(250/*ms*/); - } - - super.setUp(); + /* + * FIXME This is hardcoded to a specific location in the file system. + * + * Also, the dependency on JiniClient means that we must move this test + * class into the bigdata-jini package. + */ + client = new JiniClient( + new String[] { "/nas/bigdata/bigdata-0.83.2/dist/bigdata/var/config/jini/bigdataStandalone.config" }); - dataService0 = fed.getDataService(dataServices[0]); - dataService1 = fed.getDataService(dataServices[1]); + final IBigdataFederation<?> fed = client.connect(); + + // create index manager for the query controller. { + final Properties p = new Properties(); + p.setProperty(Journal.Options.BUFFER_MODE, BufferMode.Transient + .toString()); + queryEngineStore = new Journal(p); + } + + // create resource service for the query controller. + { + queryEngineResourceService = new ManagedResourceService( + new InetSocketAddress(InetAddress + .getByName(NicUtil.getIpAddress("default.nic", + "default", true/* loopbackOk */)), 0/* port */ + ), 0/* requestServicePoolSize */) { - // @todo need to wait for the dataService to be running. -// assertTrue(((DataService) dataServer.getProxy()) -// .getResourceManager().awaitRunning()); - - // resolve the query engine on one of the data services. 
- while ((queryEngine = (IQueryClient) dataService0.getQueryEngine()) == null) { - - if (log.isInfoEnabled()) - log.info("Waiting for query engine on dataService0"); - - Thread.sleep(250); - - } - - System.err.println("controller: " + queryEngine); - + @Override + protected File getResource(UUID uuid) throws Exception { + // Will not serve up files. + return null; + } + }; } + + // create the query controller. + queryEngine = new FederatedQueryEngine(fed, queryEngineStore, + queryEngineResourceService); + + /* + * Discover the data services. We need their UUIDs in order to create + * the test relation split across an index partition located on each of + * the two data services. + */ + final int maxCount = 2; + UUID[] dataServices = null; + final long begin = System.currentTimeMillis(); + long elapsed = 0L; + while ((dataServices = fed.getDataServiceUUIDs(maxCount)).length < maxCount + && ((elapsed = System.currentTimeMillis() - begin) < TimeUnit.SECONDS + .toMillis(60))) { + System.err.println("Waiting for " + maxCount + + " data services. There are " + dataServices.length + + " discovered : elapsed=" + elapsed + "ms"); + Thread.sleep(250/* ms */); + } - // resolve the query engine on the other data services. - { + if (dataServices.length < maxCount) + throw new TimeoutException("Discovered " + dataServices.length + + " data services in " + elapsed + "ms but require " + + maxCount); + + super.setUp(); - IQueryPeer other = null; - -// assertTrue(((DataService) dataServer.getProxy()) -// .getResourceManager().awaitRunning()); - - while ((other = dataService1.getQueryEngine()) == null) { - - if (log.isInfoEnabled()) - log.info("Waiting for query engine on dataService1"); - - Thread.sleep(250); - - } +// dataService0 = fed.getDataService(dataServices[0]); +// dataService1 = fed.getDataService(dataServices[1]); +// { +// +// // @todo need to wait for the dataService to be running. 
+//// assertTrue(((DataService) dataServer.getProxy()) +//// .getResourceManager().awaitRunning()); +// +// // resolve the query engine on one of the data services. +// while ((queryEngine = (IQueryClient) dataService0.getQueryEngine()) == null) { +// +// if (log.isInfoEnabled()) +// log.info("Waiting for query engine on dataService0"); +// +// Thread.sleep(250); +// +// } +// +// System.err.println("controller: " + queryEngine); +// +// } +// +// // resolve the query engine on the other data services. +// { +// +// IQueryPeer other = null; +// +//// assertTrue(((DataService) dataServer.getProxy()) +//// .getResourceManager().awaitRunning()); +// +// while ((other = dataService1.getQueryEngine()) == null) { +// +// if (log.isInfoEnabled()) +// log.info("Waiting for query engine on dataService1"); +// +// Thread.sleep(250); +// +// } +// +// System.err.println("other : " + other); +// +// } - System.err.println("other : " + other); - - } - loadData(); } @@ -213,7 +280,18 @@ dataService0 = null; dataService1 = null; - queryEngine = null; + if (queryEngineResourceService != null) { + queryEngineResourceService.shutdownNow(); + queryEngineResourceService = null; + } + if (queryEngineStore != null) { + queryEngineStore.destroy(); + queryEngineStore = null; + } + if (queryEngine != null) { + queryEngine.shutdownNow(); + queryEngine = null; + } super.tearDown(); @@ -256,19 +334,21 @@ * Create the relation with the primary index key-range partitioned * using the given separator keys and data services. */ - - final R rel = new R(client.getFederation(), namespace, ITx.UNISOLATED, new Properties()); - if(client.getFederation() - .getResourceLocator().locate(namespace, ITx.UNISOLATED)==null) { - - rel.create(separatorKeys, dataServices); + final R rel = new R(client.getFederation(), namespace, ITx.UNISOLATED, + new Properties()); - /* - * Insert data into the appropriate index partitions. 
- */ - rel.insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */)); - + if (client.getFederation().getResourceLocator().locate(namespace, + ITx.UNISOLATED) == null) { + + rel.create(separatorKeys, dataServices); + + /* + * Insert data into the appropriate index partitions. + */ + rel + .insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */)); + } } @@ -314,20 +394,14 @@ final BindingSetPipelineOp query = new StartOp(new BOp[] {}, NV .asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, startId),// -// new NV(Predicate.Annotations.READ_TIMESTAMP, ITx.READ_COMMITTED),// })); final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId, query); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1 /* partitionId */, + newBindingSetIterator(new HashBindingSet()))); - runningQuery.startQuery(new BindingSetChunk( - queryEngine, - queryId, - startId,// - -1, //partitionId - new ThickAsynchronousIterator<IBindingSet[]>( - new IBindingSet[][] { new IBindingSet[] { new HashBindingSet()} }))); - // Wait until the query is done. final Map<Integer, BOpStats> statsMap = runningQuery.get(); { @@ -425,13 +499,12 @@ ) }; final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId, query); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId,// + -1, /* partitionId */ + newBindingSetIterator(new HashBindingSet()))); - runningQuery.startQuery(new BindingSetChunk(queryEngine, queryId, - startId,// - -1, // partitionId - newBindingSetIterator(new HashBindingSet()))); - // verify solutions. TestQueryEngine.assertSameSolutionsAnyOrder(expected, new Dechunkerator<IBindingSet>(runningQuery.iterator())); @@ -618,22 +691,23 @@ new NV(Predicate.Annotations.BOP_ID, sliceId),// })); + // start the query. 
final long queryId = 1L; - final RunningQuery runningQuery = queryEngine.eval(queryId, query); + final IChunkMessage<IBindingSet> initialChunkMessage; + { - // start the query. - { - final IBindingSet initialBindings = new HashBindingSet(); - + initialBindings.set(Var.var("x"), new Constant<String>("Mary")); - runningQuery.startQuery(new BindingSetChunk(queryEngine, queryId, - startId,// + initialChunkMessage = new LocalChunkMessage<IBindingSet>( + queryEngine, queryId, startId,// -1, // partitionId - newBindingSetIterator(initialBindings))); + newBindingSetIterator(initialBindings)); } + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + initialChunkMessage); // verify solutions. { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -257,12 +257,6 @@ return null; } - @Override - public RunningQuery eval(long queryId, BindingSetPipelineOp query) - throws Exception, RemoteException { - return null; - } - } private static class MyNIOChunkMessage<E> extends NIOChunkMessage<E> { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-14 20:48:21 UTC (rev 3554) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-15 10:22:34 UTC (rev 3555) @@ -180,12 +180,6 @@ throws RemoteException { return null; } - - @Override - public RunningQuery eval(long queryId, BindingSetPipelineOp query) - throws Exception, RemoteException { 
- return null; - } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 20:48:27
|
Revision: 3554 http://bigdata.svn.sourceforge.net/bigdata/?rev=3554&view=rev Author: thompsonbry Date: 2010-09-14 20:48:21 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Working through RMI invocations on 2-DS cluster. IQueryClient#eval() was added, but it needs to be modified to pass along the initial IChunkMessage which gets query evaluation started and the unit tests need to be updated to reflect that change. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/StartOp.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -0,0 +1,31 @@ +package com.bigdata.bop.bset; + +import java.util.Map; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpEvaluationContext; + +/** + * A version of {@link CopyBindingSetOp} which is always evaluated on the query + * controller. 
+ */ +public class StartOp extends CopyBindingSetOp { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public StartOp(StartOp op) { + super(op); + } + + public StartOp(BOp[] args, Map<String, Object> annotations) { + super(args, annotations); + } + + final public BOpEvaluationContext getEvaluationContext() { + return BOpEvaluationContext.CONTROLLER; + } + +} Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryClient.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -3,6 +3,7 @@ import java.rmi.RemoteException; import com.bigdata.bop.BindingSetPipelineOp; +import com.bigdata.bop.IBindingSet; /** * Interface for a client executing queries (the query controller). @@ -10,6 +11,25 @@ public interface IQueryClient extends IQueryPeer { /** + * Evaluate a query which visits {@link IBindingSet}s, such as a join. This + * node will serve as the controller for the query. + * + * @param queryId + * The unique identifier for the query. + * @param query + * The query to evaluate. + * + * @return An iterator visiting {@link IBindingSet}s which result from + * evaluating the query. + * + * @throws IllegalStateException + * if the {@link QueryEngine} has been {@link #shutdown()}. + * @throws Exception + * @throws RemoteException + */ + RunningQuery eval(long queryId, BindingSetPipelineOp query) throws Exception, RemoteException; + + /** * Return the query. * * @param queryId @@ -19,13 +39,13 @@ * @throws IllegalArgumentException * if there is no such query. 
*/ - public BindingSetPipelineOp getQuery(long queryId) throws RemoteException; + BindingSetPipelineOp getQuery(long queryId) throws RemoteException; /** * Notify the client that execution has started for some query, operator, * node, and index partition. */ - public void startOp(StartOpMessage msg) + void startOp(StartOpMessage msg) throws RemoteException; /** @@ -33,6 +53,6 @@ * node, shard, and source binding set chunk(s). If execution halted * abnormally, then the cause is sent as well. */ - public void haltOp(HaltOpMessage msg) throws RemoteException; + void haltOp(HaltOpMessage msg) throws RemoteException; } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -33,7 +33,7 @@ * @throws UnsupportedOperationException * unless running in scale-out. 
*/ - void declareQuery(IQueryDecl queryDecl); + void declareQuery(IQueryDecl queryDecl) throws RemoteException; /** * Notify a service that a buffer having data for some {@link BOp} in some Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -27,7 +27,6 @@ package com.bigdata.bop.engine; -import com.bigdata.bop.BOp; import com.bigdata.btree.ILocalBTreeView; import com.bigdata.journal.IIndexManager; import com.bigdata.service.IBigdataFederation; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -734,22 +734,6 @@ } - /** - * Evaluate a query which visits {@link IBindingSet}s, such as a join. This - * node will serve as the controller for the query. - * - * @param queryId - * The unique identifier for the query. - * @param query - * The query to evaluate. - * - * @return An iterator visiting {@link IBindingSet}s which result from - * evaluating the query. - * - * @throws IllegalStateException - * if the {@link QueryEngine} has been {@link #shutdown()}. 
- * @throws Exception - */ public RunningQuery eval(final long queryId, final BindingSetPipelineOp query) throws Exception { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -37,7 +37,6 @@ import com.bigdata.bop.ArrayBindingSet; import com.bigdata.bop.BOp; import com.bigdata.bop.BOpContext; -import com.bigdata.bop.BOpEvaluationContext; import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.Constant; import com.bigdata.bop.HashBindingSet; @@ -50,9 +49,10 @@ import com.bigdata.bop.ap.E; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.ap.R; -import com.bigdata.bop.bset.CopyBindingSetOp; +import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.BindingSetChunk; +import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryPeer; import com.bigdata.bop.engine.PipelineDelayOp; import com.bigdata.bop.engine.QueryEngine; @@ -62,15 +62,15 @@ import com.bigdata.bop.solutions.SliceOp; import com.bigdata.bop.solutions.SortOp; import com.bigdata.btree.keys.KeyBuilder; -import com.bigdata.jini.util.JiniUtil; +import com.bigdata.io.SerializerUtil; import com.bigdata.journal.ITx; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; -import com.bigdata.service.DataService; import com.bigdata.service.EmbeddedFederation; -import com.bigdata.service.jini.DataServer; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.service.IDataService; +import com.bigdata.service.jini.JiniClient; import 
com.bigdata.service.jini.JiniFederation; -import com.bigdata.service.jini.util.JiniServicesHelper; import com.bigdata.striterator.ChunkedArrayIterator; import com.bigdata.striterator.Dechunkerator; import com.ibm.icu.impl.ByteBuffer; @@ -128,40 +128,43 @@ } // Namespace for the relation. - static private final String namespace = "ns"; + static private final String namespace = TestFederatedQueryEngine.class.getName(); // The separator key between the index partitions. private byte[] separatorKey; - private FederatedQueryEngine queryEngine; + private IQueryClient queryEngine; + private JiniClient<?> client; - private JiniServicesHelper helper; - -// private JiniClient<?> client; - + private IDataService dataService0; + private IDataService dataService1; + protected void setUp() throws Exception { + + client = new JiniClient(new String[]{"/nas/bigdata/bigdata-0.83.2/dist/bigdata/var/config/jini/bigdataStandalone.config"}); + + final IBigdataFederation<?> fed = client.connect(); - helper = new JiniServicesHelper(); - - // start services. - helper.start(); - -// // expose to subclasses. -// client = helper.client; - + final int maxCount = 2; + UUID[] dataServices = null; + while((dataServices = fed.getDataServiceUUIDs(maxCount)).length < maxCount) { + System.err.println("Waiting for "+maxCount+" data services. There are "+dataServices.length+" discovered."); + Thread.sleep(250/*ms*/); + } + super.setUp(); - + + dataService0 = fed.getDataService(dataServices[0]); + dataService1 = fed.getDataService(dataServices[1]); { - - final DataServer dataServer = helper.dataServer0; - - assertTrue(((DataService) dataServer.getProxy()) - .getResourceManager().awaitRunning()); + // @todo need to wait for the dataService to be running. +// assertTrue(((DataService) dataServer.getProxy()) +// .getResourceManager().awaitRunning()); + // resolve the query engine on one of the data services. 
- while ((queryEngine = (FederatedQueryEngine) ((DataService) dataServer - .getProxy()).getQueryEngine()) == null) { + while ((queryEngine = (IQueryClient) dataService0.getQueryEngine()) == null) { if (log.isInfoEnabled()) log.info("Waiting for query engine on dataService0"); @@ -175,17 +178,14 @@ } // resolve the query engine on the other data services. - if (helper.dataServer1 != null) { + { - final DataServer dataServer = helper.dataServer1; - IQueryPeer other = null; - assertTrue(((DataService) dataServer.getProxy()) - .getResourceManager().awaitRunning()); +// assertTrue(((DataService) dataServer.getProxy()) +// .getResourceManager().awaitRunning()); - while ((other = ((DataService) dataServer.getProxy()) - .getQueryEngine()) == null) { + while ((other = dataService1.getQueryEngine()) == null) { if (log.isInfoEnabled()) log.info("Waiting for query engine on dataService1"); @@ -207,9 +207,13 @@ // clear reference. separatorKey = null; - helper.destroy(); + client.disconnect(true/*immediateShutdown*/); + client = null; + + dataService0 = null; + dataService1 = null; - helper = null; + queryEngine = null; super.tearDown(); @@ -244,8 +248,8 @@ }; final UUID[] dataServices = new UUID[] {// - JiniUtil.serviceID2UUID(helper.dataServer0.getServiceID()),// - JiniUtil.serviceID2UUID(helper.dataServer1.getServiceID()),// + dataService0.getServiceUUID(),// + dataService1.getServiceUUID(),// }; /* @@ -253,14 +257,19 @@ * using the given separator keys and data services. */ - final R rel = new R(helper.getFederation(), namespace, ITx.UNISOLATED, new Properties()); + final R rel = new R(client.getFederation(), namespace, ITx.UNISOLATED, new Properties()); + if(client.getFederation() + .getResourceLocator().locate(namespace, ITx.UNISOLATED)==null) { + rel.create(separatorKeys, dataServices); /* * Insert data into the appropriate index partitions. 
*/ rel.insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */)); + + } } @@ -302,18 +311,12 @@ public void test_query_startRun() throws Exception { final int startId = 1; - final BindingSetPipelineOp query = new CopyBindingSetOp(new BOp[] {}, NV + final BindingSetPipelineOp query = new StartOp(new BOp[] {}, NV .asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, startId),// // new NV(Predicate.Annotations.READ_TIMESTAMP, ITx.READ_COMMITTED),// - })){ - private static final long serialVersionUID = 1L; + })); - public BOpEvaluationContext getEvaluationContext() { - return BOpEvaluationContext.CONTROLLER; - } - }; - final long queryId = 1L; final RunningQuery runningQuery = queryEngine.eval(queryId, query); @@ -379,7 +382,7 @@ final BindingSetPipelineOp query = new SliceOp(new BOp[]{new PipelineJoin<E>( // left - new CopyBindingSetOp(new BOp[] {}, NV.asMap(new NV[] {// + new StartOp(new BOp[] {}, NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, startId),// })), // right @@ -563,7 +566,7 @@ final int predId2 = 5; final int sliceId = 6; - final BindingSetPipelineOp startOp = new CopyBindingSetOp(new BOp[] {}, + final BindingSetPipelineOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// new NV(Predicate.Annotations.BOP_ID, startId),// })); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -46,6 +46,7 @@ import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryDecl; +import com.bigdata.bop.engine.RunningQuery; import com.bigdata.bop.engine.StartOpMessage; import 
com.bigdata.io.DirectBufferPoolAllocator.IAllocationContext; import com.bigdata.relation.accesspath.BlockingBuffer; @@ -255,6 +256,12 @@ throws RemoteException { return null; } + + @Override + public RunningQuery eval(long queryId, BindingSetPipelineOp query) + throws Exception, RemoteException { + return null; + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-14 20:46:47 UTC (rev 3553) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 2010-09-14 20:48:21 UTC (rev 3554) @@ -43,6 +43,7 @@ import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IQueryClient; import com.bigdata.bop.engine.IQueryDecl; +import com.bigdata.bop.engine.RunningQuery; import com.bigdata.bop.engine.StartOpMessage; import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.relation.accesspath.IBlockingBuffer; @@ -180,6 +181,12 @@ return null; } + @Override + public RunningQuery eval(long queryId, BindingSetPipelineOp query) + throws Exception, RemoteException { + return null; + } + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 20:46:53
|
Revision: 3553 http://bigdata.svn.sourceforge.net/bigdata/?rev=3553&view=rev Author: thompsonbry Date: 2010-09-14 20:46:47 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Removed some unused fields. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/JiniFederation.java Modified: branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/JiniFederation.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/JiniFederation.java 2010-09-14 19:55:11 UTC (rev 3552) +++ branches/QUADS_QUERY_BRANCH/bigdata-jini/src/java/com/bigdata/service/jini/JiniFederation.java 2010-09-14 20:46:47 UTC (rev 3553) @@ -786,10 +786,10 @@ } - private static final String ERR_RESOLVE = "Could not resolve: "; +// private static final String ERR_RESOLVE = "Could not resolve: "; +// +// private static final String ERR_DESTROY_ADMIN = "Could not destroy: "; - private static final String ERR_DESTROY_ADMIN = "Could not destroy: "; - /** * Shutdown the services in the distributed federation <strong>NOT</strong> * just your client. This method may be used to take the entire federation This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 19:55:17
|
Revision: 3552 http://bigdata.svn.sourceforge.net/bigdata/?rev=3552&view=rev Author: thompsonbry Date: 2010-09-14 19:55:11 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Fixed import of a class since removed in AbstractTripleStore. BOp#getRequiredProperty(String name) was not compiling under ant due to a generic type parameter for the return type. I have removed the generic type parameter and added explicit casts everywhere this method is used. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/DistinctBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SortOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SparqlBindingSetComparatorOp.java branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -116,18 +116,36 @@ */ <T> T getProperty(final String name); - /** - * Return the value of the named annotation. - * - * @param name - * The name of the annotation. - * - * @return The value of the annotation. - * - * @throws IllegalArgumentException - * if the named annotation is not bound. 
- */ - <T> T getRequiredProperty(final String name); +// /** +// * Return the value of the named annotation. +// * +// * @param name +// * The name of the annotation. +// * +// * @return The value of the annotation. +// * +// * @throws IllegalArgumentException +// * if the named annotation is not bound. +// */ +// <T> T getRequiredProperty(final String name); + + /** + * Return the value of the named annotation. + * + * @param name + * The name of the annotation. + * + * @return The value of the annotation. + * + * @throws IllegalArgumentException + * if the named annotation is not bound. + * + * @todo Note: This variant without generics is required for some java + * compiler versions. + * + * @see http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6302954 + */ + public Object getRequiredProperty(final String name); /** * Deep copy clone of the operator. Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -306,11 +306,22 @@ } - public <T> T getRequiredProperty(final String name) { +// public <T> T getRequiredProperty(final String name) { +// +// @SuppressWarnings("unchecked") +// final T tmp = (T) annotations.get(name); +// +// if (tmp == null) +// throw new IllegalArgumentException("Required property: " + name); +// +// return tmp; +// +// } - @SuppressWarnings("unchecked") - final T tmp = (T) annotations.get(name); + public Object getRequiredProperty(final String name) { + final Object tmp = annotations.get(name); + if (tmp == null) throw new IllegalArgumentException("Required property: " + name); @@ -358,7 +369,7 @@ public final long getTimestamp() { - return getRequiredProperty(Annotations.TIMESTAMP); + return (Long) 
getRequiredProperty(Annotations.TIMESTAMP); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -319,7 +319,7 @@ final IIndexManager tmp = getFederation() == null ? getIndexManager() : getFederation(); - final long timestamp = pred + final long timestamp = (Long) pred .getRequiredProperty(BOp.Annotations.TIMESTAMP); return (IRelation<?>) tmp.getResourceLocator().locate( @@ -391,7 +391,7 @@ final int partitionId = predicate.getPartitionId(); - final long timestamp = predicate + final long timestamp = (Long) predicate .getRequiredProperty(BOp.Annotations.TIMESTAMP); final int flags = predicate.getProperty( Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/mutation/InsertOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -113,34 +113,36 @@ super(args, annotations); - getRequiredProperty(Annotations.SELECTED); - - } + getRequiredProperty(Annotations.SELECTED); - /** - * @see Annotations#SELECTED - */ - public IPredicate<E> getPredicate() { + } - return getRequiredProperty(Annotations.SELECTED); + /** + * @see Annotations#SELECTED + */ + @SuppressWarnings("unchecked") + public IPredicate<E> getPredicate() { - } - + return (IPredicate<E>) getRequiredProperty(Annotations.SELECTED); + + } + /** * @see Annotations#RELATION */ public String getRelation() { - return getRequiredProperty(Annotations.RELATION); + return (String) 
getRequiredProperty(Annotations.RELATION); } /** * @see Annotations#KEY_ORDER */ - public IKeyOrder<E> getKeyOrder() { + @SuppressWarnings("unchecked") + public IKeyOrder<E> getKeyOrder() { - return getRequiredProperty(Annotations.KEY_ORDER); + return (IKeyOrder<E>) getRequiredProperty(Annotations.KEY_ORDER); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/DistinctBindingSetOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/DistinctBindingSetOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/DistinctBindingSetOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -125,7 +125,7 @@ */ public IVariable<?>[] getVariables() { - return getRequiredProperty(Annotations.VARIABLES); + return (IVariable<?>[]) getRequiredProperty(Annotations.VARIABLES); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -140,7 +140,7 @@ */ public long getOffset() { - return getRequiredProperty(Annotations.OFFSET); + return (Long) getRequiredProperty(Annotations.OFFSET); } @@ -149,7 +149,7 @@ */ public long getLimit() { - return getRequiredProperty(Annotations.LIMIT); + return (Long) getRequiredProperty(Annotations.LIMIT); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SortOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SortOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ 
branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SortOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -80,7 +80,7 @@ */ public ComparatorOp getComparator() { - return getRequiredProperty(Annotations.COMPARATOR); + return (ComparatorOp) getRequiredProperty(Annotations.COMPARATOR); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SparqlBindingSetComparatorOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SparqlBindingSetComparatorOp.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SparqlBindingSetComparatorOp.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -46,7 +46,7 @@ */ public ISortOrder<?>[] getOrder() { - return getRequiredProperty(Annotations.ORDER); + return (ISortOrder<?>[]) getRequiredProperty(Annotations.ORDER); } Modified: branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-09-14 19:18:42 UTC (rev 3551) +++ branches/QUADS_QUERY_BRANCH/bigdata-rdf/src/java/com/bigdata/rdf/store/AbstractTripleStore.java 2010-09-14 19:55:11 UTC (rev 3552) @@ -146,7 +146,6 @@ import com.bigdata.relation.rule.eval.IRuleTaskFactory; import com.bigdata.relation.rule.eval.ISolution; import com.bigdata.search.FullTextIndex; -import com.bigdata.service.AbstractEmbeddedDataService; import com.bigdata.service.DataService; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.ndx.IClientIndex; @@ -1129,8 +1128,7 @@ * and writers. This property depends on primarily on the concurrency * control mechanisms (if any) that are used to prevent concurrent access to * an unisolated index while a thread is writing on that index. 
Stores based - * on the {@link IBigdataFederation} or an - * {@link AbstractEmbeddedDataService} automatically inherent the + * on the {@link IBigdataFederation} automatically inherent the * appropriate concurrency controls as would a store whose index access was * intermediated by the executor service of an {@link IConcurrencyManager}. * <p> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 19:18:49
|
Revision: 3551 http://bigdata.svn.sourceforge.net/bigdata/?rev=3551&view=rev Author: thompsonbry Date: 2010-09-14 19:18:42 +0000 (Tue, 14 Sep 2010) Log Message: ----------- minor edit Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-14 19:15:27 UTC (rev 3550) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-14 19:18:42 UTC (rev 3551) @@ -488,8 +488,8 @@ initialBindings.set(Var.var("x"), new Constant<String>("Mary")); - runningQuery.startQuery(new BindingSetChunk(queryEngine, queryId, - startId,// + runningQuery.startQuery(new BindingSetChunk<IBindingSet>( + queryEngine, queryId, startId,// -1, // partitionId newBindingSetIterator(initialBindings))); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-14 19:15:34
|
Revision: 3550 http://bigdata.svn.sourceforge.net/bigdata/?rev=3550&view=rev Author: thompsonbry Date: 2010-09-14 19:15:27 +0000 (Tue, 14 Sep 2010) Log Message: ----------- Modified the LUBM performance test to use 10M write cache buffers and to locate its log4j configuration file correctly. Modified Paths: -------------- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/RWStore.properties branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.properties branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.xml Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/RWStore.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/RWStore.properties 2010-09-14 19:14:30 UTC (rev 3549) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/RWStore.properties 2010-09-14 19:15:27 UTC (rev 3550) @@ -10,9 +10,12 @@ # Disk is the worm store. DiskRW is the read/write store. com.bigdata.journal.AbstractJournal.bufferMode=DiskRW -com.bigdata.btree.writeRetentionQueue.capacity=4000 +com.bigdata.btree.writeRetentionQueue.capacity=8000 com.bigdata.btree.BTree.branchingFactor=128 +# Override the #of write cache buffers. +com.bigdata.journal.AbstractJournal.writeCacheBufferCount=12 + # 200M initial extent. com.bigdata.journal.AbstractJournal.initialExtent=209715200 com.bigdata.journal.AbstractJournal.maximumExtent=209715200 Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.properties =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.properties 2010-09-14 19:14:30 UTC (rev 3549) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.properties 2010-09-14 19:15:27 UTC (rev 3550) @@ -40,7 +40,7 @@ lubm.maxMem=4g # The data set size (U50, U1000, etc.) -lubm.univ=50 +lubm.univ=8000 # The namespace of the KB instance (multiple KBs can be in the same database). lubm.namespace=LUBM_U${lubm.univ} @@ -48,9 +48,9 @@ # Laptop benchmark data directory. 
#lubm.baseDir=d:/bigdata-perf-analysis/lubm/U${lubm.univ} # Server benchmark directory. -#lubm.baseDir=/nas/data/lubm/U${lubm.univ} +lubm.baseDir=/nas/data/lubm/U${lubm.univ} # Windows Server 2008 benchmark data directory. -lubm.baseDir=c:/usr/local/data/lubm/lubm_${lubm.univ} +#lubm.baseDir=c:/usr/local/data/lubm/lubm_${lubm.univ} ## Where to put the XML results files. #bsbm.resultsDir=${bsbm.baseDir}/.. @@ -68,8 +68,8 @@ lubm.compressType=GZip # Which mode to use for the Journal. (DiskRW or DiskWORM) -#journalMode=RW -journalMode=WORM +journalMode=RW +#journalMode=WORM ## The name of the directory containing the generated RDF data without the filename extension. #lubm.outputFile=${lubm.baseDir} @@ -80,11 +80,11 @@ # The name of the file used for the journal. #lubm.journalFile=${lubm.baseDir}/bigdata-lubm.${journalMode}.jnl # Note: This is on the large volume. -#lubm.journalFile=/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl +lubm.journalFile=/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl # SSD. #lubm.journalFile=e:/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl # SAS -lubm.journalFile=f:/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl +#lubm.journalFile=f:/data/lubm/U${lubm.univ}/bigdata-lubm.${journalMode}.jnl # The database to test. lubm.configFile=${lubm.dir}/src/resources/config/config.kb.sparql @@ -136,4 +136,4 @@ #-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. 
-queryJvmArgs=-server -Xmx${lubm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties +queryJvmArgs=-server -Xmx${lubm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Modified: branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.xml =================================================================== --- branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.xml 2010-09-14 19:14:30 UTC (rev 3549) +++ branches/JOURNAL_HA_BRANCH/bigdata-perf/lubm/build.xml 2010-09-14 19:15:27 UTC (rev 3550) @@ -54,14 +54,14 @@ <exclude name="**/*.java" /> <exclude name="**/package.html" /> </fileset> - <!-- copy log4j configuration file. --> - <fileset dir="${lubm.dir}/src/resources/logging" /> </copy> <copy toDir="${build.dir}/bin"> <!-- copy benchmark data and queries. --> <fileset dir="${lubm.dir}/src/resources/config" /> <!-- copy the journal configuration file. --> <fileset file="${lubm.dir}/*.properties" /> + <!-- copy log4j configuration file. --> + <fileset dir="${lubm.dir}/src/resources/logging" /> </copy> </target> @@ -90,7 +90,9 @@ <java classname="com.bigdata.rdf.store.DataLoader" fork="true" failonerror="true"> <arg line="-closure -namespace ${lubm.namespace} ${lubm.journalPropertyFile} ${lubm.ontologyFile} ${lubm.dataDir}" /> <!-- specify/override the journal file name. --> - <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${lubm.journalFile}" /> + <jvmarg line="${queryJvmArgs} -Dcom.bigdata.journal.AbstractJournal.file=${lubm.journalFile} + -Dcom.bigdata.io.DirectBufferPool.bufferCapacity=10485760 + " /> <classpath> <path refid="runtime.classpath" /> </classpath> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |