From: <tho...@us...> - 2010-09-24 16:49:57
Revision: 3624
          http://bigdata.svn.sourceforge.net/bigdata/?rev=3624&view=rev
Author:   thompsonbry
Date:     2010-09-24 16:49:51 +0000 (Fri, 24 Sep 2010)

Log Message:
-----------
Added shutdown of the query engine.

Modified Paths:
--------------
    branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java

Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-09-24 14:36:50 UTC (rev 3623)
+++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java 2010-09-24 16:49:51 UTC (rev 3624)
@@ -996,6 +996,8 @@

         assertOpen();

+        queryEngine.shutdown();
+
         super.shutDown();

     }
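The change itself is a single line, but its position matters: the query engine owns worker threads that must stop before the Sail finishes its own shutdown, otherwise they can outlive, and keep touching, a closed store. Below is a minimal sketch of that lifecycle pattern, using simplified stand-in classes rather than the real bigdata QueryEngine/BigdataSail APIs.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class QueryEngine {
        private final ExecutorService executor = Executors.newCachedThreadPool();

        /** Reject new query submissions and let running queries drain. */
        void shutdown() {
            executor.shutdown();
        }
    }

    abstract class AbstractSail {
        private boolean open = true;

        void assertOpen() {
            if (!open) throw new IllegalStateException("already shut down");
        }

        void shutDown() {
            open = false; // the superclass releases its own resources here
        }
    }

    public class ShutdownDemo extends AbstractSail {
        private final QueryEngine queryEngine = new QueryEngine();

        @Override
        void shutDown() {
            assertOpen();
            queryEngine.shutdown(); // the line this revision adds: stop the engine first
            super.shutDown();
        }

        public static void main(String[] args) {
            ShutdownDemo sail = new ShutdownDemo();
            sail.shutDown(); // engine threads stop before the sail marks itself closed
        }
    }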
From: <tho...@us...> - 2010-09-24 14:36:56
Revision: 3623 http://bigdata.svn.sourceforge.net/bigdata/?rev=3623&view=rev Author: thompsonbry Date: 2010-09-24 14:36:50 +0000 (Fri, 24 Sep 2010) Log Message: ----------- Added a unit test for MapBindingSetsOverShards which exercises the condition where the key for the target access path is only partly bound (a prefix). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java 2010-09-24 14:30:39 UTC (rev 3622) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java 2010-09-24 14:36:50 UTC (rev 3623) @@ -69,10 +69,6 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id: TestMapBindingSetsOverShards.java 3448 2010-08-18 20:55:58Z * thompsonbry $ - * - * FIXME More unit tests. It appears that none of these tests cover the - * case where there is a shared prefix, e.g., because at least one - * component of the selected key order is bound. */ public class TestMapBindingSetsOverShards extends AbstractEmbeddedFederationTestCase { @@ -297,13 +293,14 @@ // } } - + /** - * Unit test verifies that binding sets are correctly mapped over shards. + * Unit test verifies that binding sets are correctly mapped over shards + * when the target access path will be fully bound. * - * @throws IOException + * @throws IOException */ - public void test_mapShards() throws IOException { + public void test_mapShards_fullyBound() throws IOException { // scale-out view of the relation. final R rel = (R) fed.getResourceLocator().locate(namespace, @@ -451,6 +448,154 @@ } /** + * Unit test verifies that binding sets are correctly mapped over shards + * when only one component of the key is bound (the key has two components, + * this unit test only binds the first component in the key). + * + * @throws IOException + */ + public void test_mapShards_oneBound() throws IOException { + + // scale-out view of the relation. + final R rel = (R) fed.getResourceLocator().locate(namespace, + ITx.UNISOLATED); + + /* + * Setup the binding sets to be mapped across the shards. 
+ */ + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + + final List<IBindingSet> data = new LinkedList<IBindingSet>(); + final List<IBindingSet> expectedPartition0 = new LinkedList<IBindingSet>(); + final List<IBindingSet> expectedPartition1 = new LinkedList<IBindingSet>(); + { + IBindingSet bset = null; + { // partition0 + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("John")); +// bset.set(y, new Constant<String>("Mary")); + data.add(bset); + expectedPartition0.add(bset); + } + { // partition1 + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Mary")); +// bset.set(y, new Constant<String>("Paul")); + data.add(bset); + expectedPartition1.add(bset); + } + { // partition1 + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Paul")); +// bset.set(y, new Constant<String>("John")); + data.add(bset); + expectedPartition1.add(bset); + } + { // partition0 + bset = new HashBindingSet(); + bset.set(x, new Constant<String>("Leon")); +// bset.set(y, new Constant<String>("Paul")); + data.add(bset); + expectedPartition0.add(bset); + } + +// // partition0 +// new E("John", "Mary"),// +// new E("Leon", "Paul"),// +// // partition1 +// new E("Mary", "John"),// +// new E("Mary", "Paul"),// +// new E("Paul", "Leon"),// + + } + + final Predicate<E> pred = new Predicate<E>(new BOp[] { x, y }, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.RELATION_NAME, + new String[] { namespace }) // + })); + + final long tx = fed.getTransactionService().newTx(ITx.READ_COMMITTED); + + try { + + final MockMapBindingSetsOverShardsBuffer<E> fixture = new MockMapBindingSetsOverShardsBuffer<E>( + fed, pred, rel.getPrimaryKeyOrder(), tx, 100/* capacity */); + + // write the binding sets on the fixture. + for (IBindingSet bindingSet : data) { + + fixture.add(bindingSet); + + } + + // flush (verify #of binding sets reported by flush). + assertEquals((long) data.size(), fixture.flush()); + + /* + * Examine the output sinks, verifying that each binding set was + * mapped onto the correct index partition. + */ + { + + final List<Bundle> flushedChunks = fixture.flushedChunks; + final List<IBindingSet[]> actualPartition0 = new LinkedList<IBindingSet[]>(); + final List<IBindingSet[]> actualPartition1 = new LinkedList<IBindingSet[]>(); + for (Bundle b : flushedChunks) { + if (b.locator.getPartitionId() == 0) { + actualPartition0.add(b.bindingSets); + } else if (b.locator.getPartitionId() == 1) { + actualPartition1.add(b.bindingSets); + } else { + fail("Not expecting: " + b.locator); + } + } + + final int nflushed = flushedChunks.size(); + +// assertEquals("#of sinks", 2, nflushed); + + // partition0 + { + +// assertEquals("#of binding sets", partition0.size(), +// bundle0.bindingSets.length); + + TestQueryEngine.assertSameSolutionsAnyOrder( + expectedPartition0.toArray(new IBindingSet[0]), + new Dechunkerator<IBindingSet>(actualPartition0 + .iterator())); + + } + + // partition1 + { +// final Bundle bundle1 = flushedChunks.get(1); +// +// assertEquals("partitionId", 1/* partitionId */, +// bundle1.locator.getPartitionId()); + +// assertEquals("#of binding sets", partition1.size(), +// bundle1.bindingSets.length); + + TestQueryEngine.assertSameSolutionsAnyOrder( + expectedPartition1.toArray(new IBindingSet[0]), + new Dechunkerator<IBindingSet>(actualPartition1 + .iterator())); + } + + } + + } finally { + + fed.getTransactionService().abort(tx); + + } + + } + + /** * A unit test where no variables are bound. 
This should cause the binding * sets to be mapped across all shards.
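For context on what the new test_mapShards_oneBound test exercises: with only the first component of a two-component key bound, a binding set no longer maps to a single point in the index — it maps to the half-open range from the bound prefix to the prefix's successor, and every index partition whose key range overlaps that interval must receive a copy. The sketch below works through that range computation over plain byte-array keys; successor() and overlaps() are hypothetical helpers for illustration, not the bigdata IKeyOrder API.

    import java.util.Arrays;

    public class PrefixRange {

        /** Smallest key strictly greater than every key starting with this prefix. */
        static byte[] successor(byte[] prefix) {
            byte[] s = Arrays.copyOf(prefix, prefix.length);
            for (int i = s.length - 1; i >= 0; i--) {
                if (s[i] != (byte) 0xff) { s[i]++; return Arrays.copyOf(s, i + 1); }
            }
            throw new IllegalArgumentException("no successor");
        }

        /** True iff [fromKey,toKey) intersects the shard's range [shardFrom,shardTo). */
        static boolean overlaps(byte[] fromKey, byte[] toKey,
                                byte[] shardFrom, byte[] shardTo) {
            return compare(fromKey, shardTo) < 0 && compare(shardFrom, toKey) < 0;
        }

        /** Unsigned lexicographic byte comparison. */
        static int compare(byte[] a, byte[] b) {
            int n = Math.min(a.length, b.length);
            for (int i = 0; i < n; i++) {
                int c = (a[i] & 0xff) - (b[i] & 0xff);
                if (c != 0) return c;
            }
            return a.length - b.length;
        }

        public static void main(String[] args) {
            byte[] prefix = "Mary".getBytes(); // only the first key component is bound
            byte[] fromKey = prefix;
            byte[] toKey = successor(prefix);  // "Marz" — covers every "Mary|*" key
            // a shard split at "Mary|K" means the prefix range spans both partitions,
            // so the binding set must be mapped onto each of them
            System.out.println(overlaps(fromKey, toKey,
                    "A".getBytes(), "Mary|K".getBytes())); // true
            System.out.println(overlaps(fromKey, toKey,
                    "Mary|K".getBytes(), "Z".getBytes())); // true
        }
    }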
From: <tho...@us...> - 2010-09-24 14:30:46
Revision: 3622 http://bigdata.svn.sourceforge.net/bigdata/?rev=3622&view=rev Author: thompsonbry Date: 2010-09-24 14:30:39 +0000 (Fri, 24 Sep 2010) Log Message: ----------- All implemented unit tests for distributed query now run correctly. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Bundle.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -78,6 +78,12 @@ .getLogger(ChunkTask.class); /** + * Error message used when an operation which must be performed on the query + * controller is attempted on some other {@link IQueryPeer}. + */ + static protected final String ERR_NOT_CONTROLLER = "Operator only permitted on the query controller"; + + /** * The class executing the query on this node. */ final private QueryEngine queryEngine; @@ -1160,8 +1166,10 @@ */ // cancel any running operators for this query on this node. cancelled |= cancelRunningOperators(mayInterruptIfRunning); - // cancel any running operators for this query on other nodes. - cancelled |= cancelQueryOnPeers(future.getCause()); + if (controller) { + // cancel query on other peers. + cancelled |= cancelQueryOnPeers(future.getCause()); + } if (queryBuffer != null) { /* * Close the query buffer so the iterator draining the query @@ -1216,13 +1224,12 @@ return cancelled; } - + /** * Cancel the query on each node where it is known to be running. * <p> * Note: The default implementation verifies that the caller is holding the - * {@link #lock} but is otherwise a NOP. This is overridden for - * scale-out. + * {@link #lock} but is otherwise a NOP. This is overridden for scale-out. * * @param cause * When non-<code>null</code>, the cause. @@ -1230,11 +1237,15 @@ * @return <code>true</code> iff something was cancelled. * * @throws IllegalMonitorStateException - * unless the {@link #lock} is held by the current - * thread. + * unless the {@link #lock} is held by the current thread. + * @throws UnsupportedOperationException + * unless this is the query controller. 
*/ protected boolean cancelQueryOnPeers(final Throwable cause) { + if (!controller) + throw new UnsupportedOperationException(ERR_NOT_CONTROLLER); + if (!lock.isHeldByCurrentThread()) throw new IllegalMonitorStateException(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -415,7 +415,7 @@ final BOp targetOp = bopIndex.get(sinkId); - if (bop == null) + if (targetOp == null) throw new IllegalStateException("Not found: " + sinkId); if(log.isTraceEnabled()) @@ -724,12 +724,10 @@ @Override protected boolean cancelQueryOnPeers(final Throwable cause) { - super.cancelQueryOnPeers(cause); + boolean cancelled = super.cancelQueryOnPeers(cause); final UUID queryId = getQueryId(); - boolean cancelled = false; - for (IQueryPeer peer : peers.values()) { try { Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Bundle.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Bundle.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/shards/Bundle.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -37,7 +37,7 @@ this.fromKey = keyOrder.getFromKey(keyBuilder, asBound); - this.toKey = keyOrder.getFromKey(keyBuilder, asBound); + this.toKey = keyOrder.getToKey(keyBuilder, asBound); } @@ -94,4 +94,14 @@ private int hash = 0; + public String toString() { + StringBuilder sb = new StringBuilder(super.toString()); + sb.append("{bindingSet="+bindingSet); + sb.append(",asBound="+asBound); + sb.append(",fromKey="+BytesUtil.toString(fromKey)); + sb.append(",toKey="+BytesUtil.toString(toKey)); + sb.append("}"); + return sb.toString(); + } + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestAll.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestAll.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestAll.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -108,7 +108,7 @@ * Note: This is tested later once we have gone through the core unit * tests for the services. */ - //suite.addTest( com.bigdata.bop.fed.TestAll.suite() ); + suite.addTest( com.bigdata.bop.fed.TestAll.suite() ); return suite; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -107,7 +107,10 @@ * @todo reuse the stress tests from {@link TestQueryEngine}. * * @todo verify that the peers notify the query controller when they first - * register + * register + * + * FIXME Write test of an RMI based join (this is used for some default + * graph query patterns). 
*/ public class TestFederatedQueryEngine extends AbstractEmbeddedFederationTestCase { @@ -651,9 +654,9 @@ log.info("join : "+stats.toString()); // verify query solution stats details. - assertEquals(1L, stats.chunksIn.get()); - assertEquals(1L, stats.unitsIn.get()); - assertEquals(5L, stats.unitsOut.get()); + assertEquals(2L, stats.chunksIn.get()); // two shards. + assertEquals(2L, stats.unitsIn.get()); // two shards, one empty bset each. + assertEquals(5L, stats.unitsOut.get()); // total of 5 tuples read across both shards. assertEquals(2L, stats.chunksOut.get()); // since we read on both shards. } @@ -809,9 +812,9 @@ // verify query solution stats details. assertEquals(2L, stats.chunksIn.get()); // since we read on two shards. - assertEquals(1L, stats.unitsIn.get()); // a single empty binding set. - assertEquals(5L, stats.unitsOut.get()); // each of the tuples will be read. - assertEquals(2L, stats.chunksOut.get()); // since we read on both shards. + assertEquals(2L, stats.unitsIn.get()); // a single empty binding set for each. + assertEquals(2L, stats.unitsOut.get()); // one tuple on each shard will satisfy the constraint. + assertEquals(2L, stats.chunksOut.get()); // since we read on both shards and both shards have one tuple which joins. } // validate the stats for the slice operator. @@ -831,12 +834,12 @@ } /** - * Test the ability run a simple join reading on a single shard. There are - * three operators. One feeds an empty binding set[] into the join, another - * is the predicate for the access path on which the join will read (it - * probes the index once for "Mary" and binds "Paul" and "John" when it does - * so), and the third is the join itself (there are two solutions, which are - * "value=Paul" and value="John"). + * Test the ability to run a simple join reading on a single shard. There + * are three operators. One feeds an empty binding set[] into the join, + * another is the predicate for the access path on which the join will read + * (it probes the index once for "Mary" and binds "Paul" and "John" when it + * does so), and the third is the join itself (there are two solutions, + * which are value="Paul" and value="John"). */ public void test_query_join_1shard() throws Exception { @@ -1096,15 +1099,15 @@ // verify solutions. { - // the expected solution (just one). + // the expected solutions. final IBindingSet[] expected = new IBindingSet[] {// - new ArrayBindingSet(// + new ArrayBindingSet(// partition1 new IVariable[] { Var.var("x"), Var.var("y"), Var.var("z") },// new IConstant[] { new Constant<String>("Mary"), new Constant<String>("Paul"), new Constant<String>("Leon") }// ),// - new ArrayBindingSet(// + new ArrayBindingSet(// partition0 new IVariable[] { Var.var("x"), Var.var("y"), Var.var("z") },// new IConstant[] { new Constant<String>("Mary"), new Constant<String>("John"), @@ -1114,6 +1117,13 @@ TestQueryEngine.assertSameSolutionsAnyOrder(expected, new Dechunkerator<IBindingSet>(runningQuery.iterator())); +// // partition0 +// new E("John", "Mary"),// +// new E("Leon", "Paul"),// +// // partition1 +// new E("Mary", "John"),// +// new E("Mary", "Paul"),// +// new E("Paul", "Leon"),// } // Wait until the query is done. @@ -1122,7 +1132,7 @@ { // validate the stats map. assertNotNull(statsMap); - assertEquals(3, statsMap.size()); + assertEquals(4, statsMap.size()); if (log.isInfoEnabled()) log.info(statsMap.toString()); } @@ -1149,10 +1159,10 @@ log.info("join1: " + stats.toString()); // verify query solution stats details. 
- assertEquals(1L, stats.chunksIn.get()); - assertEquals(1L, stats.unitsIn.get()); + assertEquals(1L, stats.chunksIn.get()); // reads only on one shard. + assertEquals(1L, stats.unitsIn.get()); // the initial binding set. assertEquals(2L, stats.unitsOut.get()); - assertEquals(1L, stats.chunksOut.get()); // @todo depends on where the shards are. + assertEquals(1L, stats.chunksOut.get()); // one chunk out, but will be mapped over two shards. } // validate the stats for the 2nd join operator. @@ -1163,10 +1173,10 @@ log.info("join2: " + stats.toString()); // verify query solution stats details. - assertEquals(1L, stats.chunksIn.get()); // @todo depends on where the shards are. - assertEquals(2L, stats.unitsIn.get()); - assertEquals(2L, stats.unitsOut.get()); - assertEquals(1L, stats.chunksOut.get()); // @todo depends on where the shards are. + assertEquals(2L, stats.chunksIn.get()); // one chunk per shard on which we will read. + assertEquals(2L, stats.unitsIn.get()); // one binding set in per shard. + assertEquals(2L, stats.unitsOut.get()); // one solution per shard. + assertEquals(2L, stats.chunksOut.get()); // since join ran on two shards and each had one solution. } // validate stats for the sliceOp (on the query controller) @@ -1177,10 +1187,10 @@ log.info("slice: " + stats.toString()); // verify query solution stats details. - assertEquals(1L, stats.chunksIn.get()); // @todo? - assertEquals(2L, stats.unitsIn.get()); - assertEquals(2L, stats.unitsOut.get()); - assertEquals(1L, stats.chunksOut.get()); // @todo? + assertEquals(2L, stats.chunksIn.get()); // one chunk from each shard of join2 with a solution. + assertEquals(2L, stats.unitsIn.get()); // one solution per shard for join2. + assertEquals(2L, stats.unitsOut.get()); // slice passes all units. + assertEquals(2L, stats.chunksOut.get()); // slice runs twice. } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java 2010-09-24 13:51:34 UTC (rev 3621) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/shards/TestMapBindingSetsOverShards.java 2010-09-24 14:30:39 UTC (rev 3622) @@ -64,11 +64,15 @@ import com.bigdata.striterator.IKeyOrder; /** - * Unit tests for {@link MapBindingSetsOverShardsBuffer}. + * Unit tests for {@link MapBindingSetsOverShardsBuffer}. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id: TestMapBindingSetsOverShards.java 3448 2010-08-18 20:55:58Z * thompsonbry $ + * + * FIXME More unit tests. It appears that none of these tests cover the + * case where there is a shared prefix, e.g., because at least one + * component of the selected key order is bound. */ public class TestMapBindingSetsOverShards extends AbstractEmbeddedFederationTestCase { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
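Two of the fixes bundled into this revision are easy to miss in the diff: Bundle was deriving its toKey with getFromKey (so every bundle described an empty key range), and cancelQueryOnPeers was being invoked from every node rather than only from the query controller. The guard pattern now looks roughly like the sketch below — a simplified stand-in for RunningQuery, not the actual class.

    import java.util.concurrent.locks.ReentrantLock;

    public class RunningQuerySketch {

        static final String ERR_NOT_CONTROLLER =
                "Operation only permitted on the query controller";

        final boolean controller;
        final ReentrantLock lock = new ReentrantLock();

        RunningQuerySketch(boolean controller) {
            this.controller = controller;
        }

        boolean cancel(boolean mayInterruptIfRunning, Throwable cause) {
            lock.lock();
            try {
                // cancel any running operators for this query on this node.
                boolean cancelled = cancelRunningOperators(mayInterruptIfRunning);
                if (controller) {
                    // only the controller fans the cancel out to the peers.
                    cancelled |= cancelQueryOnPeers(cause);
                }
                return cancelled;
            } finally {
                lock.unlock();
            }
        }

        boolean cancelQueryOnPeers(Throwable cause) {
            if (!controller)
                throw new UnsupportedOperationException(ERR_NOT_CONTROLLER);
            if (!lock.isHeldByCurrentThread())
                throw new IllegalMonitorStateException();
            return false; // NOP here; the scale-out subclass notifies each peer.
        }

        boolean cancelRunningOperators(boolean mayInterruptIfRunning) {
            return false; // local cleanup elided in this sketch.
        }

        public static void main(String[] args) {
            System.out.println(new RunningQuerySketch(true).cancel(true, null));
            try {
                new RunningQuerySketch(false).cancelQueryOnPeers(null);
            } catch (UnsupportedOperationException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }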
From: <ble...@us...> - 2010-09-24 13:51:41
Revision: 3621 http://bigdata.svn.sourceforge.net/bigdata/?rev=3621&view=rev Author: blevine218 Date: 2010-09-24 13:51:34 +0000 (Fri, 24 Sep 2010) Log Message: ----------- untar tarball during the package phase so it happens just before the pre-integration-test phase Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-24 13:38:31 UTC (rev 3620) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-24 13:51:34 UTC (rev 3621) @@ -9,6 +9,11 @@ <groupId>com.bigdata</groupId> <artifactId>bigdata-integration-test</artifactId> <name>bigdata Integration Tests</name> + + <!-- + Note: Most properties include the "integ." prefix so that they + can displayed easily using the ANT <echoproperties> task in the services.xml script + --> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> @@ -22,7 +27,7 @@ <integ.deploy.dir>${integ.deploy.root.dir}/${integ.bigdata.dependency}-${project.version}</integ.deploy.dir> <integ.test.dir>${integ.deploy.dir}/testing</integ.test.dir> - <integ.testScript>${integ.test.dir}/test.xml</integ.testScript> + <integ.testScript>${project.build.testOutputDirectory}/services.xml</integ.testScript> <integ.basedir>${integ.test.dir}</integ.basedir> <integ.app.home>${integ.deploy.dir}</integ.app.home> <integ.deploy.conf.dir>${integ.test.dir}/conf</integ.deploy.conf.dir> @@ -50,6 +55,11 @@ <executions> <execution> <id>unpack</id> + + <!-- Bound to the package phase so that we untar the tarball just + before the pre-integration-test phase --> + <phase>package</phase> + <goals> <goal>unpack</goal> </goals> @@ -156,7 +166,7 @@ <phase>pre-integration-test</phase> <configuration> <tasks> - <ant antfile="${project.build.testOutputDirectory}/services.xml" target="start" useNativeBasedir="true" inheritAll="true"/> + <ant antfile="${integ.testScript}" target="start" useNativeBasedir="true" inheritAll="true"/> </tasks> </configuration> <goals> @@ -169,8 +179,7 @@ <phase>post-integration-test</phase> <configuration> <tasks> - <echo message="testscript = ${testScript}" /> - <ant antfile="${project.build.testOutputDirectory}/services.xml" target="stop" useNativeBasedir="true" inheritAll="true"/> + <ant antfile="${integ.testScript}" target="stop" useNativeBasedir="true" inheritAll="true"/> </tasks> </configuration> <goals> Modified: branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml 2010-09-24 13:38:31 UTC (rev 3620) +++ branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml 2010-09-24 13:51:34 UTC (rev 3621) @@ -26,8 +26,10 @@ </target> <target name="dumpProps"> - <echo message="Application properties:\n" /> + <echo message="Application properties:" /> + <echo message="-----------------------" /> <echoproperties prefix="integ." /> + <echo message="-----------------------" /> </target> <target name="start"> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2010-09-24 13:38:39
Revision: 3620 http://bigdata.svn.sourceforge.net/bigdata/?rev=3620&view=rev Author: thompsonbry Date: 2010-09-24 13:38:31 +0000 (Fri, 24 Sep 2010) Log Message: ----------- Fixed a nagging bug when handling multiple small chunks flowing through the pipeline. In the end, it turns out that the problem was the SliceOp. The SliceTask was closing the sink after the first invocation, which was causing the query to be interrupted (cancelled). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -195,7 +195,7 @@ * identifier for the {@link BOp} within the context of its owning * query. */ - String BOP_ID = BOp.class.getName()+".bopId"; + String BOP_ID = BOp.class.getName() + ".bopId"; /** * The timeout for the operator evaluation (milliseconds). @@ -210,8 +210,8 @@ * be interpreted with respect to the time when the query began to * execute. */ - String TIMEOUT = BOp.class.getName()+".timeout"; - + String TIMEOUT = BOp.class.getName() + ".timeout"; + /** * The default timeout for operator evaluation. */ @@ -233,9 +233,9 @@ * @see #TIMESTAMP */ String MUTATION = BOp.class.getName() + ".mutation"; - + boolean DEFAULT_MUTATION = false; - + /** * The timestamp (or transaction identifier) used by this operator if it * reads or writes on the database. Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -30,6 +30,9 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.log4j.Level; +import org.apache.log4j.Priority; + import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.btree.IRangeQuery; @@ -307,6 +310,22 @@ } + /** + * You can uncomment a line in this method to see who is closing the + * buffer. 
+ * <p> + * {@inheritDoc} + */ + @Override + public void close() { + +// if (isOpen()) +// log.error(toString(), new RuntimeException("STACK TRACE")); + + super.close(); + + } + } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -119,7 +119,7 @@ public String toString() { final StringBuilder sb = new StringBuilder(); - sb.append(getClass().getName()); + sb.append(super.toString()); sb.append("{chunksIn=" + chunksIn.get()); sb.append(",unitsIn=" + unitsIn.get()); sb.append(",chunksOut=" + chunksOut.get()); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -413,7 +413,17 @@ continue; } } catch (InterruptedException e) { - log.warn("Interrupted."); + /* + * Note: Uncomment the stack trace here if you want to find + * where the query was interrupted. + * + * Note: If you want to find out who interrupted the query, + * then you can instrument BlockingBuffer#close() in + * PipelineOp#newBuffer(stats). + */ + log.warn("Interrupted." +// ,e + ); return; } catch (Throwable ex) { // log and continue Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -52,10 +52,11 @@ * Note: DO NOT halt the query here!!!! That will cause it to not * accept any more messages. Just close the source iterator. 
*/ + src.close(); // try { // runningQuery.halt(); // } finally { - src.close(); +// src.close(); // } } } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -338,9 +338,6 @@ messagesProduced(msg.getBOpId(), 1/* nmessages */); - if (log.isInfoEnabled()) - log.info(msg.toString()); - if (TableLog.tableLog.isInfoEnabled()) { /* * Note: RunState is only used by the query controller so this will @@ -359,9 +356,12 @@ null/* cause */, null/* stats */)); } - if (debug) - System.err.println("startQ : " + toString()); + if(log.isInfoEnabled()) + log.info("startQ : " + toString()); + if (log.isTraceEnabled()) + log.trace(msg.toString()); + } /** @@ -397,19 +397,18 @@ messagesConsumed(msg.bopId, msg.nmessages); - if (log.isTraceEnabled()) - log.trace(msg.toString()); - if (TableLog.tableLog.isInfoEnabled()) { TableLog.tableLog.info(getTableRow("startOp", msg.serviceId, msg.bopId, msg.partitionId, msg.nmessages/* fanIn */, null/* cause */, null/* stats */)); } - if (debug) - System.err - .println("startOp: " + toString() + " : bop=" + msg.bopId); + if (log.isInfoEnabled()) + log.info("startOp: " + toString() + " : bop=" + msg.bopId); + if (log.isTraceEnabled()) + log.trace(msg.toString()); + return firstTime; } @@ -470,9 +469,6 @@ if (isAllDone) this.allDone.set(true); - if (log.isTraceEnabled()) - log.trace(msg.toString()); - if (TableLog.tableLog.isInfoEnabled()) { final int fanOut = msg.sinkMessagesOut + msg.altSinkMessagesOut; TableLog.tableLog.info(getTableRow("haltOp", msg.serviceId, @@ -480,10 +476,13 @@ msg.taskStats)); } - if (debug) - System.err.println("haltOp : " + toString() + " : bop=" + msg.bopId + if (log.isInfoEnabled()) + log.info("haltOp : " + toString() + " : bop=" + msg.bopId + ",isOpDone=" + isOpDone); + if (log.isTraceEnabled()) + log.trace(msg.toString()); + if (msg.cause != null) { /* Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -275,70 +275,69 @@ public Void call() throws Exception { - if(log.isTraceEnabled()) - log.trace(toString()); - final IAsynchronousIterator<IBindingSet[]> source = context .getSource(); final IBlockingBuffer<IBindingSet[]> sink = context.getSink(); - try { + /* + * buffer forms chunks which get flushed onto the sink. + * + * @todo if we have visibility into the #of source chunks, then do + * not buffer more than min(#source,#needed). + */ + final UnsynchronizedArrayBuffer<IBindingSet> out = new UnsynchronizedArrayBuffer<IBindingSet>( + sink, op.getChunkCapacity()); + while (source.hasNext()) { + + final IBindingSet[] chunk = source.next(); + /* - * buffer forms chunks which get flushed onto the sink. + * Batch each chunk through a lock for better concurrency + * (avoids CAS contention). * - * @todo if we have visibility into the #of source chunks, then - * do not buffer more than min(#source,#needed). 
+ * Note: This is safe because the source chunk is already + * materialized and the sink will not block (that is part of the + * bop evaluation contract). + * + * Note: We need to be careful here with concurrent close of the + * sink (which is the shared queryBuffer) by concurrent + * SliceOps. The problem is that the slice can count off the + * solutions without having them flushed all the way through to + * the queryBuffer, but we can not close the query buffer until + * we actually see the last solution added to the query buffer. + * This is why the slice flushes the buffer while it is + * synchronized. */ - final UnsynchronizedArrayBuffer<IBindingSet> out = new UnsynchronizedArrayBuffer<IBindingSet>( - sink, op.getChunkCapacity()); + synchronized (stats) { - boolean halt = false; - - while (source.hasNext() && !halt) { + if (log.isTraceEnabled()) + log.trace(toString() + ": stats=" + stats + ", sink=" + + sink); - final IBindingSet[] chunk = source.next(); + final boolean halt = handleChunk(out, chunk); - /* - * Batch each chunk through a lock for better concurrency - * (avoids CAS contention). - * - * Note: This is safe because the source chunk is already - * materialized and the sink will not block (that is part of - * the bop evaluation contract). - */ - synchronized (stats) { - - if (handleChunk(out, chunk)) { + if (!out.isEmpty()) + out.flush(); - halt = true; + sink.flush(); - } + if (halt) { - } + if (log.isInfoEnabled()) + log.info("Slice will interrupt query."); - } + context.getRunningQuery().halt(); - if (!out.isEmpty()) - out.flush(); + } - sink.flush(); - - if (halt) { -// log.error("Slice will interrupt query.");// FIXME comment out this line. - context.getRunningQuery().halt();//throw new InterruptedException(); } - // cancelQuery(); - return null; - - } finally { - - sink.close(); - } + return null; + } /** @@ -400,6 +399,8 @@ stats.chunksIn.increment(); +// int nadded = 0; + for (int i = 0; i < chunk.length; i++) { if (stats.naccepted.get() >= limit) @@ -420,6 +421,8 @@ out.add(bset); +// nadded++; + stats.naccepted.incrementAndGet(); if (log.isTraceEnabled()) @@ -428,29 +431,14 @@ } } // next bindingSet - + return false; } - // /** - // * Cancel the query evaluation. This is invoked when the slice has - // been - // * satisfied. At that point we want to halt not only the {@link - // SliceOp} - // * but also the entire query since it does not need to produce any - // more - // * results. 
- // */ - // private void cancelQuery() { - // - // context.halt(); - // - // } - public String toString() { - return getClass().getName() + "{offset=" + offset + ",limit=" + return super.toString() + "{offset=" + offset + ",limit=" + limit + ",nseen=" + stats.nseen + ",naccepted=" + stats.naccepted + "}"; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -475,7 +475,8 @@ final StringBuilder sb = new StringBuilder(); - sb.append("BlockingBuffer"); + sb.append(super.toString()); +// sb.append("BlockingBuffer"); sb.append("{ open=" + open); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties 2010-09-24 13:38:31 UTC (rev 3620) @@ -91,13 +91,13 @@ log4j.logger.com.bigdata.util.concurrent.Haltable=ALL -log4j.logger.com.bigdata.bop=ALL +#log4j.logger.com.bigdata.bop=ALL #log4j.logger.com.bigdata.bop.join.PipelineJoin=ALL -#log4j.logger.com.bigdata.bop.solutions.SliceOp=ALL +#log4j.logger.com.bigdata.bop.solutions.SliceOp=ALL,destPlain #log4j.logger.com.bigdata.bop.engine=ALL #log4j.logger.com.bigdata.bop.engine.QueryEngine=ALL #log4j.logger.com.bigdata.bop.engine.RunningQuery=ALL -#log4j.logger.com.bigdata.bop.engine.RunState=ALL +log4j.logger.com.bigdata.bop.engine.RunState=INFO #log4j.logger.com.bigdata.bop.engine.RunningQuery$ChunkTask=ALL #log4j.logger.com.bigdata.bop.fed.FederatedQueryEngine=ALL #log4j.logger.com.bigdata.bop.fed.FederatedRunningQuery=ALL @@ -215,6 +215,11 @@ log4j.appender.dest2.layout=org.apache.log4j.PatternLayout log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n +## destPlain +#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender +#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout +#log4j.appender.destPlain.layout.ConversionPattern= + ## # BOp run state trace (tab delimited file). Uncomment the next line to enable. 
log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -46,10 +46,6 @@ private final IIndexManager indexManager; -// private final long readTimestamp; -// -// private final long writeTimestamp; - /** * Note: This constructor DOES NOT check its arguments so unit tests may be * written with the minimum dependencies @@ -60,13 +56,10 @@ * @param writeTimestamp */ public MockRunningQuery(final IBigdataFederation<?> fed, - final IIndexManager indexManager/*, final long readTimestamp, - final long writeTimestamp*/) { + final IIndexManager indexManager) { this.fed = fed; this.indexManager = indexManager; -// this.readTimestamp = readTimestamp; -// this.writeTimestamp = writeTimestamp; } @@ -78,14 +71,6 @@ return indexManager; } -// public long getReadTimestamp() { -// return readTimestamp; -// } -// -// public long getWriteTimestamp() { -// return writeTimestamp; -// } - /** * NOP (you have to test things like slices with a full integration). */ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -405,6 +405,7 @@ final int startId = 1; final int joinId = 2; final int predId = 3; + final int sliceId = 4; /* * Enforce a constraint on the source such that it hands 3 each source @@ -442,7 +443,6 @@ })// ); - final int sliceId = 4; final SliceOp sliceOp = new SliceOp(new BOp[] { joinOp }, // slice annotations NV.asMap(new NV[] {// @@ -509,6 +509,8 @@ startId, -1 /* partitionId */, newBindingSetIterator(sources))); +// runningQuery.get(); + // verify solutions. assertSameSolutionsAnyOrder(expected, new Dechunkerator<IBindingSet>( runningQuery.iterator())); @@ -686,6 +688,131 @@ fail("write test"); } + + /** + * Unit test runs chunks into a slice without a limit. This verifies that + * the query terminates properly even though the slice is willing to accept + * more data. 
+ * + * @throws Exception + */ + public void test_query_slice_noLimit() throws Exception { + + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + + final int startId = 1; + final int sliceId = 2; + + /* + * Enforce a constraint on the source such that it hands 3 each source + * chunk to the join operator as a separate chunk + */ + final int nsources = 4; + final StartOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + new NV(PipelineOp.Annotations.CHUNK_CAPACITY, 1),// + new NV(PipelineOp.Annotations.CHUNK_OF_CHUNKS_CAPACITY, nsources),// + new NV(QueryEngineTestAnnotations.ONE_MESSAGE_PER_CHUNK, true),// + })); + + final SliceOp sliceOp = new SliceOp(new BOp[] { startOp }, + // slice annotations + NV.asMap(new NV[] { // + new NV(BOp.Annotations.BOP_ID, sliceId),// + new NV(SliceOp.Annotations.OFFSET, 0L),// + new NV(SliceOp.Annotations.LIMIT, Long.MAX_VALUE),// + })// + ); + + // the source data. + final IBindingSet[] source = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("John"), + new Constant<String>("Mary") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Leon"), + new Constant<String>("Paul") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Paul"), + new Constant<String>("Mary") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Paul"), + new Constant<String>("Mark") }// + )}; + // Put each source binding set into a chunk by itself. + final IBindingSet[][] sources = new IBindingSet[source.length][]; + for (int i = 0; i < sources.length; i++) { + sources[i] = new IBindingSet[] { source[i] }; + } + assertEquals(nsources, source.length); + assertEquals(nsources, sources.length); + + final BindingSetPipelineOp query = sliceOp; + final UUID queryId = UUID.randomUUID(); + final RunningQuery runningQuery = queryEngine.eval(queryId, query, + new LocalChunkMessage<IBindingSet>(queryEngine, queryId, + startId, -1 /* partitionId */, + newBindingSetIterator(sources))); + + // + // + // + + // the expected solutions. + final IBindingSet[] expected = source; + + // verify solutions. + assertSameSolutionsAnyOrder(expected, new Dechunkerator<IBindingSet>( + runningQuery.iterator())); + + // Wait until the query is done. + runningQuery.get(); + final Map<Integer, BOpStats> statsMap = runningQuery.getStats(); + { + // validate the stats map. + assertNotNull(statsMap); + assertEquals(2, statsMap.size()); + if (log.isInfoEnabled()) + log.info(statsMap.toString()); + } + + // validate the stats for the start operator. + { + final BOpStats stats = statsMap.get(startId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("start: " + stats.toString()); + + // verify query solution stats details. + assertEquals((long)nsources, stats.chunksIn.get()); + assertEquals((long)nsources, stats.unitsIn.get()); + assertEquals((long)nsources, stats.unitsOut.get()); + assertEquals((long)nsources, stats.chunksOut.get()); + } + + // validate the stats for the slice operator. + { + final BOpStats stats = statsMap.get(sliceId); + assertNotNull(stats); + if (log.isInfoEnabled()) + log.info("slice: " + stats.toString()); + + // verify query solution stats details. 
+ assertEquals((long)nsources, stats.chunksIn.get()); + assertEquals((long)nsources, stats.unitsIn.get()); + assertEquals((long)nsources, stats.unitsOut.get()); + assertEquals((long)nsources, stats.chunksOut.get()); + } + + } /** * Run a join with a slice. The slice is always evaluated on the query Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java 2010-09-23 20:22:50 UTC (rev 3619) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java 2010-09-24 13:38:31 UTC (rev 3620) @@ -54,13 +54,14 @@ import com.bigdata.bop.Var; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IRunningQuery; -import com.bigdata.bop.engine.MockRunningQuery; import com.bigdata.bop.engine.TestQueryEngine; import com.bigdata.bop.solutions.SliceOp.SliceStats; +import com.bigdata.journal.IIndexManager; import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.relation.accesspath.ThickAsynchronousIterator; +import com.bigdata.service.IBigdataFederation; import com.bigdata.util.InnerCause; /** @@ -219,7 +220,7 @@ final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( new MockRunningQuery(null/* fed */, null/* indexManager */ - ), -1/* partitionId */, stats, + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); // get task. @@ -311,7 +312,7 @@ final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( new MockRunningQuery(null/* fed */, null/* indexManager */ - ), -1/* partitionId */, stats, + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); // get task. @@ -323,18 +324,7 @@ assertTrue(ft.isDone()); assertFalse(ft.isCancelled()); -// try { - ft.get(); // verify nothing thrown. -// fail("Expecting inner cause : " + InterruptedException.class); -// } catch (Throwable t) { -// if (InnerCause.isInnerCause(t, InterruptedException.class)) { -// if (log.isInfoEnabled()) -// log.info("Ignoring expected exception: " + t, t); -// } else { -// fail("Expecting inner cause " + InterruptedException.class -// + ", not " + t, t); -// } -// } + ft.get(); // verify nothing thrown. assertEquals(limit, stats.naccepted.get()); assertEquals(offset+limit, stats.nseen.get()); @@ -346,6 +336,171 @@ } + /** + * Unit test where the offset is never satisfied. For this test, all binding + * sets will be consumed but none will be emitted. 
+ * + * @throws InterruptedException + * @throws ExecutionException + */ + public void test_slice_offsetNeverSatisfied() throws InterruptedException, + ExecutionException { + + final int bopId = 1; + + final long offset = 100L; + final long limit = 3L; + + final SliceOp query = new SliceOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(SliceOp.Annotations.BOP_ID, bopId),// + new NV(SliceOp.Annotations.OFFSET, offset),// + new NV(SliceOp.Annotations.LIMIT, limit),// + })); + + assertEquals("offset", offset, query.getOffset()); + + assertEquals("limit", limit, query.getLimit()); + + // the expected solutions (none) + final IBindingSet[] expected = new IBindingSet[0]; + + final SliceStats stats = query.newStats(); + + final IAsynchronousIterator<IBindingSet[]> source = new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { data.toArray(new IBindingSet[0]) }); + + final IBlockingBuffer<IBindingSet[]> sink = query.newBuffer(stats); + + final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( + new MockRunningQuery(null/* fed */, null/* indexManager */ + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); + + // get task. + final FutureTask<Void> ft = query.eval(context); + + ft.run(); + + /* + * Note: When the slice does not have a limit (or if we write a test + * where the #of source binding sets can not satisfy the offset and/or + * limit) then the sink WILL NOT be closed by the slice. Therefore, in + * order for the iterator to terminate we first check the Future of the + * SliceTask and then _close_ the sink before consuming the iterator. + */ + assertTrue(ft.isDone()); + assertFalse(ft.isCancelled()); + ft.get(); // verify nothing thrown. + sink.close(); // close the sink so the iterator will terminate! + + TestQueryEngine.assertSameSolutions(expected, sink.iterator()); + + assertEquals(1L, stats.chunksIn.get()); + assertEquals(6L, stats.unitsIn.get()); + assertEquals(0L, stats.unitsOut.get()); + assertEquals(0L, stats.chunksOut.get()); + assertEquals(6L, stats.nseen.get()); + assertEquals(0L, stats.naccepted.get()); + + } + + /** + * Unit test where the offset plus the limit is never satisfied. For this + * test, all binding sets will be consumed and some will be emitted, but the + * slice is never satisfied. 
+ * + * @throws InterruptedException + * @throws ExecutionException + */ + public void test_slice_offsetPlusLimitNeverSatisfied() throws InterruptedException, + ExecutionException { + + final Var<?> x = Var.var("x"); + final Var<?> y = Var.var("y"); + + final int bopId = 1; + + final long offset = 2L; + final long limit = 10L; + + final SliceOp query = new SliceOp(new BOp[] {}, NV.asMap(new NV[] {// + new NV(SliceOp.Annotations.BOP_ID, bopId),// + new NV(SliceOp.Annotations.OFFSET, offset),// + new NV(SliceOp.Annotations.LIMIT, limit),// + })); + + assertEquals("offset", offset, query.getOffset()); + + assertEquals("limit", limit, query.getLimit()); + + // the expected solutions + final IBindingSet[] expected = new IBindingSet[] {// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Mary"), + new Constant<String>("Jane") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Paul"), + new Constant<String>("Leon") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Paul"), + new Constant<String>("John") }// + ),// + new ArrayBindingSet(// + new IVariable[] { x, y },// + new IConstant[] { new Constant<String>("Leon"), + new Constant<String>("Paul") }// + ),// + }; + + final SliceStats stats = query.newStats(); + + final IAsynchronousIterator<IBindingSet[]> source = new ThickAsynchronousIterator<IBindingSet[]>( + new IBindingSet[][] { data.toArray(new IBindingSet[0]) }); + + final IBlockingBuffer<IBindingSet[]> sink = query.newBuffer(stats); + + final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( + new MockRunningQuery(null/* fed */, null/* indexManager */ + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); + + // get task. + final FutureTask<Void> ft = query.eval(context); + + ft.run(); + + /* + * Note: When the slice does not have a limit (or if we write a test + * where the #of source binding sets can not satisfy the offset and/or + * limit) then the sink WILL NOT be closed by the slice. Therefore, in + * order for the iterator to terminate we first check the Future of the + * SliceTask and then _close_ the sink before consuming the iterator. + */ + assertTrue(ft.isDone()); + assertFalse(ft.isCancelled()); + ft.get(); // verify nothing thrown. + sink.close(); // close the sink so the iterator will terminate! + + TestQueryEngine.assertSameSolutions(expected, sink.iterator()); + + assertEquals(1L, stats.chunksIn.get()); + assertEquals(6L, stats.unitsIn.get()); + assertEquals(4L, stats.unitsOut.get()); + assertEquals(1L, stats.chunksOut.get()); + assertEquals(6L, stats.nseen.get()); + assertEquals(4L, stats.naccepted.get()); + + } + + /** + * Unit test where the slice accepts everything. 
+ * + * @throws InterruptedException + * @throws ExecutionException + */ public void test_slice_offset0_limitAll() throws InterruptedException, ExecutionException { @@ -353,8 +508,8 @@ final SliceOp query = new SliceOp(new BOp[] {}, NV.asMap(new NV[] {// new NV(SliceOp.Annotations.BOP_ID, bopId),// -// new NV(SliceOp.Annotations.OFFSET, 1L),// -// new NV(SliceOp.Annotations.LIMIT, 3L),// + // new NV(SliceOp.Annotations.OFFSET, 1L),// + // new NV(SliceOp.Annotations.LIMIT, 3L),// })); assertEquals("offset", 0L, query.getOffset()); @@ -362,8 +517,8 @@ assertEquals("limit", Long.MAX_VALUE, query.getLimit()); // the expected solutions - final IBindingSet[] expected = data.toArray(new IBindingSet[0]); - + final IBindingSet[] expected = data.toArray(new IBindingSet[0]); + final SliceStats stats = query.newStats(); final IAsynchronousIterator<IBindingSet[]> source = new ThickAsynchronousIterator<IBindingSet[]>( @@ -373,19 +528,27 @@ final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( new MockRunningQuery(null/* fed */, null/* indexManager */ - ), -1/* partitionId */, stats, source, sink, null/* sink2 */); + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); // get task. final FutureTask<Void> ft = query.eval(context); ft.run(); - TestQueryEngine.assertSameSolutions(expected, sink.iterator()); - + /* + * Note: When the slice does not have a limit (or if we write a test + * where the #of source binding sets can not satisfy the offset and/or + * limit) then the sink WILL NOT be closed by the slice. Therefore, in + * order for the iterator to terminate we first check the Future of the + * SliceTask and then _close_ the sink before consuming the iterator. + */ assertTrue(ft.isDone()); assertFalse(ft.isCancelled()); ft.get(); // verify nothing thrown. + sink.close(); // close the sink so the iterator will terminate! + TestQueryEngine.assertSameSolutions(expected, sink.iterator()); + assertEquals(1L, stats.chunksIn.get()); assertEquals(6L, stats.unitsIn.get()); assertEquals(6L, stats.unitsOut.get()); @@ -395,7 +558,8 @@ } - public void test_slice_correctRejection_badOffset() throws InterruptedException { + public void test_slice_correctRejection_badOffset() + throws InterruptedException { final int bopId = 1; @@ -418,7 +582,7 @@ final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( new MockRunningQuery(null/* fed */, null/* indexManager */ - ), -1/* partitionId */, stats, source, sink, null/* sink2 */); + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); // get task. try { @@ -455,7 +619,7 @@ final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( new MockRunningQuery(null/* fed */, null/* indexManager */ - ), -1/* partitionId */, stats, source, sink, null/* sink2 */); + , sink), -1/* partitionId */, stats, source, sink, null/* sink2 */); // get task. try { @@ -516,8 +680,6 @@ final SliceStats stats = query.newStats(); - final IRunningQuery q = new MockRunningQuery(null/* fed */, null/* indexManager */); - // start time in nanos. 
final long begin = System.nanoTime(); @@ -540,9 +702,14 @@ final IAsynchronousIterator<IBindingSet[]> source = new ThickAsynchronousIterator<IBindingSet[]>( new IBindingSet[][] { chunk }); + final IBlockingBuffer<IBindingSet[]> sink = new BlockingBuffer<IBindingSet[]>( + chunk.length); + + final IRunningQuery q = new MockRunningQuery(null/* fed */, + null/* indexManager */, sink); + final BOpContext<IBindingSet> context = new BOpContext<IBindingSet>( - q, -1/* partitionId */, stats, source, - new BlockingBuffer<IBindingSet[]>(chunk.length), null/* sink2 */); + q, -1/* partitionId */, stats, source, sink, null/* sink2 */); final FutureTask<Void> ft = query.eval(context); @@ -610,4 +777,29 @@ } + private static class MockRunningQuery extends + com.bigdata.bop.engine.MockRunningQuery { + + private final IBlockingBuffer<IBindingSet[]> sink; + + public MockRunningQuery(final IBigdataFederation<?> fed, + final IIndexManager indexManager, + final IBlockingBuffer<IBindingSet[]> sink) { + + super(fed, indexManager); + + this.sink = sink; + + } + + /** + * Overridden to close the sink so the slice will terminate. + */ + @Override + public void halt() { + sink.close(); + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
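The essence of the SliceOp fix above: the slice task used to close its sink in a finally block, which tore down the shared query buffer after its first invocation and surfaced as a spurious interrupt of the whole query. It now batches each chunk through a lock, flushes while synchronized, and — once the slice is satisfied — halts the query via the controller instead of closing the buffer. A condensed sketch of that pattern follows, with stand-in types rather than the bigdata operator contract.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class SliceTaskSketch {

        final long offset, limit;
        final List<String> sink;  // stand-in for the shared query buffer
        final Runnable haltQuery; // e.g. context.getRunningQuery()::halt
        long nseen, naccepted;    // guarded by synchronized (this)

        SliceTaskSketch(long offset, long limit, List<String> sink, Runnable haltQuery) {
            this.offset = offset; this.limit = limit;
            this.sink = sink; this.haltQuery = haltQuery;
        }

        void run(Iterator<String[]> source) {
            while (source.hasNext()) {
                final String[] chunk = source.next();
                // batch each chunk through one lock so the slice counters and the
                // writes to the shared sink stay consistent across concurrent tasks.
                synchronized (this) {
                    if (handleChunk(chunk)) {
                        haltQuery.run(); // ask the controller to cancel the query...
                        return;          // ...but never close the shared sink here.
                    }
                }
            }
        }

        /** @return true once offset+limit has been satisfied. */
        boolean handleChunk(String[] chunk) {
            for (String solution : chunk) {
                if (naccepted >= limit) return true;
                if (nseen++ < offset) continue; // skip until the offset is consumed
                sink.add(solution);
                naccepted++;
            }
            return naccepted >= limit;
        }

        public static void main(String[] args) {
            List<String> sink = new ArrayList<>();
            SliceTaskSketch slice = new SliceTaskSketch(1, 2, sink,
                    () -> System.out.println("halt requested"));
            slice.run(Arrays.asList(new String[] { "a" }, new String[] { "b" },
                    new String[] { "c" }, new String[] { "d" }).iterator());
            System.out.println(sink); // [b, c] — offset skips a, limit stops after c
        }
    }

Run against four single-solution chunks with offset=1 and limit=2, the sketch accepts b and c and requests a halt without ever closing the sink — mirroring why the fixed SliceOp no longer interrupts queries that still have chunks in flight.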
From: <ble...@us...> - 2010-09-23 20:22:57
Revision: 3619 http://bigdata.svn.sourceforge.net/bigdata/?rev=3619&view=rev Author: blevine218 Date: 2010-09-23 20:22:50 +0000 (Thu, 23 Sep 2010) Log Message: ----------- break dependency on test.xml script from bigdata-core. bigdata-integ now uses its own ANT script to start/stop the class and lookup services. Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-23 20:20:24 UTC (rev 3618) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-23 20:22:50 UTC (rev 3619) @@ -1,6 +1,4 @@ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> - - <parent> <groupId>com.bigdata</groupId> <artifactId>bigdata</artifactId> @@ -14,40 +12,33 @@ <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> - <deploy.root.dir>${project.build.directory}/deploy</deploy.root.dir> - <bigdata.dependency>bigdata-core</bigdata.dependency> + <integ.deploy.root.dir>${project.build.directory}/deploy</integ.deploy.root.dir> + <integ.bigdata.dependency>bigdata-core</integ.bigdata.dependency> <!-- This is kinda hokey, but not sure there's a better way to construct the path to the root of the exploded tarball --> - - <deploy.dir>${deploy.root.dir}/${bigdata.dependency}-${project.version}</deploy.dir> - <test.dir>${deploy.dir}/testing</test.dir> - <testScript>${test.dir}/test.xml</testScript> - - <basedir>${test.dir}</basedir> - <app.home>${deploy.dir}</app.home> - <deploy.conf.dir>${test.dir}/conf</deploy.conf.dir> - <deploy.lib>${deploy.dir}/lib</deploy.lib> - <deploy.lib.test>${test.dir}/lib-test</deploy.lib.test> - <deploy.lib.dl>${deploy.dir}/lib-dl</deploy.lib.dl> - <test.codebase.dir>${deploy.lib.dl}</test.codebase.dir> - <test.codebase.port>23333</test.codebase.port> - <java.security.policy>${deploy.conf.dir}/policy.all</java.security.policy> - <log4j.configuration>${deploy.dir}/var/config/logging/log4j.properties</log4j.configuration> - <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack> - <default.nic>eth0</default.nic> - <parent.artifactName>bigdata-core</parent.artifactName> - - - <!-- - In the ANT script, hostname is obtained by an exec of the - 'hostname' command. Hard-coding for now. - --> - <hostname>blevine-desktop</hostname> - <test.codebase>http://${hostname}:${test.codebase.port}/jsk-dl.jar</test.codebase> <!-- Not used??? 
--> - <federation.name>bigdata.test.group-${hostname}</federation.name> + <integ.deploy.dir>${integ.deploy.root.dir}/${integ.bigdata.dependency}-${project.version}</integ.deploy.dir> + + <integ.test.dir>${integ.deploy.dir}/testing</integ.test.dir> + <integ.testScript>${integ.test.dir}/test.xml</integ.testScript> + <integ.basedir>${integ.test.dir}</integ.basedir> + <integ.app.home>${integ.deploy.dir}</integ.app.home> + <integ.deploy.conf.dir>${integ.test.dir}/conf</integ.deploy.conf.dir> + <integ.deploy.lib>${integ.deploy.dir}/lib</integ.deploy.lib> + <integ.deploy.lib.test>${integ.test.dir}/lib-test</integ.deploy.lib.test> + <integ.deploy.lib.dl>${integ.deploy.dir}/lib-dl</integ.deploy.lib.dl> + <integ.test.codebase.dir>${integ.deploy.lib.dl}</integ.test.codebase.dir> + <integ.test.codebase.port>23333</integ.test.codebase.port> + <integ.java.security.policy>${integ.deploy.conf.dir}/policy.all</integ.java.security.policy> + <integ.log4j.configuration>${integ.deploy.dir}/var/config/logging/log4j.properties</integ.log4j.configuration> + <integ.java.net.preferIPv4Stack>true</integ.java.net.preferIPv4Stack> + <integ.default.nic>eth0</integ.default.nic> + <integ.parent.artifactName>bigdata-core</integ.parent.artifactName> + + <!-- Set to empty string to indicate "unset." Application code will set a reasonable default --> + <integ.federation.name></integ.federation.name> </properties> @@ -69,7 +60,7 @@ <artifactId>bigdata-core</artifactId> <classifier>deploy</classifier> <type>tar.gz</type> - <outputDirectory>${deploy.root.dir}</outputDirectory> + <outputDirectory>${integ.deploy.root.dir}</outputDirectory> </artifactItem> </artifactItems> <useSubdirPerArtifact>true</useSubdirPerArtifact> @@ -116,42 +107,34 @@ </excludes> <systemPropertyVariables> - <java.security.policy>${java.security.policy}</java.security.policy> - <java.net.preferIPv4Stack>{java.net.preferIPv4Stack}"</java.net.preferIPv4Stack> - <log4j.configuration>${log4j.configuration}</log4j.configuration> - <!-- ><log4j.debug>true"</log4j.debug> --> + <java.security.policy>${integ.java.security.policy}</java.security.policy> + <java.net.preferIPv4Stack>{integ.java.net.preferIPv4Stack}"</java.net.preferIPv4Stack> + <log4j.configuration>${integ.log4j.configuration}</log4j.configuration> - <basedir>${basedir}</basedir> <!-- Tells the unit tests where the ant script is, so they can find resources. --> - <app.home>${app.home}</app.home> <!-- This is the deployment directory, easily accessed by the DataFinder class. --> - <log4j.path>${log4j.configuration}</log4j.path> - <default.nic>${default.nic}</default.nic> - - <!-- Jini group name - <federation.name>${federation.name}</federation.name> --> - - <!-- TODO !!!!!! 
- <property key="java.class.path" value="${junit.classpath.text}" /> - --> - - <classserver.jar>${deploy.lib}/classserver.jar</classserver.jar> - <colt.jar>${deploy.lib}/colt.jar</colt.jar> - <ctc_utils.jar>${deploy.lib}/ctc_utils.jar</ctc_utils.jar> - <cweb-commons.jar>${deploy.lib}/cweb-commons.jar</cweb-commons.jar> - <cweb-extser.jar>${deploy.lib}/cweb-extser.jar</cweb-extser.jar> - <highscalelib.jar>${deploy.lib}/highscalelib.jar</highscalelib.jar> - <dsiutils.jar>${deploy.lib}/dsiutils.jar</dsiutils.jar> - <lgplutils.jar>${deploy.lib}/lgplutils.jar</lgplutils.jar> - <fastutil.jar>${deploy.lib}/fastutil.jar</fastutil.jar> - <icu4j.jar>${deploy.lib}/icu4j.jar</icu4j.jar> - <jsk-lib.jar>${deploy.lib}/jsk-lib.jar</jsk-lib.jar> - <jsk-platform.jar>${deploy.lib}jsk-platform.jar</jsk-platform.jar> - <log4j.jar>${deploy.lib}/log4j.jar</log4j.jar> - <iris.jar>${deploy.lib}/iris.jar</iris.jar> - <jgrapht.jar>${deploy.lib}/jgrapht.jar</jgrapht.jar> - <openrdf-sesame.jar>${deploy.lib}/openrdf-sesame.jar</openrdf-sesame.jar> - <slf4j.jar>${deploy.lib}/slf4j.jar</slf4j.jar> - <nxparser.jar>${deploy.lib}/nxparser.jar</nxparser.jar> - <zookeeper.jar>${deploy.lib}/zookeeper.jar</zookeeper.jar> + <basedir>${integ.basedir}</basedir> <!-- Tells the unit tests where the ant script is, so they can find resources. --> + <app.home>${integ.app.home}</app.home> <!-- This is the deployment directory, easily accessed by the DataFinder class. --> + <log4j.path>${integ.log4j.configuration}</log4j.path> + <default.nic>${integ.default.nic}</default.nic> + <federation.name>${integ.federation.name}</federation.name> + <classserver.jar>${integ.deploy.lib}/classserver.jar</classserver.jar> + <colt.jar>${integ.deploy.lib}/colt.jar</colt.jar> + <ctc_utils.jar>${integ.deploy.lib}/ctc_utils.jar</ctc_utils.jar> + <cweb-commons.jar>${integ.deploy.lib}/cweb-commons.jar</cweb-commons.jar> + <cweb-extser.jar>${integ.deploy.lib}/cweb-extser.jar</cweb-extser.jar> + <highscalelib.jar>${integ.deploy.lib}/highscalelib.jar</highscalelib.jar> + <dsiutils.jar>${integ.deploy.lib}/dsiutils.jar</dsiutils.jar> + <lgplutils.jar>${integ.deploy.lib}/lgplutils.jar</lgplutils.jar> + <fastutil.jar>${integ.deploy.lib}/fastutil.jar</fastutil.jar> + <icu4j.jar>${integ.deploy.lib}/icu4j.jar</icu4j.jar> + <jsk-lib.jar>${integ.deploy.lib}/jsk-lib.jar</jsk-lib.jar> + <jsk-platform.jar>${integ.deploy.lib}jsk-platform.jar</jsk-platform.jar> + <log4j.jar>${integ.deploy.lib}/log4j.jar</log4j.jar> + <iris.jar>${integ.deploy.lib}/iris.jar</iris.jar> + <jgrapht.jar>${integ.deploy.lib}/jgrapht.jar</jgrapht.jar> + <openrdf-sesame.jar>${integ.deploy.lib}/openrdf-sesame.jar</openrdf-sesame.jar> + <slf4j.jar>${integ.deploy.lib}/slf4j.jar</slf4j.jar> + <nxparser.jar>${integ.deploy.lib}/nxparser.jar</nxparser.jar> + <zookeeper.jar>${integ.deploy.lib}/zookeeper.jar</zookeeper.jar> </systemPropertyVariables> </configuration> <executions> @@ -173,8 +156,7 @@ <phase>pre-integration-test</phase> <configuration> <tasks> - <echo message="testscript = ${testScript}" /> - <ant antfile="${testScript}" target="startTestServices" useNativeBasedir="true" inheritAll="false"/> + <ant antfile="${project.build.testOutputDirectory}/services.xml" target="start" useNativeBasedir="true" inheritAll="true"/> </tasks> </configuration> <goals> @@ -188,7 +170,7 @@ <configuration> <tasks> <echo message="testscript = ${testScript}" /> - <ant antfile="${testScript}" target="stopTestServices" useNativeBasedir="true" inheritAll="false"/> + <ant 
antfile="${project.build.testOutputDirectory}/services.xml" target="stop" useNativeBasedir="true" inheritAll="true"/> </tasks> </configuration> <goals> Added: branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/resources/services.xml 2010-09-23 20:22:50 UTC (rev 3619) @@ -0,0 +1,104 @@ +<project name="bigdata-integration-lookup-starter" default="help" basedir="."> +<!-- + <property name="integ.deploy.dir" location="${basedir}/.." /> + <property name="integ.app.home" location="${deploy.dir}" /> + <property name="integ.deploy.conf.dir" location="${deploy.dir}/conf" /> + <property name="integ.deploy.lib" location="${deploy.dir}/lib" /> + <property name="integ.deploy.lib.test" location="${deploy.dir}/testing/lib-test" /> + <property name="integ.deploy.lib.dl" location="${deploy.dir}/lib-dl" /> + <property name="integ.test.codebase.dir" location="${deploy.lib.dl}" /> + <property name="integ.test.codebase.port" value="23333" /> + <property name="integ.java.security.policy" location="conf/policy.all" /> + <property name="integ.log4j.configuration" location="${deploy.dir}/var/config/logging/log4j.properties" /> + <property name="integ.java.net.preferIPv4Stack" value="true" /> + <property name="integ.default.nic" value="eth0" /> + <property name="integ.parent.artifactName" value="bigdata-core" /> +--> + + <!-- Set to the empty string to indicate "unset". Application code will determine correct + default value. + <property name="integ.federation.name" value="" /> + --> + + <target name="help"> + <echo level="error" message="This script must be run with either the 'start' or 'stop' targets" /> + <antcall target="dumpProps" /> + </target> + + <target name="dumpProps"> + <echo message="Application properties:\n" /> + <echoproperties prefix="integ." /> + </target> + + <target name="start"> + <echo message="Starting support services..." /> + <antcall target="dumpProps" /> + <antcall target="startHttpd" /> + <antcall target="startLookup" /> + </target> + + <target name="stop"> + <echo message="Stopping support services..." /> + <antcall target="dumpProps" /> + <antcall target="stopLookup" /> + <antcall target="stopHttpd" /> + </target> + + <target name="startHttpd"> + <echo message="Starting class(http) service..." /> + <java jar="${integ.deploy.lib}/classserver.jar" fork="true" spawn="true"> + <arg value="-verbose" /> + <arg value="-stoppable" /> + <arg line="-port ${integ.test.codebase.port}" /> + <arg line="-dir '${integ.test.codebase.dir}'" /> + </java> + </target> + + <target name="stopHttpd"> + <echo message="Stopping class(http) service..." /> + <java jar="${integ.deploy.lib}/classserver.jar" fork="true" spawn="false" failonerror="true"> + <arg line="-port ${integ.test.codebase.port}" /> + <arg line="-dir '${integ.test.codebase.dir}'" /> + <arg value="-stop" /> + </java> + </target> + + <target name="startLookup"> + <echo message="Starting lookup service..." 
/> + <java jar="${integ.deploy.lib}/${integ.parent.artifactName}-lookupstarter.jar" fork="true" spawn="true"> + <sysproperty key="app.home" value="${integ.app.home}" /> + <sysproperty key="jini.lib" value="${integ.deploy.lib}" /> + <sysproperty key="jini.lib.dl" value="${integ.deploy.lib.dl}" /> + <sysproperty key="java.security.policy" value="${integ.java.security.policy}" /> + <sysproperty key="java.security.debug" value="off" /> + <sysproperty key="java.protocol.handler.pkgs" value="net.jini.url" /> + <sysproperty key="log4j.configuration" value="${integ.log4j.configuration}" /> + <sysproperty key="codebase.port" value="${integ.test.codebase.port}" /> + <sysproperty key="java.net.preferIPv4Stack" value="${integ.java.net.preferIPv4Stack}" /> + <sysproperty key="federation.name" value="${integ.federation.name}" /> + <sysproperty key="default.nic" value="${integ.default.nic}" /> + </java> + </target> + + <target name="stopLookup"> + <echo message="Stopping lookup service..." /> + <java jar="${integ.deploy.lib}/${integ.parent.artifactName}-lookupstarter.jar" fork="true" spawn="false" failonerror="true"> + <sysproperty key="app.home" value="${integ.app.home}" /> + <sysproperty key="jini.lib" value="${integ.deploy.lib}" /> + <sysproperty key="jini.lib.dl" value="${integ.deploy.lib.dl}" /> + <sysproperty key="java.security.policy" value="${integ.java.security.policy}" /> + <sysproperty key="log4j.configuration" value="${integ.log4j.configuration}" /> + <sysproperty key="java.net.preferIPv4Stack" value="${integ.java.net.preferIPv4Stack}" /> + <sysproperty key="federation.name" value="${integ.federation.name}" /> + <sysproperty key="default.nic" value="${integ.default.nic}" /> + <arg value="-stop" /> + </java> + </target> + + <target name="clean-sparql-test-suite" description="delete the files unpacked by the Sesame SPARQL test suite."> + <echo>"clearing: ${java.io.tmpdir}/sparql-*"</echo> + <delete verbose="true"> + <dirset dir="${java.io.tmpdir}" includes="sparql-*" /> + </delete> + </target> +</project> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
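The surefire configuration in this revision hands the deployment layout to the integration tests as system properties, with an empty federation.name meaning "unset" and application code expected to choose a default. The sketch below shows how receiving test code might resolve those properties; the property names (federation.name, app.home) come from the POM above, but the defaulting logic, including a hostname-derived group name echoing the removed bigdata.test.group-${hostname} convention, is an assumption rather than the actual bigdata implementation:

import java.io.File;

public class IntegTestConfig {

    // Empty string means "unset": fall back to a computed default,
    // here derived from the hostname as the old ANT script did.
    static String federationName() {
        final String v = System.getProperty("federation.name", "");
        if (v.length() == 0) {
            final String host = System.getenv("HOSTNAME") != null
                    ? System.getenv("HOSTNAME") : "localhost";
            return "bigdata.test.group-" + host;
        }
        return v;
    }

    // app.home points at the exploded deploy tarball; "." is only a
    // fallback for running this sketch outside of surefire.
    static File appHome() {
        return new File(System.getProperty("app.home", "."));
    }

    public static void main(final String[] args) {
        System.out.println("federation.name = " + federationName());
        System.out.println("app.home        = " + appHome().getAbsolutePath());
    }
}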
From: <tho...@us...> - 2010-09-23 20:20:30
Revision: 3618 http://bigdata.svn.sourceforge.net/bigdata/?rev=3618&view=rev Author: thompsonbry Date: 2010-09-23 20:20:24 +0000 (Thu, 23 Sep 2010) Log Message: ----------- Fixed a problem I introduced into the layering of the BOpContext. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-23 20:10:58 UTC (rev 3617) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-23 20:20:24 UTC (rev 3618) @@ -196,7 +196,7 @@ final BOpStats stats, final IAsynchronousIterator<E[]> source, final IBlockingBuffer<E[]> sink, final IBlockingBuffer<E[]> sink2) { - super(null); + super(runningQuery.getFederation(), runningQuery.getIndexManager()); this.runningQuery = runningQuery; // if (indexManager == null) Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java 2010-09-23 20:10:58 UTC (rev 3617) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java 2010-09-23 20:20:24 UTC (rev 3618) @@ -59,7 +59,10 @@ static private final transient Logger log = Logger.getLogger(BOpContextBase.class); - private final QueryEngine queryEngine; +// private final QueryEngine queryEngine; + + private final IBigdataFederation<?> fed; + private final IIndexManager indexManager; /** * The <strong>local</strong> {@link IIndexManager}. Query evaluation occurs @@ -68,17 +71,18 @@ * {@link ILocalBTreeView}. */ final public IIndexManager getIndexManager() { - return queryEngine.getIndexManager(); + return indexManager; } - + /** * The {@link IBigdataFederation} IFF the operator is being evaluated on an - * {@link IBigdataFederation}. When evaluating operations against an - * {@link IBigdataFederation}, this reference provides access to the - * scale-out view of the indices and to other bigdata services. + * {@link IBigdataFederation} and otherwise <code>null</code>. When + * evaluating operations against an {@link IBigdataFederation}, this + * reference provides access to the scale-out view of the indices and to + * other bigdata services. */ final public IBigdataFederation<?> getFederation() { - return queryEngine.getFederation(); + return fed; } /** @@ -88,23 +92,37 @@ * <em>local</em> {@link #getIndexManager() index manager}. */ public final Executor getExecutorService() { - return getIndexManager().getExecutorService(); + return indexManager.getExecutorService(); } + public BOpContextBase(final QueryEngine queryEngine) { + + this(queryEngine.getFederation(), queryEngine.getIndexManager()); + + } + /** - * + * Core constructor. + * @param fed * @param indexManager - * The <strong>local</strong> {@link IIndexManager}. Query - * evaluation occurs against the local indices. In scale-out, - * query evaluation proceeds shard wise and this - * {@link IIndexManager} MUST be able to read on the - * {@link ILocalBTreeView}. 
- * */ - public BOpContextBase(final QueryEngine queryEngine) { - this.queryEngine = queryEngine; + public BOpContextBase(final IBigdataFederation<?> fed, + final IIndexManager indexManager) { + + /* + * @todo null is permitted here for the unit tests, but we should really + * mock the IIndexManager and pass in a non-null object here and then + * verify that the reference is non-null. + */ +// if (indexManager == null) +// throw new IllegalArgumentException(); + + this.fed = fed; + + this.indexManager = indexManager; + } - + /** * Locate and return the view of the relation(s) identified by the * {@link IPredicate}. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
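The fix in r3618 replaces the broken super(null) call with a core (federation, indexManager) constructor on BOpContextBase, to which the QueryEngine convenience constructor delegates. The following reduced sketch shows that layering with stand-in interfaces in place of the real bigdata types (all names here are illustrative, not the actual API):

public class ContextLayering {

    interface IndexManager { }
    interface Federation { }
    interface QueryEngine {
        Federation getFederation();
        IndexManager getIndexManager();
    }

    static class ContextBase {
        private final Federation fed;             // null when standalone
        private final IndexManager indexManager;  // null permitted in tests

        // Convenience constructor: unpack the engine, then delegate.
        ContextBase(final QueryEngine queryEngine) {
            this(queryEngine.getFederation(), queryEngine.getIndexManager());
        }

        // Core constructor: the only place state is assigned.
        ContextBase(final Federation fed, final IndexManager indexManager) {
            this.fed = fed;
            this.indexManager = indexManager;
        }

        final Federation getFederation() { return fed; }
        final IndexManager getIndexManager() { return indexManager; }
    }

    // The subclass can now be built from a running query's state instead
    // of calling super(null), which was the original bug.
    static class Context extends ContextBase {
        Context(final Federation fed, final IndexManager indexManager) {
            super(fed, indexManager);
        }
    }

    public static void main(final String[] args) {
        final Context c = new Context(null, null); // nulls allowed for tests
        System.out.println("fed = " + c.getFederation());
    }
}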
From: <tho...@us...> - 2010-09-23 20:11:07
Revision: 3617 http://bigdata.svn.sourceforge.net/bigdata/?rev=3617&view=rev Author: thompsonbry Date: 2010-09-23 20:10:58 +0000 (Thu, 23 Sep 2010) Log Message: ----------- Finally chased down one bug which I had introduced in QueryResultIterator. I've added a bunch of unit tests and the PipelineType annotation, which will eventually support both vectored and operator at a time evaluation. I am still chasing the bug with multiple chunk messages flowing through the query controller. Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/Union.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/PipelineUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/StartOpMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/notes.txt branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/FederatedRunningQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/rdf/join/DataSetJoin.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java branches/QUADS_QUERY_BRANCH/bigdata/src/resources/logging/log4j.properties branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/TestBOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/bset/TestCopyBindingSets.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/PipelineDelayOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestAll.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestPipelineUtility.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestFederatedQueryEngine.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/fed/TestThickChunkMessage.java 
branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestMemorySortOp.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/solutions/TestSliceOp.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineType.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngineTestAnnotations.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/fed/CancelQuery.java branches/QUADS_QUERY_BRANCH/bigdata/src/test/com/bigdata/bop/engine/TestRunState.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOp.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -167,7 +167,7 @@ * override this method. */ BOpEvaluationContext getEvaluationContext(); - + /** * Return <code>true</code> iff this operator is an access path which writes * on the database. Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpBase.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -344,7 +344,8 @@ public String toString() { final StringBuilder sb = new StringBuilder(); - sb.append(getClass().getName()); +// sb.append(getClass().getName()); + sb.append(super.toString()); sb.append("("); for (int i = 0; i < args.length; i++) { final BOp t = args[i]; Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -27,31 +27,18 @@ */ package com.bigdata.bop; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; - import org.apache.log4j.Logger; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.RunningQuery; -import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; -import com.bigdata.btree.IRangeQuery; import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.TimestampUtility; -import com.bigdata.relation.IRelation; -import com.bigdata.relation.accesspath.AccessPath; import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; -import com.bigdata.relation.locator.IResourceLocator; -import com.bigdata.relation.rule.IRule; -import com.bigdata.relation.rule.eval.IJoinNexus; -import com.bigdata.service.DataService; import com.bigdata.service.IBigdataFederation; -import com.bigdata.striterator.IKeyOrder; import com.ibm.icu.impl.ByteBuffer; /** @@ -62,18 +49,10 @@ */ public class BOpContext<E> extends BOpContextBase { - static private final Logger log = Logger.getLogger(BOpContext.class); + static private final transient 
Logger log = Logger.getLogger(BOpContext.class); private final IRunningQuery runningQuery; -// private final IBigdataFederation<?> fed; -// -// private final IIndexManager indexManager; -// -// private final long readTimestamp; -// -// private final long writeTimestamp; - private final int partitionId; private final BOpStats stats; @@ -95,60 +74,8 @@ public IRunningQuery getRunningQuery() { return runningQuery; } - + /** - * The {@link IBigdataFederation} IFF the operator is being evaluated on an - * {@link IBigdataFederation}. When evaluating operations against an - * {@link IBigdataFederation}, this reference provides access to the - * scale-out view of the indices and to other bigdata services. - */ - @Override - public IBigdataFederation<?> getFederation() { - return runningQuery.getFederation(); - } - - /** - * The <strong>local</strong> {@link IIndexManager}. Query evaluation occurs - * against the local indices. In scale-out, query evaluation proceeds shard - * wise and this {@link IIndexManager} MUST be able to read on the - * {@link ILocalBTreeView}. - */ - @Override - public IIndexManager getIndexManager() { - return runningQuery.getIndexManager(); - } - - /** - * Return the {@link Executor} on to which the operator may submit tasks. - * <p> - * Note: The is the {@link ExecutorService} associated with the - * <em>local</em> {@link #getIndexManager() index manager}. - */ - public final Executor getExecutorService() { - return runningQuery.getIndexManager().getExecutorService(); - } - -// /** -// * The timestamp or transaction identifier against which the query is -// * reading. -// * -// * @deprecated by {@link BOp.Annotations#TIMESTAMP} -// */ -// public final long getReadTimestamp() { -// return runningQuery.getReadTimestamp(); -// } -// -// /** -// * The timestamp or transaction identifier against which the query is -// * writing. -// * -// * @deprecated by {@link BOp.Annotations#TIMESTAMP} -// */ -// public final long getWriteTimestamp() { -// return runningQuery.getWriteTimestamp(); -// } - - /** * The index partition identifier -or- <code>-1</code> if the index is not * sharded. */ Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -27,7 +27,11 @@ */ package com.bigdata.bop; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; + import org.apache.log4j.Logger; + import com.bigdata.bop.engine.QueryEngine; import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; @@ -53,7 +57,7 @@ */ public class BOpContextBase { - static private final Logger log = Logger.getLogger(BOpContextBase.class); + static private final transient Logger log = Logger.getLogger(BOpContextBase.class); private final QueryEngine queryEngine; @@ -63,7 +67,7 @@ * wise and this {@link IIndexManager} MUST be able to read on the * {@link ILocalBTreeView}. */ - public IIndexManager getIndexManager() { + final public IIndexManager getIndexManager() { return queryEngine.getIndexManager(); } @@ -73,11 +77,21 @@ * {@link IBigdataFederation}, this reference provides access to the * scale-out view of the indices and to other bigdata services. 
*/ - public IBigdataFederation<?> getFederation() { + final public IBigdataFederation<?> getFederation() { return queryEngine.getFederation(); } /** + * Return the {@link Executor} on to which the operator may submit tasks. + * <p> + * Note: The is the {@link ExecutorService} associated with the + * <em>local</em> {@link #getIndexManager() index manager}. + */ + public final Executor getExecutorService() { + return getIndexManager().getExecutorService(); + } + + /** * * @param indexManager * The <strong>local</strong> {@link IIndexManager}. Query Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -27,6 +27,7 @@ package com.bigdata.bop; +import java.util.Collections; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; @@ -374,7 +375,7 @@ * @param op * A {@link BOp}. * - * @return The index. + * @return The index, which is immutable and thread-safe. * * @throws DuplicateBOpIdException * if there are two or more {@link BOp}s having the same @@ -412,7 +413,8 @@ throw new DuplicateBOpException(t.toString()); } } - return map; + // wrap to ensure immutable and thread-safe. + return Collections.unmodifiableMap(map); } /** Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineOp.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -209,6 +209,16 @@ protected static transient final TimeUnit chunkTimeoutUnit = TimeUnit.MILLISECONDS; /** + * Return the {@link PipelineType} of the operator (default + * {@link PipelineType#Vectored}). + */ + public PipelineType getPipelineType() { + + return PipelineType.Vectored; + + } + + /** * Return <code>true</code> iff {@link #newStats()} must be shared across * all invocations of {@link #eval(BOpContext)} for this operator for a * given query (default <code>false</code>). Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineType.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineType.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineType.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -0,0 +1,68 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 21, 2010 + */ + +package com.bigdata.bop; + +/** + * Return the type of pipelining supported by an operator. + * <p> + * Note: bigdata does not support tuple-at-a-time processing. Only vectored and + * operator-at-a-time processing. Tuple at a time processing is generally very + * inefficient. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public enum PipelineType { + + /** + * Vectored operators stream chunks of intermediate results from one + * operator to the next using producer / consumer pattern. Each time a set + * of intermediate results is available for a vectored operator, it is + * evaluated against those inputs producing another set of intermediate + * results for its target operator(s). Vectored operators may be evaluated + * many times during a given query and often have excellent parallelism due + * to the concurrent evaluation of the different operators on different sets + * of intermediate results. + */ + Vectored, + + /** + * The operator will run exactly once and must wait for all of its inputs to + * be assembled before it runs. + * <p> + * There are some operations for which this is always true, such as SORT. + * Other operations MAY use operator-at-once evaluation in order to benefit + * from a combination of more efficient IO patterns and simpler design. + * However, pipelined operators using large memory blocks have many of the + * benefits of operator-at-once evaluation. By deferring their evaluation + * until some minimum number of source data blocks are available, they may + * be evaluated once or more than once, depending on the data scale. + */ + OneShot; + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/PipelineType.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/ap/Predicate.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -30,6 +30,8 @@ import java.util.Map; +import cern.colt.Arrays; + import com.bigdata.bop.AbstractChunkedOrderedIteratorOp; import com.bigdata.bop.BOp; import com.bigdata.bop.Constant; @@ -414,7 +416,15 @@ for (Map.Entry<String, Object> e : annotations.entrySet()) { if (!first) sb.append(", "); - sb.append(e.getKey() + "=" + e.getValue()); + // @todo remove relation name hack when making relation name a scalar. 
+ if (Annotations.RELATION_NAME.equals(e.getKey()) + && e.getValue() != null + && e.getValue().getClass().isArray()) { + sb.append(e.getKey() + "=" + + Arrays.toString((String[]) e.getValue())); + } else { + sb.append(e.getKey() + "=" + e.getValue()); + } first = false; } sb.append("]"); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/CopyBindingSetOp.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -35,6 +35,7 @@ import com.bigdata.bop.BOpContext; import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.IConstraint; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IChunkAccessor; import com.bigdata.relation.accesspath.IAsynchronousIterator; @@ -56,6 +57,16 @@ */ private static final long serialVersionUID = 1L; + public interface Annotations extends BindingSetPipelineOp.Annotations { + + /** + * An optional {@link IConstraint}[] which places restrictions on the + * legal patterns in the variable bindings. + */ + String CONSTRAINTS = CopyBindingSetOp.class.getName() + ".constraints"; + + } + /** * Deep copy constructor. * @@ -75,10 +86,19 @@ super(args, annotations); } + /** + * @see Annotations#CONSTRAINTS + */ + public IConstraint[] constraints() { + + return getProperty(Annotations.CONSTRAINTS, null/* defaultValue */); + + } + public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { - return new FutureTask<Void>(new CopyTask(context)); - + return new FutureTask<Void>(new CopyTask(this, context)); + } /** @@ -90,11 +110,19 @@ static private class CopyTask implements Callable<Void> { private final BOpContext<IBindingSet> context; - - CopyTask(final BOpContext<IBindingSet> context) { - + + /** + * The constraint (if any) specified for the join operator. + */ + final private IConstraint[] constraints; + + CopyTask(final CopyBindingSetOp op, + final BOpContext<IBindingSet> context) { + this.context = context; - + + this.constraints = op.constraints(); + } public Void call() throws Exception { @@ -108,9 +136,10 @@ final IBindingSet[] chunk = source.next(); stats.chunksIn.increment(); stats.unitsIn.add(chunk.length); - sink.add(chunk); + final IBindingSet[] tmp = applyConstraints(chunk); + sink.add(tmp); if (sink2 != null) - sink2.add(chunk); + sink2.add(tmp); } sink.flush(); if (sink2 != null) @@ -124,6 +153,56 @@ } } - } + private IBindingSet[] applyConstraints(final IBindingSet[] chunk) { + + if (constraints == null) { + /* + * No constraints, copy all binding sets. + */ + + return chunk; + + } + + /* + * Copy binding sets which satisfy the constraint(s). + */ + + IBindingSet[] t = new IBindingSet[chunk.length]; + + int j = 0; + + for (int i = 0; i < chunk.length; i++) { + + final IBindingSet bindingSet = chunk[i]; + + if (context.isConsistent(constraints, bindingSet)) { + + t[j++] = bindingSet; + + } + + } + + if (j != chunk.length) { + + // allocate exact size array. + final IBindingSet[] tmp = (IBindingSet[]) java.lang.reflect.Array + .newInstance(chunk[0].getClass(), j); + + // make a dense copy. 
+ System.arraycopy(t/* src */, 0/* srcPos */, tmp/* dst */, + 0/* dstPos */, j/* len */); + + t = tmp; + + } + + return t; + + } + + } // class CopyTask + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/Union.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/Union.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/bset/Union.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -54,7 +54,7 @@ * be mapped across shards or nodes as appropriate for the parent. UNION runs on * the query controller. In order to avoid routing intermediate results through * the controller, the {@link BindingSetPipelineOp.Annotations#SINK_REF} of each - * child operand should be overriden to specify the parent of the UNION + * child operand should be overridden to specify the parent of the UNION * operator. * <p> * UNION can not be used when the intermediate results must be routed into the Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -67,21 +67,26 @@ * #of chunks in. */ final public CAT chunksIn = new CAT(); +// final public AtomicLong chunksIn = new AtomicLong(); /** * #of units sets in (tuples, elements, binding sets, etc). */ final public CAT unitsIn = new CAT(); +// final public AtomicLong unitsIn = new AtomicLong(); /** * #of chunks out. */ final public CAT chunksOut = new CAT(); +// final public AtomicLong chunksOut = new AtomicLong(); + /** * #of units sets in (tuples, elements, binding sets, etc). */ final public CAT unitsOut = new CAT(); +// final public AtomicLong unitsOut = new AtomicLong(); /** * Constructor. @@ -105,15 +110,20 @@ unitsIn.add(o.unitsIn.get()); unitsOut.add(o.unitsOut.get()); chunksOut.add(o.chunksOut.get()); +// chunksIn.addAndGet(o.chunksIn.get()); +// unitsIn.addAndGet(o.unitsIn.get()); +// unitsOut.addAndGet(o.unitsOut.get()); +// chunksOut.addAndGet(o.chunksOut.get()); } + public String toString() { final StringBuilder sb = new StringBuilder(); sb.append(getClass().getName()); - sb.append("{chunksIn=" + chunksIn.estimate_get()); - sb.append(",unitsIn=" + unitsIn.estimate_get()); - sb.append(",chunksOut=" + chunksOut.estimate_get()); - sb.append(",unitsOut=" + unitsOut.estimate_get()); + sb.append("{chunksIn=" + chunksIn.get()); + sb.append(",unitsIn=" + unitsIn.get()); + sb.append(",chunksOut=" + chunksOut.get()); + sb.append(",unitsOut=" + unitsOut.get()); toString(sb); // extension hook sb.append("}"); return sb.toString(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/HaltOpMessage.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -3,6 +3,8 @@ import java.io.Serializable; import java.util.UUID; +import com.bigdata.bop.BOp; + /** * A message sent to the {@link IQueryClient} when an operator is done executing * for some chunk of inputs. 
@@ -53,7 +55,7 @@ * scale-out, this is one per index partition over which the intermediate * results were mapped. */ - final public int sinkChunksOut; + final public int sinkMessagesOut; /** * The operator identifier for the alternative sink -or- <code>null</code> @@ -71,7 +73,7 @@ * results were mapped. It is zero if there was no alternative sink for the * operator. */ - final public int altSinkChunksOut; + final public int altSinkMessagesOut; /** * The statistics for the execution of the bop against the partition on the @@ -91,10 +93,19 @@ * The node which executed the operator. * @param cause * <code>null</code> unless execution halted abnormally. - * @param chunksOut - * A map reporting the #of binding set chunks which were output - * for each downstream operator for which at least one chunk of - * output was produced. + * @param sinkId + * The {@link BOp.Annotations#BOP_ID} of the default sink and + * <code>null</code> if there is no sink (for example, if this is + * the last operator in the pipeline). + * @param sinkMessagesOut + * The number of {@link IChunkMessage} which were sent to the + * operator for the default sink. + * @param altSinkId + * The {@link BOp.Annotations#BOP_ID} of the alternative sink and + * <code>null</code> if there is no alternative sink. + * @param altSinkMessagesOut + * The number of {@link IChunkMessage} which were sent to the + * operator for the alternative sink. * @param taskStats * The statistics for the execution of that bop on that shard and * service. @@ -103,8 +114,8 @@ // final UUID queryId, final int bopId, final int partitionId, final UUID serviceId, Throwable cause, // - final Integer sinkId, final int sinkChunksOut,// - final Integer altSinkId, final int altSinkChunksOut,// + final Integer sinkId, final int sinkMessagesOut,// + final Integer altSinkId, final int altSinkMessagesOut,// final BOpStats taskStats) { this.queryId = queryId; @@ -113,9 +124,9 @@ this.serviceId = serviceId; this.cause = cause; this.sinkId = sinkId; - this.sinkChunksOut = sinkChunksOut; + this.sinkMessagesOut = sinkMessagesOut; this.altSinkId = altSinkId; - this.altSinkChunksOut = altSinkChunksOut; + this.altSinkMessagesOut = altSinkMessagesOut; this.taskStats = taskStats; } @@ -128,9 +139,9 @@ if (cause != null) sb.append(",cause=" + cause); sb.append(",sinkId=" + sinkId); - sb.append(",sinkChunksOut=" + sinkChunksOut); + sb.append(",sinkChunksOut=" + sinkMessagesOut); sb.append(",altSinkId=" + altSinkId); - sb.append(",altSinkChunksOut=" + altSinkChunksOut); + sb.append(",altSinkChunksOut=" + altSinkMessagesOut); sb.append(",stats=" + taskStats); sb.append("}"); return sb.toString(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -49,4 +49,18 @@ */ void bufferReady(IChunkMessage<IBindingSet> msg) throws RemoteException; + /** + * Notify a service that the query has been terminated. The peer MUST NOT + * cancel the query synchronously as that can lead to a deadlock with the + * query controller. Instead, the peer should queue a task to cancel the + * query and then return. + * + * @param queryId + * The query identifier. + * @param cause + * The cause. 
When <code>null</code>, this is presumed to be + * normal query termination. + */ + void cancelQuery(UUID queryId, Throwable cause) throws RemoteException; + } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -56,7 +56,14 @@ IIndexManager getIndexManager(); /** - * Terminate query evaluation + * Cancel the running query (normal termination). + * <p> + * Note: This method provides a means for an operator to indicate that the + * query should halt immediately for reasons other than abnormal + * termination. + * <p> + * Note: For abnormal termination of a query, just throw an exception out of + * the query operator implementation. */ void halt(); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/PipelineUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/PipelineUtility.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/PipelineUtility.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -48,18 +48,16 @@ private static final Logger log = Logger.getLogger(PipelineUtility.class); /** - * Return <code>true</code> iff the <i>runningCountMap</i> AND - * <i>availableChunkMap</i> map are ZERO (0) for both the given operator and - * for all operators which proceed the given operator in the tree structure - * of its operands. + * Return <code>true</code> iff <i>availableChunkMap</i> map is ZERO (0) for + * the given operator and its descendants AND the <i>runningCountMap</i> is + * ZERO (0) for the operator and all descendants of the operator. For the + * purposes of this method, only {@link BOp#args() operands} are considered + * as descendants. * <p> - * Note: The movement of the intermediate binding set chunks forms an - * acyclic directed graph. We can decide whether or not a {@link BOp} in the - * query plan can be triggered by the current activity pattern by inspecting - * the {@link BOp} and its operands recursively. If neither the {@link BOp} - * nor any of its operands (recursively) has non-zero activity then the - * {@link BOp} can not be triggered and this method will return - * <code>true</code>. + * Note: The movement of the intermediate binding set chunks during query + * processing forms an acyclic directed graph. We can decide whether or not + * a {@link BOp} in the query plan can be triggered by the current activity + * pattern by inspecting the {@link BOp} and its operands recursively. * * @param bopId * The identifier for an operator which appears in the query @@ -92,8 +90,10 @@ if (queryPlan == null) throw new IllegalArgumentException(); + if (queryIndex == null) throw new IllegalArgumentException(); + if (availableChunkCountMap == null) throw new IllegalArgumentException(); @@ -103,7 +103,7 @@ throw new NoSuchBOpException(bopId); final Iterator<BOp> itr = BOpUtility.preOrderIterator(op); - + while (itr.hasNext()) { final BOp t = itr.next(); @@ -112,8 +112,17 @@ if (id == null) continue; + { + /* + * If the operator is running then it is, defacto, "not done." 
+ * + * If any descendants of the operator are running, then they + * could cause the operator to be re-triggered and it is "not + * done." + */ + final AtomicLong runningCount = runningCountMap.get(id); if (runningCount != null && runningCount.get() != 0) { @@ -125,11 +134,16 @@ return false; } - + } { - + + /* + * Any chunks available for the operator in question or any of + * its descendants could cause that operator to be triggered. + */ + final AtomicLong availableChunkCount = availableChunkCountMap .get(id); Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -41,31 +41,16 @@ import org.apache.log4j.Logger; -import alice.tuprolog.Prolog; - import com.bigdata.bop.BOp; import com.bigdata.bop.BindingSetPipelineOp; import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.IPredicate; -import com.bigdata.bop.bset.Union; import com.bigdata.btree.BTree; import com.bigdata.btree.IndexSegment; import com.bigdata.btree.view.FusedView; import com.bigdata.journal.IIndexManager; -import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.spo.SPORelation; -import com.bigdata.relation.IMutableRelation; -import com.bigdata.relation.IRelation; -import com.bigdata.relation.accesspath.IElementFilter; -import com.bigdata.relation.rule.IRule; -import com.bigdata.relation.rule.Program; -import com.bigdata.relation.rule.eval.pipeline.DistributedJoinTask; import com.bigdata.resources.IndexManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; -import com.bigdata.service.ndx.IAsynchronousWriteBufferFactory; -import com.bigdata.striterator.ChunkedArrayIterator; -import com.bigdata.striterator.IChunkedOrderedIterator; /** * A class managing execution of concurrent queries against a local @@ -185,132 +170,6 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ * - * - * FIXME Unit tests for non-distinct {@link IElementFilter}s on an - * {@link IPredicate}, unit tests for distinct element filter on an - * {@link IPredicate} which is capable of distributed operations. Do not use - * distinct where not required (SPOC, only one graph, etc). - * <p> - * It seems like the right way to approach this is by unifying the stackable CTC - * striterator pattern with the chunked iterator pattern and passing the query - * engine (or the bop context) into the iterator construction process (or simply - * requesting that the query engine construct the iterator stack). - * <p> - * In terms of harmonization, it is difficult to say which way would work - * better. In the short term we could simply allow both and mask the differences - * in how we construct the filters, but the conversion to/from striterators and - * chunked iterators seems to waste a bit of effort. - * <p> - * The trickiest part of all of this is to allow a distributed filter pattern - * where the filter gets created on a set of nodes identified by the operator - * and the elements move among those nodes using the query engine's buffers. - * <p> - * To actually implement the distributed distinct filter we need to stack the - * following: - * - * <pre> - * - ITupleIterator - * - Resolve ITuple to Element (e.g., SPOC). 
- * - Layer on optional IElementFilter associated with the IPredicate. - * - Layer on SameVariableConstraint iff required (done by AccessPath) - * - Resolve SPO to SPO, stripping off the context position. - * - Chunk SPOs (SPO[], IKeyOrder), where the key order is from the access path. - * - Filter SPO[] using DHT constructed on specified nodes of the cluster. - * The SPO[] chunks should be packaged into NIO buffers and shipped to those - * nodes. The results should be shipped back as a bit vectors packaged into - * a NIO buffers. - * - Dechunk SPO[] to SPO since that is the current expectation for the filter - * stack. - * - The result then gets wrapped as a {@link IChunkedOrderedIterator} by - * the AccessPath using a {@link ChunkedArrayIterator}. - * </pre> - * - * This stack is a bit complex(!). But it is certainly easy enough to generate - * the necessary bits programmatically. - * - * FIXME Handling the {@link Union} of binding sets. Consider whether the chunk - * combiner logic from the {@link DistributedJoinTask} could be reused. - * - * FIXME INSERT and DELETE which will construct elements using - * {@link IRelation#newElement(java.util.List, IBindingSet)} from a binding set - * and then use {@link IMutableRelation#insert(IChunkedOrderedIterator)} and - * {@link IMutableRelation#delete(IChunkedOrderedIterator)}. For s/o, we first - * need to move the bits into the right places so it makes sense to unpack the - * processing of the loop over the elements and move the data around, writing on - * each index as necessary. There could be eventually consistent approaches to - * this as well. For justifications we need to update some additional indices, - * in which case we are stuck going through {@link IRelation} rather than - * routing data directly or using the {@link IAsynchronousWriteBufferFactory}. - * For example, we could handle routing and writing in s/o as follows: - * - * <pre> - * INSERT(relation,bindingSets) - * - * expands to - * - * SEQUENCE( - * SELECT(s,p,o), // drop bindings that we do not need - * PARALLEL( - * INSERT_INDEX(spo), // construct (s,p,o) elements and insert - * INSERT_INDEX(pos), // construct (p,o,s) elements and insert - * INSERT_INDEX(osp), // construct (o,s,p) elements and insert - * )) - * - * </pre> - * - * The output of the SELECT operator would be automatically mapped against the - * shards on which the next operators need to write. Since there is a nested - * PARALLEL operator, the mapping will be against the shards of each of the - * given indices. (A simpler operator would invoke - * {@link SPORelation#insert(IChunkedOrderedIterator)}. Handling justifications - * requires that we also formulate the justification chain from the pattern of - * variable bindings in the rule). - * - * FIXME Handle {@link Program}s. There are three flavors, which should probably - * be broken into three operators: sequence(ops), set(ops), and closure(op). The - * 'set' version would be parallelized, or at least have an annotation for - * parallel evaluation. These things belong in the same broad category as the - * join graph since they are operators which control the evaluation of other - * operators (the current pipeline join also has that characteristic which it - * uses to do the nested index subqueries). - * - * FIXME SPARQL to BOP translation - * <p> - * The initial pass should translate from {@link IRule} to {@link BOp}s so we - * can immediately begin running SPARQL queries against the {@link QueryEngine}. 
- * A second pass should explore a rules base translation from the openrdf SPARQL - * operator tree into {@link BOp}s, perhaps using an embedded {@link Prolog} - * engine. What follows is a partial list of special considerations for that - * translation: - * <ul> - * <li>Distinct can be trivially enforced for default graph queries against the - * SPOC index.</li> - * <li>Local distinct should wait until there is more than one tuple from the - * index since a single tuple does not need to be made distinct using a hash - * map.</li> - * <li>Low volume distributed queries should use solution modifiers which - * evaluate on the query controller node rather than using distributed sort, - * distinct, slice, or aggregation operators.</li> - * <li></li> - * <li></li> - * <li></li> - * <li>High volume queries should use special operators (different - * implementations of joins, use an external merge sort, etc).</li> - * </ul> - * - * FIXME SPARQL Coverage: Add native support for all SPARQL operators. A lot of - * this can be picked up from Sesame. Some things, such as isIRI() can be done - * natively against the {@link IV}. Likewise, there is already a set of - * comparison methods for {@link IV}s which are inlined values. Add support for - * <ul> - * <li></li> - * <li></li> - * <li></li> - * <li></li> - * <li></li> - * <li></li> - * </ul> - * * @todo Expander patterns will continue to exist until we handle the standalone * backchainers in a different manner for scale-out so add support for * those for now. @@ -536,6 +395,8 @@ if (q.isCancelled()) continue; final IChunkMessage<IBindingSet> chunk = q.chunksIn.poll(); + if (chunk == null) + continue; if (log.isTraceEnabled()) log.trace("Accepted chunk: " + chunk); try { @@ -820,6 +681,9 @@ */ protected RunningQuery getRunningQuery(final UUID queryId) { + if(queryId == null) + throw new IllegalArgumentException(); + return runningQueries.get(queryId); } @@ -868,4 +732,13 @@ } + /** + * {@inheritDoc} + * <p> + * The default implementation is a NOP. + */ + public void cancelQuery(UUID queryId, Throwable cause) { + // NOP + } + } Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngineTestAnnotations.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngineTestAnnotations.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngineTestAnnotations.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -0,0 +1,69 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*
+ * Created on Sep 21, 2010
+ */
+
+package com.bigdata.bop.engine;
+
+import com.bigdata.bop.PipelineOp;
+
+/**
+ * Annotations understood by the {@link QueryEngine} which are used for some
+ * unit tests but which should not be used for real queries.
+ * <p>
+ * Note: This class is in the main source tree because {@link QueryEngine}
+ * references it, but the annotations defined here should only be specified from
+ * within a unit test.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ * @version $Id$
+ */
+public interface QueryEngineTestAnnotations {
+
+ /**
+ * When <code>true</code>, each chunk will be sent out using its own
+ * {@link IChunkMessage}. Otherwise the {@link QueryEngine} MAY (and
+ * generally does) combine the chunks in the output of a given operator
+ * evaluation pass into a single {@link IChunkMessage} per target query
+ * peer.
+ * <p>
+ * Note: This annotation was introduced to make it easier to control the #of
+ * {@link IChunkMessage}s output from a given operator and thereby diagnose
+ * {@link RunState} termination conditions linked to having multiple
+ * {@link IChunkMessage}s.
+ * <p>
+ * Note: Just controlling the {@link PipelineOp.Annotations#CHUNK_CAPACITY}
+ * and {@link PipelineOp.Annotations#CHUNK_OF_CHUNKS_CAPACITY} is not enough
+ * to force the {@link QueryEngine} to run an operator once per source
+ * chunk. The {@link QueryEngine} normally combines chunks together. You
+ * MUST also specify this annotation in order for the query engine to send
+ * multiple {@link IChunkMessage}s rather than just one.
+ */
+ String ONE_MESSAGE_PER_CHUNK = QueryEngineTestAnnotations.class.getName()
+ + ".oneMessagePerChunk";
+
+ boolean DEFAULT_ONE_MESSAGE_PER_CHUNK = false;
+
+}
Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngineTestAnnotations.java
___________________________________________________________________
Added: svn:keywords
 + Id Date Revision Author HeadURL
Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java
===================================================================
--- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java (rev 0)
+++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java 2010-09-23 20:10:58 UTC (rev 3617)
@@ -0,0 +1,98 @@
+package com.bigdata.bop.engine;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.bigdata.relation.accesspath.IAsynchronousIterator;
+
+/**
+ * Delegate pattern cancels the {@link RunningQuery} when the iterator is
+ * {@link #close() closed} and signals normal completion of the query once the
+ * iterator is exhausted.
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +class QueryResultIterator<E> implements IAsynchronousIterator<E> { + + private final RunningQuery runningQuery; + + private final IAsynchronousIterator<E> src; + + private final AtomicBoolean open = new AtomicBoolean(true); + + public QueryResultIterator(final RunningQuery runningQuery, + final IAsynchronousIterator<E> src) { + + if (runningQuery == null) + throw new IllegalArgumentException(); + + if (src == null) + throw new IllegalArgumentException(); + + this.runningQuery = runningQuery; + + this.src = src; + + } + + public void close() { + if (open.compareAndSet(true/* expect */, false/* update */)) { + try { + runningQuery.cancel(true/* mayInterruptIfRunning */); + } finally { + src.close(); + } + } + } + + private void normalCompletion() { + if (open.compareAndSet(true/* expect */, false/* update */)) { + /* + * Note: DO NOT halt the query here!!!! That will cause it to not + * accept any more messages. Just close the source iterator. + */ +// try { +// runningQuery.halt(); +// } finally { + src.close(); +// } + } + } + + public boolean isExhausted() { +// return src.isExhausted(); + if (src.isExhausted()) { + normalCompletion(); + return true; + } + return false; + } + + public boolean hasNext() { +// return src.hasNext(); + if (!src.hasNext()) { + normalCompletion(); + return false; + } + return true; + } + + public boolean hasNext(long timeout, TimeUnit unit) + throws InterruptedException { + return src.hasNext(timeout, unit); + } + + public E next(long timeout, TimeUnit unit) throws InterruptedException { + return src.next(timeout, unit); + } + + public E next() { + return src.next(); + } + + public void remove() { + src.remove(); + } + +} Property changes on: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/QueryResultIterator.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-23 16:09:13 UTC (rev 3616) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/RunState.java 2010-09-23 20:10:58 UTC (rev 3617) @@ -29,11 +29,12 @@ import java.rmi.RemoteException; import java.util.Arrays; -import java.util.LinkedHashMap; +import java.util.Collections; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; @@ -42,12 +43,10 @@ import org.apache.log4j.Logger; import com.bigdata.bop.BOp; -import com.bigdata.util.InnerCause; +import com.bigdata.relation.accesspath.IBlockingBuffer; /** - * The run state for a {@link RunningQuery}. This class is NOT thread-safe. - * {@link RunningQuery} uses an internal lock to serialize requests against the - * public methods of this class. + * The run state for a {@link RunningQuery}. This class is thread-safe. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ @@ -66,19 +65,63 @@ } /** - * Note: Due to concurrency, it is possible for an {@link IChunkMessage} to - * be accepted and the corresponding chunk task started, before a - * {@link RunState#startOp(StartOpMessage)} transition has been fully - * processed. This means that the {@link RunState#totalAvailableChunkCount} - * can become transiently negative. This flag disables asserts which would - * otherwise fail on legal transient negatives. + * Message if the query has already started evaluation. */ - static private boolean availableChunkCountMayBeNegative = true; + static private final transient String ERR_QUERY_STARTED = "Query already running."; + + /** + * Message if query evaluation has already halted. + */ + static private final transient String ERR_QUERY_HALTED = "Query already halted."; + + /** + * Message if an operator addressed by a {@link HaltOpMessage} was never started. + */ + static private final transient String ERR_OP_NOT_STARTED = "Operator never ran."; + + /** + * Message if an operator addressed by a message has been halted. + */ + static private final transient String ERR_OP_HALTED = "Operator is not running."; + + /** + * Message if a query deadline has been exceeded. + */ + static private final transient String ERR_DEADLINE = "Query deadline is expired."; + + /** + * {@link RunningQuery#handleOutputChunk(BOp, int, IBlockingBuffer)} drops + * {@link IChunkMessage}s onto {@link RunningQuery#chunksIn} and drops the + * {@link RunningQuery} on {@link QueryEngine#runningQueries} as soon as + * output {@link IChunkMessage}s are generated. A {@link IChunkMessage} MAY + * be taken for evaluation as soon as it is published. This means that the + * operator which will consume that {@link IChunkMessage} can begin to + * execute <em>before</em> {@link RunningQuery#haltOp(HaltOpMessage)} is + * invoked to indicate the end of the operator which produced that + * {@link IChunkMessage}. + * <p> + * This is all fine. However, due to the potential overlap in these + * schedules {@link RunState#totalAvailableCount} can become transiently + * negative. This flag disables asserts which would otherwise fail on legal + * transient negatives. + */ + static private final boolean availableMessageCountMayBeNegative = true; /** + * Flag may be used to turn on stderr output. + */ + static private final boolean debug = true; + + /** * The query. */ - private final RunningQuery query; + private final BOp query; + + /** + * An index from {@link BOp.Annotations#BOP_ID} to {@link BOp} for the + * {@link #query}. + */ + private final Map<Integer,BOp> bopIndex; /** * The query identifier. @@ -94,36 +137,42 @@ private final long deadline; /** + * Set to <code>true</code> iff the query evaluation has begun. + * + * @see #startQuery(IChunkMessage) + */ + private final AtomicBoolean started = new AtomicBoolean(false); + + /** * Set to <code>true</code> iff the query evaluation is complete due to * normal termination. - * <p> - * Note: This is package private to expose it to {@link RunningQuery}. * * @see #haltOp(HaltOpMessage) */ - /*private*/ final AtomicBoolean allDone = new AtomicBoolean(false); + private final AtomicBoolean allDone = new AtomicBoolean(false); /** * The #of run state transitions which have occurred for this query. */ - private long nsteps = 0; + private final AtomicLong nsteps = new AtomicLong(); /** * The #of tasks for this query which have started but not yet halted. 
*/ - private long totalRunningTaskCount = 0; + private final AtomicLong totalRunningCount = new AtomicLong(); /** - * The #of chunks for this query of which a running task has made available - * but which have not yet been accepted for processing by another task. + * The #of {@link IChunkMessage} for the query which a running task has made + * available but which have not yet been accepted for processing by another + * task. */ - private long totalAvailableChunkCount = 0; + private final AtomicLong totalAvailableCount = new AtomicLong(); /** - * A map reporting the #of chunks available for each operator in the - * pipeline (we only report chunks for pipeline operators). The total #of - * chunks available across all operators in the pipeline is reported by - * {@link #totalAvailableChunkCount}. + * A map reporting the #of {@link IChunkMessage} available for each operator + * in the pipeline. The total #of {@link IChunkMessage}s available across + * all operators in the pipeline is reported by {@link #totalAvailableCount} + * . * <p> * The movement of the intermediate binding set chunks forms an acyclic * directed graph. This map is used to track the #of chunks available for @@ -132,62 +181,166 @@ * {@link BOp} had executed informing the {@link QueryEngine} on that node * that it should immediately release all resources associated with that * {@link BOp}. + * <p> + * Note: This collection is package private in order to expose its state to + * the unit tests. Since the map contains {@link AtomicLong}s it can not be + * readily exposed as {@link Map} object. If we were to expose the map, it + * would have to be via a get(key) style interface. */ - private final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableChunkCountMap = new LinkedHashMap<Integer, AtomicLong>(); + /* private */final Map<Integer/* bopId */, AtomicLong/* availableChunkCount */> availableMap = new ConcurrentHashMap<Integer, AtomicLong>(); /** * A collection reporting on the #of instances of a given {@link BOp} which * are concurrently executing. + * <p> + * Note: This collection is package private in order to expose its state to + * the unit tests. Since the map contains {@link AtomicLong}s it can not be + * readily exposed as {@link Map} object. If we were to expose the map, it + * would have to be via a get(key) style interface. */ - private final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningTaskCountMap = new LinkedHashMap<Integer, AtomicLong>(); + /* private */final Map<Integer/* bopId */, AtomicLong/* runningCount */> runningMap = new ConcurrentHashMap<Integer, AtomicLong>(); /** * A collection of the operators which have executed at least once. */ private final Set<Integer/* bopId */> startedSet = new LinkedHashSet<Integer>(); - public RunState(final RunningQuery query) { + /** + * Return the query identifier specified to the constructor. + */ + final public UUID getQueryId() { + return queryId; + } - this.query = query; + /** + * Return the deadline specified to the constructor. + */ + final public long getDeadline() { + return deadline; + } - this.queryId = query.getQueryId(); + /** + * Return <code>true</code> if evaluation of the query has been initiated + * using {@link #startQuery(IChunkMessage)}. + */ + final public boolean isStarted() { + return started.get(); + } - this.deadline = query.getDeadline(); - - // this.nops = query.bopIndex.size(); + /** + * Return <code>true</code> if the query is known to be completed based on + * the {@link #haltOp(HaltOpMessage)}. 
+ */ + final public boolean isAllDone() { + return allDone.get(); + } + /** + * The #of run state transitions which have occurred for this query. + */ + final public long getStepCount() { + return nsteps.get(); } - public void startQuery(final IChunkMessage<?> msg) { + /** + * The #... [truncated message content] |
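Although the r3617 message is truncated above, the QueryEngineTestAnnotations interface it adds is shown in full. A minimal sketch of how a unit test might apply the new annotation, using the StartOp/NV.asMap construction pattern seen elsewhere in this branch (the bop id and the choice of StartOp are illustrative assumptions, not part of the commit):

    import com.bigdata.bop.BOp;
    import com.bigdata.bop.NV;
    import com.bigdata.bop.bset.StartOp;
    import com.bigdata.bop.engine.QueryEngineTestAnnotations;

    // Test-only: ask the query engine to emit one IChunkMessage per output
    // chunk for this operator instead of combining chunks per target peer.
    final StartOp startOp = new StartOp(new BOp[] {}, NV.asMap(new NV[] {
            new NV(BOp.Annotations.BOP_ID, 1),
            new NV(QueryEngineTestAnnotations.ONE_MESSAGE_PER_CHUNK,
                    Boolean.TRUE),
    }));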
From: <mar...@us...> - 2010-09-23 16:09:19
Revision: 3616
http://bigdata.svn.sourceforge.net/bigdata/?rev=3616&view=rev
Author: martyncutcher
Date: 2010-09-23 16:09:13 +0000 (Thu, 23 Sep 2010)

Log Message:
-----------
Fix nwrites stats when using BufferedWrite

Modified Paths:
--------------
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2010-09-23 14:00:36 UTC (rev 3615)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2010-09-23 16:09:13 UTC (rev 3616)
@@ -55,34 +55,38 @@
 m_data = DirectBufferPool.INSTANCE.acquire();
 }
- public void write(final long offset, final ByteBuffer data, final IReopenChannel<FileChannel> opener) throws IOException {
+ public int write(final long offset, final ByteBuffer data, final IReopenChannel<FileChannel> opener) throws IOException {
+ int nwrites = 0;
+
 m_dataWrites++;
 int data_len = data.remaining();
 int slot_len = m_store.getSlotSize(data_len);
 if (slot_len > m_data.remaining()) {
- flush(opener);
+ nwrites += flush(opener);
 }
 if (m_startAddr == -1) {
 m_startAddr = m_endAddr = offset;
 } else if (m_endAddr != offset) {
 // if this is NOT a contiguous write then flush existing content
- flush(opener);
+ nwrites += flush(opener);
 m_startAddr = m_endAddr = offset;
 }
 m_data.put(data);
 m_endAddr += slot_len;
 long pos = m_endAddr - m_startAddr;
 m_data.position((int) pos);
+
+ return nwrites;
 }
- public void flush(final IReopenChannel<FileChannel> opener) throws IOException {
+ public int flush(final IReopenChannel<FileChannel> opener) throws IOException {
 m_dataBytes += m_data.position();
 m_data.flip();
- FileChannelUtility.writeAll(opener, m_data, m_startAddr);
+ final int nwrites = FileChannelUtility.writeAll(opener, m_data, m_startAddr);
 m_fileWrites++;
 m_data.position(0);
@@ -90,6 +94,8 @@
 m_startAddr = -1;
 m_endAddr = 0;
+
+ return nwrites;
 }
 public String getStats(StringBuffer buf, boolean reset) {

Modified: branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java
===================================================================
--- branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2010-09-23 14:00:36 UTC (rev 3615)
+++ branches/JOURNAL_HA_BRANCH/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2010-09-23 16:09:13 UTC (rev 3616)
@@ -1659,7 +1659,7 @@
 if (m_bufferedWrite == null) {
 nwrites += FileChannelUtility.writeAll(opener, view, offset);
 } else {
- m_bufferedWrite.write(offset, view, opener);
+ nwrites += m_bufferedWrite.write(offset, view, opener);
 }
 // if (log.isInfoEnabled())
 // log.info("writing to: " + offset);
@@ -1667,7 +1667,7 @@
 }
 if (m_bufferedWrite != null) {
- m_bufferedWrite.flush(opener);
+ nwrites += m_bufferedWrite.flush(opener);
 if (log.isTraceEnabled())
 log.trace(m_bufferedWrite.getStats(null, true));
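To see the effect of r3616 in one place: BufferedWrite coalesces contiguous slots into a single channel write, flushing when the buffer fills or a non-contiguous offset breaks the run, so the caller cannot know the physical write count unless write() and flush() report it. Below is a simplified consolidation of the three caller-side touch points from the WriteCache diff above (loop structure and error handling elided; all calls shown are from the diff):

    // Sketch, excerpted and simplified from WriteCache in r3616.
    int nwrites = 0;
    if (m_bufferedWrite == null) {
        // Unbuffered path: writeAll() already reports its write count.
        nwrites += FileChannelUtility.writeAll(opener, view, offset);
    } else {
        // Buffered path: write() reports any flushes it was forced to do.
        nwrites += m_bufferedWrite.write(offset, view, opener);
    }
    // After the loop: drain whatever is still buffered and count it too.
    if (m_bufferedWrite != null) {
        nwrites += m_bufferedWrite.flush(opener);
    }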
From: <ble...@us...> - 2010-09-23 14:00:42
Revision: 3615
http://bigdata.svn.sourceforge.net/bigdata/?rev=3615&view=rev
Author: blevine218
Date: 2010-09-23 14:00:36 +0000 (Thu, 23 Sep 2010)

Log Message:
-----------
Allow federation.name to be overridden by setting -Dfederation.name=... when executing the ANT script. The value of the federation.name property is set to the empty string by default, in which case the application code assigns a consistent default value.

Modified Paths:
--------------
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/util/LookupStarter.java
branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml

Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/util/LookupStarter.java
===================================================================
--- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/util/LookupStarter.java 2010-09-22 23:37:50 UTC (rev 3614)
+++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/util/LookupStarter.java 2010-09-23 14:00:36 UTC (rev 3615)
@@ -71,21 +71,18 @@
 private String jiniLib = System.getProperty("jini.lib");
 private String jiniLibDl = System.getProperty("jini.lib.dl");
 private String localPolicy = System.getProperty("java.security.policy");
-
+ private static String group = null;
 private static String thisHost = null;
- private static String defaultGroup = null;
+
 static {
 try {
 thisHost = NicUtil.getIpAddress("default.nic", "default", false);
- //defaultGroup = System.getProperty("federation.name","bigdata.test.group-"+thisHost);
- defaultGroup = ConfigDeployUtil.getFederationName();
-
+ group = ConfigDeployUtil.getFederationName();
 } catch (Throwable t) { /* swallow */ }
 }
+
 private static String defaultCodebasePort = "23333";
- private static String group =
- System.getProperty("federation.name", defaultGroup);
 private static String codebasePortStr =
 System.getProperty("codebase.port", defaultCodebasePort);
 private static int codebasePort = Integer.parseInt(codebasePortStr);

Modified: branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml
===================================================================
--- branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml 2010-09-22 23:37:50 UTC (rev 3614)
+++ branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml 2010-09-23 14:00:36 UTC (rev 3615)
@@ -32,7 +32,10 @@
 <target name="setup" >
 <exec executable="hostname" outputproperty="this.hostname" />
 <property name="test.codebase" value="http://${this.hostname}:${test.codebase.port}/jsk-dl.jar" />
- <!-- <property name="federation.name" value="bigdata.test.group-${this.hostname}" /> -->
+
+ <!-- Set to the empty string to indicate "unset". Application code will determine correct
+ default value. -->
+ <property name="federation.name" value="" />
 </target>

 <target name="junit" description="starts http class server, lookup service, runs junit tests, stops lookup service, stops http class server."
@@ -100,9 +103,7 @@
 <sysproperty key="log4j.configuration" value="${log4j.configuration}" />
 <sysproperty key="codebase.port" value="${test.codebase.port}" />
 <sysproperty key="java.net.preferIPv4Stack" value="${java.net.preferIPv4Stack}" />
- <!--
 <sysproperty key="federation.name" value="${federation.name}" />
- -->
 <sysproperty key="default.nic" value="${default.nic}" />
 </java>
 </target>
@@ -122,9 +123,9 @@
 <sysproperty key="java.security.policy" value="${java.security.policy}" />
 <sysproperty key="log4j.configuration" value="${log4j.configuration}" />
 <sysproperty key="java.net.preferIPv4Stack" value="${java.net.preferIPv4Stack}" />
- <!--
+ <sysproperty key="federation.name" value="${federation.name}" />
- -->
+
 <sysproperty key="default.nic" value="${default.nic}" />
 <arg value="-stop" />
 </java>
@@ -215,13 +216,11 @@
 <sysproperty key="app.home" value="${app.home}" /> <!-- This is the deployment directory, easily accessed by the DataFinder class. -->
 <sysproperty key="log4j.path" value="${log4j.configuration}" />
 <sysproperty key="default.nic" value="${default.nic}" />
- <!-- Jini group name -->
- <!--
+
+ <!-- Jini group name -->
 <sysproperty key="federation.name" value="${federation.name}" />
- -->
-
+ <sysproperty key="java.class.path" value="${junit.classpath.text}" />
-
 <sysproperty key="classserver.jar" value="${deploy.lib}/classserver.jar" />
 <sysproperty key="colt.jar" value="${deploy.lib}/colt.jar" />
 <sysproperty key="ctc_utils.jar" value="${deploy.lib}/ctc_utils.jar" />
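For reference, the override described in the log message can be exercised from the command line, for example (the group value here is illustrative):

    ant -Dfederation.name=my.test.group -f test.xml junit

Because ANT properties are immutable once set, a -D definition supplied at invocation time wins over the later <property name="federation.name" value=""/> in the setup target. When no -D value is given, the empty string flows through and the application code assigns the default (per the log message; LookupStarter now reads it via ConfigDeployUtil.getFederationName()).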
From: <mrp...@us...> - 2010-09-22 23:37:57
Revision: 3614 http://bigdata.svn.sourceforge.net/bigdata/?rev=3614&view=rev Author: mrpersonick Date: 2010-09-22 23:37:50 +0000 (Wed, 22 Sep 2010) Log Message: ----------- adding Sesame to BOp conversion Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2010-09-22 23:36:12 UTC (rev 3613) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/IPredicate.java 2010-09-22 23:37:50 UTC (rev 3614) @@ -403,4 +403,13 @@ */ public int hashCode(); + /** + * Sets the {@link com.bigdata.bop.BOp.Annotations#BOP_ID} annotation. + * + * @param bopId + * The bop id. + * + * @return The newly annotated {@link IPredicate}. + */ + public IPredicate<E> setBOpId(int bopId); } Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-22 23:36:12 UTC (rev 3613) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/engine/Rule2BOpUtility.java 2010-09-22 23:37:50 UTC (rev 3614) @@ -27,26 +27,34 @@ package com.bigdata.bop.engine; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.log4j.Logger; import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpContextBase; +import com.bigdata.bop.BOpUtility; import com.bigdata.bop.BindingSetPipelineOp; +import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; -import com.bigdata.bop.IVariableOrConstant; +import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; -import com.bigdata.bop.Var; import com.bigdata.bop.ap.E; import com.bigdata.bop.ap.Predicate; -import com.bigdata.bop.bset.CopyBindingSetOp; import com.bigdata.bop.bset.StartOp; import com.bigdata.bop.join.PipelineJoin; -import com.bigdata.journal.ITx; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.relation.rule.IProgram; import com.bigdata.relation.rule.IRule; import com.bigdata.relation.rule.IStep; import com.bigdata.relation.rule.Program; -import com.bigdata.relation.rule.Rule; +import com.bigdata.relation.rule.eval.DefaultEvaluationPlan2; +import com.bigdata.relation.rule.eval.IRangeCountFactory; /** * Utility class converts {@link IRule}s to {@link BOp}s. @@ -63,6 +71,8 @@ */ public class Rule2BOpUtility { + protected static final Logger log = Logger.getLogger(Rule2BOpUtility.class); + /** * Convert an {@link IStep} into an operator tree. 
This should handle * {@link IRule}s and {@link IProgram}s as they are currently implemented @@ -73,12 +83,11 @@ * * @return */ - public static BindingSetPipelineOp convert(final IStep step, final int startId) { + public static BindingSetPipelineOp convert(final IStep step, + final int startId, final QueryEngine queryEngine) { - if (step instanceof Rule) - return convert((Rule) step, startId); - else if (step instanceof Program) - return convert((Program) step); + if (step instanceof IRule) + return convert((IRule) step, startId, queryEngine); throw new UnsupportedOperationException(); @@ -91,7 +100,8 @@ * * @return */ - public static BindingSetPipelineOp convert(final Rule rule, final int startId) { + public static BindingSetPipelineOp convert(final IRule rule, + final int startId, final QueryEngine queryEngine) { int bopId = startId; @@ -100,98 +110,119 @@ new NV(Predicate.Annotations.BOP_ID, bopId++),// })); - Iterator<Predicate> tails = rule.getTail(); + /* + * First put the tails in the correct order based on the logic in + * DefaultEvaluationPlan2. + */ + final BOpContextBase context = new BOpContextBase(queryEngine); + final DefaultEvaluationPlan2 plan = new DefaultEvaluationPlan2( + new IRangeCountFactory() { + public long rangeCount(final IPredicate pred) { + return context.getRelation(pred).getAccessPath(pred) + .rangeCount(false); + } + + }, rule); + + final int[] order = plan.getOrder(); + + /* + * Map the constraints from the variables they use. This way, we can + * properly attach constraints to only the first tail in which the + * variable appears. This way we only run the appropriate constraint + * once, instead of for every tail. + */ + final Map<IVariable<?>, Collection<IConstraint>> constraintsByVar = + new HashMap<IVariable<?>, Collection<IConstraint>>(); + for (int i = 0; i < rule.getConstraintCount(); i++) { + final IConstraint c = rule.getConstraint(i); + + if (log.isDebugEnabled()) { + log.debug(c); + } + + final Set<IVariable<?>> uniqueVars = new HashSet<IVariable<?>>(); + final Iterator<IVariable<?>> vars = BOpUtility.getSpannedVariables(c); + while (vars.hasNext()) { + final IVariable<?> v = vars.next(); + uniqueVars.add(v); + } + + for (IVariable<?> v : uniqueVars) { + + if (log.isDebugEnabled()) { + log.debug(v); + } + + Collection<IConstraint> constraints = constraintsByVar.get(v); + if (constraints == null) { + constraints = new LinkedList<IConstraint>(); + constraintsByVar.put(v, constraints); + } + constraints.add(c); + } + } + BindingSetPipelineOp left = startOp; - while (tails.hasNext()) { - + for (int i = 0; i < order.length; i++) { + final int joinId = bopId++; - final Predicate<?> pred = tails.next().setBOpId(bopId++); + // assign a bop id to the predicate + final IPredicate<?> pred = rule.getTail(order[i]).setBOpId(bopId++); - System.err.println(pred); + /* + * Collect all the constraints for this predicate based on which + * variables make their first appearance in this tail + */ + final Collection<IConstraint> constraints = + new LinkedList<IConstraint>(); + /* + * Peek through the predicate's args to find its variables. Use + * these to attach constraints to the join based on the variables + * that make their first appearance in this tail. + */ + for (BOp arg : pred.args()) { + if (arg instanceof IVariable) { + final IVariable<?> v = (IVariable) arg; + /* + * We do a remove because we don't ever need to run these + * constraints again during subsequent joins once they + * have been run once at the initial appearance of the + * variable. 
+ * + * FIXME revisit this when we dynamically re-order running + * joins + */ + if (constraintsByVar.containsKey(v)) + constraints.addAll(constraintsByVar.remove(v)); + } + } + final BindingSetPipelineOp joinOp = new PipelineJoin<E>(// left, pred,// NV.asMap(new NV[] {// - new NV(Predicate.Annotations.BOP_ID, joinId),// + new NV(BOp.Annotations.BOP_ID, joinId),// + new NV(PipelineJoin.Annotations.CONSTRAINTS, + constraints.size() > 0 ? + constraints.toArray(new IConstraint[constraints.size()]) : null),// + new NV(PipelineJoin.Annotations.OPTIONAL, pred.isOptional()),// })); left = joinOp; } + // just for now while i'm debugging System.err.println(toString(left)); -// test_query_join2(); - return left; } - public static void test_query_join2() { - - final String namespace = "ns"; - final int startId = 1; - final int joinId1 = 2; - final int predId1 = 3; - final int joinId2 = 4; - final int predId2 = 5; - - final BindingSetPipelineOp startOp = new StartOp(new BOp[] {}, - NV.asMap(new NV[] {// - new NV(Predicate.Annotations.BOP_ID, startId),// - })); - - final Predicate<?> pred1Op = new Predicate<E>(new IVariableOrConstant[] { - Var.var("x"), Var.var("y") }, NV - .asMap(new NV[] {// - new NV(Predicate.Annotations.RELATION_NAME, - new String[] { namespace }),// - new NV(Predicate.Annotations.PARTITION_ID, - Integer.valueOf(-1)),// - new NV(Predicate.Annotations.OPTIONAL, - Boolean.FALSE),// - new NV(Predicate.Annotations.CONSTRAINT, null),// - new NV(Predicate.Annotations.EXPANDER, null),// - new NV(Predicate.Annotations.BOP_ID, predId1),// - new NV(Predicate.Annotations.TIMESTAMP, ITx.READ_COMMITTED),// - })); - - final Predicate<?> pred2Op = new Predicate<E>(new IVariableOrConstant[] { - Var.var("y"), Var.var("z") }, NV - .asMap(new NV[] {// - new NV(Predicate.Annotations.RELATION_NAME, - new String[] { namespace }),// - new NV(Predicate.Annotations.PARTITION_ID, - Integer.valueOf(-1)),// - new NV(Predicate.Annotations.OPTIONAL, - Boolean.FALSE),// - new NV(Predicate.Annotations.CONSTRAINT, null),// - new NV(Predicate.Annotations.EXPANDER, null),// - new NV(Predicate.Annotations.BOP_ID, predId2),// - new NV(Predicate.Annotations.TIMESTAMP, ITx.READ_COMMITTED),// - })); - - final BindingSetPipelineOp join1Op = new PipelineJoin<E>(// - startOp, pred1Op,// - NV.asMap(new NV[] {// - new NV(Predicate.Annotations.BOP_ID, joinId1),// - })); - - final BindingSetPipelineOp join2Op = new PipelineJoin<E>(// - join1Op, pred2Op,// - NV.asMap(new NV[] {// - new NV(Predicate.Annotations.BOP_ID, joinId2),// - })); - - final BindingSetPipelineOp query = join2Op; - - System.err.println(toString(query)); - - } - private static String toString(BOp bop) { StringBuilder sb = new StringBuilder(); @@ -218,6 +249,13 @@ for (BOp arg : args) { toString(arg, sb, indent+4); } + IConstraint[] constraints = + bop.getProperty(PipelineJoin.Annotations.CONSTRAINTS); + if (constraints != null) { + for (IConstraint c : constraints) { + toString(c, sb, indent+4); + } + } } } @@ -228,6 +266,8 @@ * @param program * * @return + * + * FIXME What is the pattern for UNION? 
*/ public static BindingSetPipelineOp convert(final Program program) { Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2010-09-22 23:36:12 UTC (rev 3613) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataEvaluationStrategyImpl.java 2010-09-22 23:37:50 UTC (rev 3614) @@ -592,6 +592,11 @@ IStep query = createNativeQuery(join); if (query == null) { + + if (log.isDebugEnabled()) { + log.debug("query == null"); + } + return new EmptyIteration<BindingSet, QueryEvaluationException>(); } @@ -1522,8 +1527,12 @@ result = com.bigdata.bop.Var.var(name); } else { final IV iv = val.getIV(); - if (iv == null) + if (iv == null) { + if (log.isDebugEnabled()) { + log.debug("null IV: " + val); + } return null; + } result = new Constant<IV>(iv); } return result; @@ -1584,6 +1593,7 @@ if (log.isDebugEnabled()) { log.debug("var: " + var); log.debug("constant: " + constant); + log.debug("constant.getIV(): " + constant.getIV()); } if (var == null || constant == null || constant.getIV() == null) { if (log.isDebugEnabled()) { @@ -1644,15 +1654,16 @@ final IStep step) throws Exception { + final QueryEngine queryEngine = tripleSource.getSail().getQueryEngine(); + final int startId = 1; - final BindingSetPipelineOp query = Rule2BOpUtility.convert(step, startId); + final BindingSetPipelineOp query = + Rule2BOpUtility.convert(step, startId, queryEngine); if (log.isInfoEnabled()) { log.info(query); } - final QueryEngine queryEngine = tripleSource.getSail().getQueryEngine(); - final UUID queryId = UUID.randomUUID(); final RunningQuery runningQuery = queryEngine.eval(queryId, query, new LocalChunkMessage<IBindingSet>(queryEngine, queryId, Modified: branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java 2010-09-22 23:36:12 UTC (rev 3613) +++ branches/QUADS_QUERY_BRANCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBOps.java 2010-09-22 23:37:50 UTC (rev 3614) @@ -29,6 +29,7 @@ import java.util.Collection; import java.util.LinkedList; import java.util.Properties; +import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.openrdf.model.Literal; import org.openrdf.model.URI; @@ -44,6 +45,7 @@ import org.openrdf.query.TupleQueryResult; import org.openrdf.query.impl.BindingImpl; import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.lexicon.LexiconRelation; import com.bigdata.rdf.store.BD; import com.bigdata.rdf.vocab.NoVocabulary; @@ -137,27 +139,209 @@ "select * " + "WHERE { " + " ?s rdf:type ns:Person . " + - " ?s ns:likes ns:RDF . " + -// " ?s rdfs:label ?label . " + + " ?s ns:likes ?likes . " + + " ?s rdfs:label ?label . 
" + "}"; final TupleQuery tupleQuery = cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); TupleQueryResult result = tupleQuery.evaluate(); - while (result.hasNext()) { - System.err.println(result.next()); - } +// while (result.hasNext()) { +// System.err.println(result.next()); +// } Collection<BindingSet> solution = new LinkedList<BindingSet>(); solution.add(createBindingSet(new Binding[] { new BindingImpl("s", mike), -// new BindingImpl("likes", rdf), -// new BindingImpl("label", l1) + new BindingImpl("likes", rdf), + new BindingImpl("label", l1) })); solution.add(createBindingSet(new Binding[] { new BindingImpl("s", bryan), -// new BindingImpl("likes", rdf), + new BindingImpl("likes", rdf), + new BindingImpl("label", l2) + })); + + compare(result, solution); + + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + + public void testSimpleConstraint() throws Exception { + + final BigdataSail sail = getSail(); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + try { + + final ValueFactory vf = sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + URI jill = new URIImpl(ns+"Jill"); + URI jane = new URIImpl(ns+"Jane"); + URI person = new URIImpl(ns+"Person"); + URI age = new URIImpl(ns+"age"); + URI IQ = new URIImpl(ns+"IQ"); + Literal l1 = new LiteralImpl("Jill"); + Literal l2 = new LiteralImpl("Jane"); + Literal age1 = vf.createLiteral(20); + Literal age2 = vf.createLiteral(30); + Literal IQ1 = vf.createLiteral(130); + Literal IQ2 = vf.createLiteral(140); +/**/ + cxn.setNamespace("ns", ns); + + cxn.add(jill, RDF.TYPE, person); + cxn.add(jill, RDFS.LABEL, l1); + cxn.add(jill, age, age1); + cxn.add(jill, IQ, IQ1); + cxn.add(jane, RDF.TYPE, person); + cxn.add(jane, RDFS.LABEL, l2); + cxn.add(jane, age, age2); + cxn.add(jane, IQ, IQ2); + + /* + * Note: The either flush() or commit() is required to flush the + * statement buffers to the database before executing any operations + * that go around the sail. + */ + cxn.flush();//commit(); + cxn.commit();// + + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore()); + } + + { + + String query = + "PREFIX rdf: <"+RDF.NAMESPACE+"> " + + "PREFIX rdfs: <"+RDFS.NAMESPACE+"> " + + "PREFIX ns: <"+ns+"> " + + + "select * " + + "WHERE { " + + " ?s rdf:type ns:Person . " + + " ?s ns:age ?age . " + + " ?s ns:IQ ?iq . " + + " ?s rdfs:label ?label . " + + " FILTER( ?age < 25 && ?iq > 125 ) . 
" + + "}"; + + final TupleQuery tupleQuery = + cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + TupleQueryResult result = tupleQuery.evaluate(); + +// while (result.hasNext()) { +// System.err.println(result.next()); +// } + + Collection<BindingSet> solution = new LinkedList<BindingSet>(); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", jill), + new BindingImpl("age", age1), + new BindingImpl("iq", IQ1), + new BindingImpl("label", l1) + })); + + compare(result, solution); + + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + + public void testSimpleOptional() throws Exception { + + final BigdataSail sail = getSail(); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + try { + + final ValueFactory vf = sail.getValueFactory(); + + final String ns = BD.NAMESPACE; + + URI mike = new URIImpl(ns+"Mike"); + URI bryan = new URIImpl(ns+"Bryan"); + URI person = new URIImpl(ns+"Person"); + URI likes = new URIImpl(ns+"likes"); + URI rdf = new URIImpl(ns+"RDF"); + Literal l1 = new LiteralImpl("Mike"); + Literal l2 = new LiteralImpl("Bryan"); +/**/ + cxn.setNamespace("ns", ns); + + cxn.add(mike, RDF.TYPE, person); + cxn.add(mike, likes, rdf); + cxn.add(mike, RDFS.LABEL, l1); + cxn.add(bryan, RDF.TYPE, person); + cxn.add(bryan, likes, rdf); +// cxn.add(bryan, RDFS.LABEL, l2); + + /* + * Note: The either flush() or commit() is required to flush the + * statement buffers to the database before executing any operations + * that go around the sail. + */ + cxn.flush();//commit(); + cxn.commit();// + + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore()); + } + + { + + String query = + "PREFIX rdf: <"+RDF.NAMESPACE+"> " + + "PREFIX rdfs: <"+RDFS.NAMESPACE+"> " + + "PREFIX ns: <"+ns+"> " + + + "select * " + + "WHERE { " + + " ?s rdf:type ns:Person . " + + " ?s ns:likes ?likes . " + + " OPTIONAL { ?s rdfs:label ?label . 
} " + + "}"; + + final TupleQuery tupleQuery = + cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + TupleQueryResult result = tupleQuery.evaluate(); + +// while (result.hasNext()) { +// System.err.println(result.next()); +// } + + Collection<BindingSet> solution = new LinkedList<BindingSet>(); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", mike), + new BindingImpl("likes", rdf), + new BindingImpl("label", l1) + })); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", bryan), + new BindingImpl("likes", rdf), // new BindingImpl("label", l2) })); @@ -172,4 +356,96 @@ } + public void testOrEquals() throws Exception { + + final BigdataSail sail = getSail(); + sail.initialize(); + final BigdataSailRepository repo = new BigdataSailRepository(sail); + final BigdataSailRepositoryConnection cxn = + (BigdataSailRepositoryConnection) repo.getConnection(); + cxn.setAutoCommit(false); + + try { + + final ValueFactory vf = sail.getValueFactory(); + + final LexiconRelation lex = sail.getDatabase().getLexiconRelation(); + + final String ns = BD.NAMESPACE; + + URI mike = new URIImpl(ns+"Mike"); + URI bryan = new URIImpl(ns+"Bryan"); + URI martyn = new URIImpl(ns+"Martyn"); + URI person = new URIImpl(ns+"Person"); + URI p = new URIImpl(ns+"p"); + Literal l1 = new LiteralImpl("Mike"); + Literal l2 = new LiteralImpl("Bryan"); + Literal l3 = new LiteralImpl("Martyn"); +/**/ + cxn.setNamespace("ns", ns); + + cxn.add(mike, RDF.TYPE, person); + cxn.add(mike, RDFS.LABEL, l1); + cxn.add(bryan, RDF.TYPE, person); + cxn.add(bryan, RDFS.COMMENT, l2); + cxn.add(martyn, RDF.TYPE, person); + cxn.add(martyn, p, l3); + + /* + * Note: The either flush() or commit() is required to flush the + * statement buffers to the database before executing any operations + * that go around the sail. + */ + cxn.flush();//commit(); + cxn.commit();// + + if (log.isInfoEnabled()) { + log.info("\n" + sail.getDatabase().dumpStore()); + } + + { + + String query = + "PREFIX rdf: <"+RDF.NAMESPACE+"> " + + "PREFIX rdfs: <"+RDFS.NAMESPACE+"> " + + "PREFIX ns: <"+ns+"> " + + + "select * " + + "WHERE { " + + " ?s rdf:type ns:Person . " + + " ?s ?p ?label . " + + " FILTER ( ?p = rdfs:label || ?p = rdfs:comment ) . " + + "}"; + + final TupleQuery tupleQuery = + cxn.prepareTupleQuery(QueryLanguage.SPARQL, query); + TupleQueryResult result = tupleQuery.evaluate(); + +// while (result.hasNext()) { +// System.err.println(result.next()); +// } + + Collection<BindingSet> solution = new LinkedList<BindingSet>(); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", mike), + new BindingImpl("p", RDFS.LABEL), + new BindingImpl("label", l1) + })); + solution.add(createBindingSet(new Binding[] { + new BindingImpl("s", bryan), + new BindingImpl("p", RDFS.COMMENT), + new BindingImpl("label", l2) + })); + + compare(result, solution); + + } + + } finally { + cxn.close(); + sail.__tearDownUnitTest(); + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2010-09-22 23:36:19
Revision: 3613 http://bigdata.svn.sourceforge.net/bigdata/?rev=3613&view=rev Author: mrpersonick Date: 2010-09-22 23:36:12 +0000 (Wed, 22 Sep 2010) Log Message: ----------- pulling out some rangeCount stuff into a superclass for use by the evaluation plan Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java Added Paths: ----------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-22 21:39:51 UTC (rev 3612) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-22 23:36:12 UTC (rev 3613) @@ -60,7 +60,7 @@ * @param <E> * The generic type of the objects processed by the operator. */ -public class BOpContext<E> { +public class BOpContext<E> extends BOpContextBase { static private final Logger log = Logger.getLogger(BOpContext.class); @@ -102,6 +102,7 @@ * {@link IBigdataFederation}, this reference provides access to the * scale-out view of the indices and to other bigdata services. */ + @Override public IBigdataFederation<?> getFederation() { return runningQuery.getFederation(); } @@ -112,7 +113,8 @@ * wise and this {@link IIndexManager} MUST be able to read on the * {@link ILocalBTreeView}. */ - public final IIndexManager getIndexManager() { + @Override + public IIndexManager getIndexManager() { return runningQuery.getIndexManager(); } @@ -266,6 +268,9 @@ public BOpContext(final IRunningQuery runningQuery,final int partitionId, final BOpStats stats, final IAsynchronousIterator<E[]> source, final IBlockingBuffer<E[]> sink, final IBlockingBuffer<E[]> sink2) { + + super(null); + this.runningQuery = runningQuery; // if (indexManager == null) // throw new IllegalArgumentException(); @@ -300,223 +305,6 @@ } /** - * Locate and return the view of the relation(s) identified by the - * {@link IPredicate}. - * <p> - * Note: This method is responsible for returning a fused view when more - * than one relation name was specified for the {@link IPredicate}. It - * SHOULD be used whenever the {@link IRelation} is selected based on a - * predicate in the tail of an {@link IRule} and could therefore be a fused - * view of more than one relation instance. (The head of the {@link IRule} - * must be a simple {@link IRelation} and not a view.) - * <p> - * Note: The implementation should choose the read timestamp for each - * relation in the view using {@link #getReadTimestamp(String)}. - * - * @param pred - * The {@link IPredicate}, which MUST be a tail from some - * {@link IRule}. - * - * @return The {@link IRelation}. - * - * @todo Replaces {@link IJoinNexus#getTailRelationView(IPredicate)}. In - * order to support mutation operator we will also have to pass in the - * {@link #writeTimestamp} or differentiate this in the method name. - */ - public IRelation getRelation(final IPredicate<?> pred) { - - /* - * Note: This uses the federation as the index manager when locating a - * resource for scale-out. However, s/o reads must use the local index - * manager when actually obtaining the index view for the relation. - */ - final IIndexManager tmp = getFederation() == null ? 
getIndexManager() - : getFederation(); - - final long timestamp = (Long) pred - .getRequiredProperty(BOp.Annotations.TIMESTAMP); - - return (IRelation<?>) tmp.getResourceLocator().locate( - pred.getOnlyRelationName(), timestamp); - - } - -// /** -// * Return a writable view of the relation. -// * -// * @param namespace -// * The namespace of the relation. -// * -// * @return A writable view of the relation. -// * -// * @deprecated by getRelation() -// */ -// public IRelation getWriteRelation(final String namespace) { -// -// /* -// * @todo Cache the resource locator? -// * -// * @todo This should be using the federation as the index manager when -// * locating a resource for scale-out, right? But s/o writes must use -// * the local index manager when actually obtaining the index view for -// * the relation. -// */ -// return (IRelation) getIndexManager().getResourceLocator().locate( -// namespace, getWriteTimestamp()); -// -// } - - /** - * Obtain an access path reading from relation for the specified predicate - * (from the tail of some rule). - * <p> - * Note that passing in the {@link IRelation} is important since it - * otherwise must be discovered using the {@link IResourceLocator}. By - * requiring the caller to resolve it before hand and pass it into this - * method the contention and demand on the {@link IResourceLocator} cache is - * reduced. - * - * @param relation - * The relation. - * @param pred - * The predicate. When {@link IPredicate#getPartitionId()} is - * set, the returned {@link IAccessPath} MUST read on the - * identified local index partition (directly, not via RMI). - * - * @return The access path. - * - * @todo replaces - * {@link IJoinNexus#getTailAccessPath(IRelation, IPredicate)}. - */ - @SuppressWarnings("unchecked") - public IAccessPath<?> getAccessPath(final IRelation<?> relation, - final IPredicate<?> predicate) { - - if (relation == null) - throw new IllegalArgumentException(); - - if (predicate == null) - throw new IllegalArgumentException(); - // FIXME This should be as assigned by the query planner so the query is fully declarative. - final IKeyOrder keyOrder = relation.getKeyOrder((IPredicate) predicate); - - if (keyOrder == null) - throw new RuntimeException("No access path: " + predicate); - - final int partitionId = predicate.getPartitionId(); - - final long timestamp = (Long) predicate - .getRequiredProperty(BOp.Annotations.TIMESTAMP); - - final int flags = predicate.getProperty( - PipelineOp.Annotations.FLAGS, - PipelineOp.Annotations.DEFAULT_FLAGS) - | (TimestampUtility.isReadOnly(timestamp) ? IRangeQuery.READONLY - : 0); - - final int chunkOfChunksCapacity = predicate.getProperty( - PipelineOp.Annotations.CHUNK_OF_CHUNKS_CAPACITY, - PipelineOp.Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); - - final int chunkCapacity = predicate.getProperty( - PipelineOp.Annotations.CHUNK_CAPACITY, - PipelineOp.Annotations.DEFAULT_CHUNK_CAPACITY); - - final int fullyBufferedReadThreshold = predicate.getProperty( - PipelineOp.Annotations.FULLY_BUFFERED_READ_THRESHOLD, - PipelineOp.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD); - - final IIndexManager indexManager = getIndexManager(); - - if (predicate.getPartitionId() != -1) { - - /* - * Note: This handles a read against a local index partition. For - * scale-out, the [indexManager] will be the data service's local - * index manager. - * - * Note: Expanders ARE NOT applied in this code path. Expanders - * require a total view of the relation, which is not available - * during scale-out pipeline joins. 
Likewise, the [backchain] - * property will be ignored since it is handled by an expander. - * - * @todo Replace this with IRelation#getAccessPathForIndexPartition() - */ -// return ((AbstractRelation<?>) relation) -// .getAccessPathForIndexPartition(indexManager, -// (IPredicate) predicate); - /* - * @todo This condition should probably be an error since the expander - * will be ignored. - */ -// if (predicate.getSolutionExpander() != null) -// throw new IllegalArgumentException(); - - final String namespace = relation.getNamespace();//predicate.getOnlyRelationName(); - - // The name of the desired index partition. - final String name = DataService.getIndexPartitionName(namespace - + "." + keyOrder.getIndexName(), partitionId); - - // MUST be a local index view. - final ILocalBTreeView ndx = (ILocalBTreeView) indexManager - .getIndex(name, timestamp); - - return new AccessPath(relation, indexManager, timestamp, - predicate, keyOrder, ndx, flags, chunkOfChunksCapacity, - chunkCapacity, fullyBufferedReadThreshold).init(); - - } - - /* - * Find the best access path for the predicate for that relation. - * - * @todo Replace this with IRelation#getAccessPath(IPredicate) once the - * bop conversion is done. It is the same logic. - */ - IAccessPath accessPath; - { - -// accessPath = relation.getAccessPath((IPredicate) predicate); - - final IIndex ndx = relation.getIndex(keyOrder); - - if (ndx == null) { - - throw new IllegalArgumentException("no index? relation=" - + relation.getNamespace() + ", timestamp=" - + timestamp + ", keyOrder=" + keyOrder + ", pred=" - + predicate + ", indexManager=" + getIndexManager()); - - } - - accessPath = new AccessPath((IRelation) relation, indexManager, - timestamp, (IPredicate) predicate, - (IKeyOrder) keyOrder, ndx, flags, chunkOfChunksCapacity, - chunkCapacity, fullyBufferedReadThreshold).init(); - - } - - /* - * @todo No expander's for bops, at least not right now. They could be - * added in easily enough, which would support additional features for - * standalone query evaluation (runtime materialization of some - * entailments). - */ - // final ISolutionExpander expander = predicate.getSolutionExpander(); - // - // if (expander != null) { - // - // // allow the predicate to wrap the access path - // accessPath = expander.getAccessPath(accessPath); - // - // } - - // return that access path. - return accessPath; - } - - /** * Binds variables from a visited element. * <p> * Note: The bindings are propagated before the constraints are verified so Added: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java (rev 0) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContextBase.java 2010-09-22 23:36:12 UTC (rev 3613) @@ -0,0 +1,313 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Aug 26, 2010 + */ +package com.bigdata.bop; + +import org.apache.log4j.Logger; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.btree.IIndex; +import com.bigdata.btree.ILocalBTreeView; +import com.bigdata.btree.IRangeQuery; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.TimestampUtility; +import com.bigdata.relation.IRelation; +import com.bigdata.relation.accesspath.AccessPath; +import com.bigdata.relation.accesspath.IAccessPath; +import com.bigdata.relation.locator.IResourceLocator; +import com.bigdata.relation.rule.IRule; +import com.bigdata.relation.rule.ISolutionExpander; +import com.bigdata.relation.rule.eval.IJoinNexus; +import com.bigdata.service.DataService; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.striterator.IKeyOrder; + +/** + * The evaluation context for the operator (NOT serializable). + * + * @param <E> + * The generic type of the objects processed by the operator. + */ +public class BOpContextBase { + + static private final Logger log = Logger.getLogger(BOpContextBase.class); + + private final QueryEngine queryEngine; + + /** + * The <strong>local</strong> {@link IIndexManager}. Query evaluation occurs + * against the local indices. In scale-out, query evaluation proceeds shard + * wise and this {@link IIndexManager} MUST be able to read on the + * {@link ILocalBTreeView}. + */ + public IIndexManager getIndexManager() { + return queryEngine.getIndexManager(); + } + + /** + * The {@link IBigdataFederation} IFF the operator is being evaluated on an + * {@link IBigdataFederation}. When evaluating operations against an + * {@link IBigdataFederation}, this reference provides access to the + * scale-out view of the indices and to other bigdata services. + */ + public IBigdataFederation<?> getFederation() { + return queryEngine.getFederation(); + } + + /** + * + * @param indexManager + * The <strong>local</strong> {@link IIndexManager}. Query + * evaluation occurs against the local indices. In scale-out, + * query evaluation proceeds shard wise and this + * {@link IIndexManager} MUST be able to read on the + * {@link ILocalBTreeView}. + * + */ + public BOpContextBase(final QueryEngine queryEngine) { + this.queryEngine = queryEngine; + } + + /** + * Locate and return the view of the relation(s) identified by the + * {@link IPredicate}. + * <p> + * Note: This method is responsible for returning a fused view when more + * than one relation name was specified for the {@link IPredicate}. It + * SHOULD be used whenever the {@link IRelation} is selected based on a + * predicate in the tail of an {@link IRule} and could therefore be a fused + * view of more than one relation instance. (The head of the {@link IRule} + * must be a simple {@link IRelation} and not a view.) + * <p> + * Note: The implementation should choose the read timestamp for each + * relation in the view using {@link #getReadTimestamp(String)}. + * + * @param pred + * The {@link IPredicate}, which MUST be a tail from some + * {@link IRule}. + * + * @return The {@link IRelation}. + * + * @todo Replaces {@link IJoinNexus#getTailRelationView(IPredicate)}. In + * order to support mutation operator we will also have to pass in the + * {@link #writeTimestamp} or differentiate this in the method name. 
+ */ + public IRelation getRelation(final IPredicate<?> pred) { + + /* + * Note: This uses the federation as the index manager when locating a + * resource for scale-out. However, s/o reads must use the local index + * manager when actually obtaining the index view for the relation. + */ + final IIndexManager tmp = getFederation() == null ? getIndexManager() + : getFederation(); + + final long timestamp = (Long) pred + .getRequiredProperty(BOp.Annotations.TIMESTAMP); + + return (IRelation<?>) tmp.getResourceLocator().locate( + pred.getOnlyRelationName(), timestamp); + + } + +// /** +// * Return a writable view of the relation. +// * +// * @param namespace +// * The namespace of the relation. +// * +// * @return A writable view of the relation. +// * +// * @deprecated by getRelation() +// */ +// public IRelation getWriteRelation(final String namespace) { +// +// /* +// * @todo Cache the resource locator? +// * +// * @todo This should be using the federation as the index manager when +// * locating a resource for scale-out, right? But s/o writes must use +// * the local index manager when actually obtaining the index view for +// * the relation. +// */ +// return (IRelation) getIndexManager().getResourceLocator().locate( +// namespace, getWriteTimestamp()); +// +// } + + /** + * Obtain an access path reading from relation for the specified predicate + * (from the tail of some rule). + * <p> + * Note that passing in the {@link IRelation} is important since it + * otherwise must be discovered using the {@link IResourceLocator}. By + * requiring the caller to resolve it before hand and pass it into this + * method the contention and demand on the {@link IResourceLocator} cache is + * reduced. + * + * @param relation + * The relation. + * @param pred + * The predicate. When {@link IPredicate#getPartitionId()} is + * set, the returned {@link IAccessPath} MUST read on the + * identified local index partition (directly, not via RMI). + * + * @return The access path. + * + * @todo replaces + * {@link IJoinNexus#getTailAccessPath(IRelation, IPredicate)}. + */ + @SuppressWarnings("unchecked") + public IAccessPath<?> getAccessPath(final IRelation<?> relation, + final IPredicate<?> predicate) { + + if (relation == null) + throw new IllegalArgumentException(); + + if (predicate == null) + throw new IllegalArgumentException(); + // FIXME This should be as assigned by the query planner so the query is fully declarative. + final IKeyOrder keyOrder = relation.getKeyOrder((IPredicate) predicate); + + if (keyOrder == null) + throw new RuntimeException("No access path: " + predicate); + + final int partitionId = predicate.getPartitionId(); + + final long timestamp = (Long) predicate + .getRequiredProperty(BOp.Annotations.TIMESTAMP); + + final int flags = predicate.getProperty( + PipelineOp.Annotations.FLAGS, + PipelineOp.Annotations.DEFAULT_FLAGS) + | (TimestampUtility.isReadOnly(timestamp) ? 
IRangeQuery.READONLY + : 0); + + final int chunkOfChunksCapacity = predicate.getProperty( + PipelineOp.Annotations.CHUNK_OF_CHUNKS_CAPACITY, + PipelineOp.Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); + + final int chunkCapacity = predicate.getProperty( + PipelineOp.Annotations.CHUNK_CAPACITY, + PipelineOp.Annotations.DEFAULT_CHUNK_CAPACITY); + + final int fullyBufferedReadThreshold = predicate.getProperty( + PipelineOp.Annotations.FULLY_BUFFERED_READ_THRESHOLD, + PipelineOp.Annotations.DEFAULT_FULLY_BUFFERED_READ_THRESHOLD); + + final IIndexManager indexManager = getIndexManager(); + + if (predicate.getPartitionId() != -1) { + + /* + * Note: This handles a read against a local index partition. For + * scale-out, the [indexManager] will be the data service's local + * index manager. + * + * Note: Expanders ARE NOT applied in this code path. Expanders + * require a total view of the relation, which is not available + * during scale-out pipeline joins. Likewise, the [backchain] + * property will be ignored since it is handled by an expander. + * + * @todo Replace this with IRelation#getAccessPathForIndexPartition() + */ +// return ((AbstractRelation<?>) relation) +// .getAccessPathForIndexPartition(indexManager, +// (IPredicate) predicate); + /* + * @todo This condition should probably be an error since the expander + * will be ignored. + */ +// if (predicate.getSolutionExpander() != null) +// throw new IllegalArgumentException(); + + final String namespace = relation.getNamespace();//predicate.getOnlyRelationName(); + + // The name of the desired index partition. + final String name = DataService.getIndexPartitionName(namespace + + "." + keyOrder.getIndexName(), partitionId); + + // MUST be a local index view. + final ILocalBTreeView ndx = (ILocalBTreeView) indexManager + .getIndex(name, timestamp); + + return new AccessPath(relation, indexManager, timestamp, + predicate, keyOrder, ndx, flags, chunkOfChunksCapacity, + chunkCapacity, fullyBufferedReadThreshold).init(); + + } + + /* + * Find the best access path for the predicate for that relation. + * + * @todo Replace this with IRelation#getAccessPath(IPredicate) once the + * bop conversion is done. It is the same logic. + */ + IAccessPath accessPath; + { + +// accessPath = relation.getAccessPath((IPredicate) predicate); + + final IIndex ndx = relation.getIndex(keyOrder); + + if (ndx == null) { + + throw new IllegalArgumentException("no index? relation=" + + relation.getNamespace() + ", timestamp=" + + timestamp + ", keyOrder=" + keyOrder + ", pred=" + + predicate + ", indexManager=" + getIndexManager()); + + } + + accessPath = new AccessPath((IRelation) relation, indexManager, + timestamp, (IPredicate) predicate, + (IKeyOrder) keyOrder, ndx, flags, chunkOfChunksCapacity, + chunkCapacity, fullyBufferedReadThreshold).init(); + + } + + /* + * @todo No expander's for bops, at least not right now. They could be + * added in easily enough, which would support additional features for + * standalone query evaluation (runtime materialization of some + * entailments). + * + * FIXME temporarily enabled expanders (mikep) + */ + final ISolutionExpander<?> expander = predicate.getSolutionExpander(); + + if (expander != null) { + + // allow the predicate to wrap the access path + accessPath = expander.getAccessPath(accessPath); + + } + + // return that access path. + return accessPath; + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
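The BOpContextBase class above centralizes relation and access path resolution for operators. A minimal sketch of the intended call pattern, assuming a BOpContextBase and an IPredicate are already in scope (the wrapper class and method below are illustrative only, not part of the commit):

import com.bigdata.bop.BOpContextBase;
import com.bigdata.bop.IPredicate;
import com.bigdata.relation.IRelation;
import com.bigdata.relation.accesspath.IAccessPath;

final class AccessPathSketch {

    /**
     * Resolve the relation once and hand it to getAccessPath(), which is
     * what the javadoc above asks callers to do in order to reduce demand
     * on the IResourceLocator cache.
     */
    static IAccessPath<?> resolve(final BOpContextBase context,
            final IPredicate<?> predicate) {

        final IRelation<?> relation = context.getRelation(predicate);

        return context.getAccessPath(relation, predicate);

    }

}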
From: <sgo...@us...> - 2010-09-22 21:39:58
Revision: 3612 http://bigdata.svn.sourceforge.net/bigdata/?rev=3612&view=rev Author: sgossard Date: 2010-09-22 21:39:51 +0000 (Wed, 22 Sep 2010) Log Message: ----------- [maven_scaleout] : Breaking cyclical dependencies with 'com.bigdata.rawstore'. Moved the IResourceMetadata getResourceMetadata() call from IRawStore to IJournal. All rawstore implementers stubbed out the functionality or threw exceptions, and the info returned by IResourceMetadata calls made sense for journals or segments, but not at all for rawstores. This conveniently broke all remaining transitive cycles for 'com.bigdata.cache', 'com.bigdata.io.compression', and 'com.bigdata.btree.data'.

Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedView.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractBufferStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/IJournal.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/BuildResult.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestBTree.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestTransientBTree.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/ReplicatedStore.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/TestReleaseResources.java

Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/SimpleResourceMetadata.java

Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -52,7 +52,6 @@
 import com.bigdata.btree.IndexMetadata.Options;
 import com.bigdata.btree.IndexSegment.IndexSegmentTupleCursor;
 import com.bigdata.btree.data.IAbstractNodeData;
-import com.bigdata.btree.data.ILeafData;
 import com.bigdata.btree.data.INodeData;
 import com.bigdata.btree.filter.IFilterConstructor;
 import com.bigdata.btree.filter.Reverserator;
@@ -68,7 +67,6 @@
 import com.bigdata.cache.HardReferenceQueueWithBatchingUpdates;
 import com.bigdata.cache.IHardReferenceQueue;
 import com.bigdata.cache.RingBuffer;
-import com.bigdata.cache.IGlobalLRU.ILRUCache;
 import com.bigdata.counters.CounterSet;
 import com.bigdata.counters.ICounterSet;
 import com.bigdata.counters.Instrument;
@@ -1357,7 +1355,7 @@
 return new IResourceMetadata[] {
- store.getResourceMetadata()
+ new SimpleResourceMetadata(store.getUUID())
 };

Added:
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/SimpleResourceMetadata.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/SimpleResourceMetadata.java (rev 0) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/SimpleResourceMetadata.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -0,0 +1,104 @@ +/* + * Created by IntelliJ IDEA. + * User: gossard + * Date: Sep 22, 2010 + * Time: 2:40:37 PM + */ +package com.bigdata.btree; + +import com.bigdata.mdi.IResourceMetadata; +import com.bigdata.rawstore.SimpleMemoryRawStore; + +import java.util.UUID; + +/** + * Dumb metadata object, used by a btree to return metadata about rawstores. + * This class was previously an inner-class in {@link com.bigdata.rawstore.SimpleMemoryRawStore SimpleMemoryRawStore}, + * but was moved into the btree package to remove a rawstore dependency on the mdi package. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class SimpleResourceMetadata implements IResourceMetadata { + + /** + * + */ + private static final long serialVersionUID = -8333003625527191826L; + + private final UUID uuid; + + public SimpleResourceMetadata(UUID uuid) { + if (uuid == null) + throw new NullPointerException("uuid cannot be null"); + this.uuid = uuid; + } + + @Override + public int hashCode() { + return uuid.hashCode(); + } + + //from java.lang.Object + public boolean equals(Object obj){ + if (obj instanceof SimpleResourceMetadata){ + SimpleResourceMetadata other = (SimpleResourceMetadata)obj; + return uuid.equals(other.uuid); + } else + return false; + } + + //from com.bigdata.mdi.IResourceMetadata, *NOT* java.lang.Object + public boolean equals(IResourceMetadata o) { + + return this.equals((Object)o); + } + + public long getCreateTime() { + + // does not support commit + return 0L; + + } + + public long getCommitTime() { + + // does not support commit + return 0L; + + } + + public String getFile() { + + // no backing file. + return null; + + } + + public UUID getUUID() { + + return uuid; + + } + + public boolean isIndexSegment() { + + // not index segment. + return false; + + } + + public boolean isJournal() { + + // not journal. + return false; + + } + +// public long size() { +// +// // #of bytes not available. 
+// return 0L; +// +// } + +} \ No newline at end of file Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/SimpleResourceMetadata.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedView.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedView.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/view/FusedView.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -399,7 +399,11 @@ for(AbstractBTree t : sources) { // for (int i = 0; i < srcs.length; i++) { - resources[i++] = t.getStore().getResourceMetadata(); + IResourceMetadata[] metaAboutBTree = t.getResourceMetadata(); + if (metaAboutBTree.length == 1) + resources[i++] = metaAboutBTree[0]; + else + throw new RuntimeException("BTree had wrong number of metadata items, should have been caught in unit tests."); } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractBufferStrategy.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractBufferStrategy.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractBufferStrategy.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -585,18 +585,6 @@ } /** - * Not supported - this is available on the {@link AbstractJournal}. - * - * @throws UnsupportedOperationException - * always - */ - public IResourceMetadata getResourceMetadata() { - - throw new UnsupportedOperationException(); - - } - - /** * Sets the <code>readOnly</code> flag. * <p> * Note: This method SHOULD be extended to release write caches, etc. Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/IJournal.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/IJournal.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/IJournal.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -26,6 +26,7 @@ import java.util.Properties; import com.bigdata.btree.keys.IKeyBuilderFactory; +import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.IMRMW; /** @@ -55,5 +56,9 @@ * Immediate shutdown. */ public void shutdownNow(); - + + /** + * A description of this store in support of the scale-out architecture. 
+ */ + public IResourceMetadata getResourceMetadata(); } Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/RWStrategy.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -479,10 +479,6 @@ return m_fileMetadata.raf; } - public IResourceMetadata getResourceMetadata() { - // TODO Auto-generated method stub - return null; - } public UUID getUUID() { return m_fileMetadata.rootBlock.getUUID(); Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -400,61 +400,6 @@ } - /** - * Note: Temporary stores do not have persistent resource descriptions. - */ - final public IResourceMetadata getResourceMetadata() { - - final File file = buf.getFile(); - - final String fileStr = file == null ? "" : file.toString(); - - return new ResourceMetadata(this, fileStr); - - } - - /** - * Static class since must be {@link Serializable}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - static final class ResourceMetadata extends AbstractResourceMetadata { - - /** - * De-serializator ctor. - */ - public ResourceMetadata() { - - } - - public ResourceMetadata(final TemporaryRawStore store, - final String fileStr) { - - super(fileStr, // store.buf.getExtent() - store.uuid,// - store.createTime, // - 0L// commitTime - ); - - } - - private static final long serialVersionUID = 1L; - - public boolean isJournal() { - - return false; - - } - - public boolean isIndexSegment() { - - return false; - - } - - } - final public DiskOnlyStrategy getBufferStrategy() { return buf; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -36,7 +36,6 @@ import com.bigdata.counters.CounterSet; import com.bigdata.io.IByteArrayBuffer; import com.bigdata.journal.AbstractJournal; -import com.bigdata.mdi.IResourceMetadata; /** * <p> @@ -242,12 +241,7 @@ * Return the {@link UUID} which identifies this {@link IRawStore}. This * supports {@link #getResourceMetadata()} */ - public UUID getUUID(); - - /** - * A description of this store in support of the scale-out architecture. - */ - public IResourceMetadata getResourceMetadata(); + public UUID getUUID(); /** * True iff backed by stable storage. 
Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -28,7 +28,6 @@ package com.bigdata.rawstore; import java.io.File; -import java.io.Serializable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -37,8 +36,6 @@ import java.util.concurrent.ExecutorService; import com.bigdata.counters.CounterSet; -import com.bigdata.journal.TemporaryRawStore; -import com.bigdata.mdi.IResourceMetadata; /** * A purely transient append-only implementation useful when data need to be @@ -48,7 +45,7 @@ * implementation does not contain things like {@link ExecutorService}s that * would hang around unless explicitly shutdown. * - * @see {@link TemporaryRawStore}, which provides a more scalable solution for + * @see {@link com.bigdata.journal.TemporaryRawStore TemporaryRawStore}, which provides a more scalable solution for * temporary data. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> @@ -147,91 +144,8 @@ return uuid; } - - public IResourceMetadata getResourceMetadata() { - return new ResourceMetadata(uuid); - - } - /** - * Static class since must be {@link Serializable}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - private static class ResourceMetadata implements IResourceMetadata { - - /** - * - */ - private static final long serialVersionUID = -8333003625527191826L; - - private final UUID uuid; - - public ResourceMetadata(UUID uuid) { - - this.uuid = uuid; - - } - - public boolean equals(IResourceMetadata o) { - - return this == o; - - } - - public long getCreateTime() { - - // does not support commit - return 0L; - - } - - public long getCommitTime() { - - // does not support commit - return 0L; - - } - - public String getFile() { - - // no backing file. - return null; - - } - - public UUID getUUID() { - - return uuid; - - } - - public boolean isIndexSegment() { - - // not index segment. - return false; - - } - - public boolean isJournal() { - - // not journal. - return false; - - } - -// public long size() { -// -// // #of bytes not available. -// return 0L; -// -// } - - } - - /** * This always returns <code>null</code>. 
*/ public File getFile() { Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/BuildResult.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/BuildResult.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/BuildResult.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -126,9 +126,11 @@ this.sources = new IResourceMetadata[sourceCount]; for (int i = 0; i < sourceCount; i++) { - - this.sources[i] = sources[i].getStore().getResourceMetadata(); - + IResourceMetadata[] metaAboutBTree = sources[i].getResourceMetadata(); + if (metaAboutBTree.length == 1) + this.sources[i] = metaAboutBTree[0]; + else + throw new RuntimeException("BTree had wrong number of metadata items, should have been caught in unit tests."); } this.segmentMetadata = segmentMetadata; Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestBTree.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestBTree.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestBTree.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -23,6 +23,8 @@ */ package com.bigdata.btree; +import com.bigdata.mdi.IResourceMetadata; + /** * Stress tests for basic tree operations (insert, lookup, and remove) without * causing node or leaf evictions (IO is disabled). @@ -198,6 +200,20 @@ } + public void test_verify_getResourceMetadata(){ + //transient requirements on getResourceMetadata() are verified in TestTrasientBTree. + + BTree tree = getBTree(3);//branching doesn't matter. + assertNotNull("didn't expect btree.store to be null in this test, transient tests are in TestTransientBTree",tree.store); + + IResourceMetadata[] metaList = tree.getResourceMetadata(); + + assertNotNull("cannot return null",metaList); + assertEquals("must return only one item", 1, metaList.length); + assertNotNull("item cannot be null",metaList[0]); + assertEquals("item uuid should match store uuid ",tree.getStore().getUUID(),metaList[0].getUUID()); + } + // /** // * The branching factors that will be used in the stress tests. The larger // * the branching factor, the longer the run for these tests. The very small Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestTransientBTree.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestTransientBTree.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestTransientBTree.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -36,6 +36,7 @@ import com.bigdata.btree.AbstractBTree.HardReference; import com.bigdata.btree.keys.TestKeyBuilder; +import com.bigdata.mdi.IResourceMetadata; import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.SimpleMemoryRawStore; @@ -443,7 +444,7 @@ * Loop until GC activity has caused references to be cleared. */ final int limit = 100; - for (int x = 0; x < limit; x++) { + for (int x = 0; x < limit; x++) { System.gc(); @@ -473,4 +474,15 @@ } + public void test_verify_getResourceMetadata(){ + //non-transient requirements on getResourceMetadata() are verified in TestBTree. 
+ BTree tree = BTree.createTransient(new IndexMetadata(UUID.randomUUID())); + + IResourceMetadata[] metaList = tree.getResourceMetadata(); + + assertNotNull("cannot return null",metaList); + assertEquals("must return only one item", 1, metaList.length); + assertNotNull("item cannot be null",metaList[0]); + } + } Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/ReplicatedStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/ReplicatedStore.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/ReplicatedStore.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -576,11 +576,7 @@ public UUID getUUID() { return localStore.getUUID(); - } - - public IResourceMetadata getResourceMetadata() { - return localStore.getResourceMetadata(); - } + } // public void packAddr(DataOutput out, long addr) throws IOException { // localStore.packAddr(out, addr); Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -138,97 +138,9 @@ return uuid; - } - - public IResourceMetadata getResourceMetadata() { + } - return new ResourceMetadata(uuid, file); - - } - /** - * Static class since must be {@link Serializable}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - */ - private static final class ResourceMetadata implements IResourceMetadata { - - /** - * - */ - private static final long serialVersionUID = -419665851049132640L; - - private final UUID uuid; - private final String fileStr; - -// private final long length; - - public ResourceMetadata(final UUID uuid, final File file) { - - this.uuid = uuid; - - this.fileStr = file.toString(); - -// this.length = file.length(); - - } - - public boolean equals(IResourceMetadata o) { - - return this == o; - - } - - public long getCreateTime() { - - // does not support commit - return 0L; - - } - - public long getCommitTime() { - - // does not support commit - return 0L; - - } - - public String getFile() { - - return fileStr; - - } - - public UUID getUUID() { - - return uuid; - - } - - public boolean isIndexSegment() { - - // not index segment. - return false; - - } - - public boolean isJournal() { - - // not journal. - return false; - - } - -// public long size() { -// -// return length; -// -// } - - } - - /** * This also releases the lock if any obtained by the constructor. 
*/ public void close() { Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/AbstractResourceManagerTestCase.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -48,13 +48,8 @@ import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.btree.proc.IIndexProcedure; import com.bigdata.counters.CounterSet; -import com.bigdata.journal.AbstractLocalTransactionManager; -import com.bigdata.journal.BufferMode; -import com.bigdata.journal.ConcurrencyManager; -import com.bigdata.journal.IResourceLockService; +import com.bigdata.journal.*; //BTM import com.bigdata.journal.ITransactionService; -import com.bigdata.journal.RegisterIndexTask; -import com.bigdata.journal.TemporaryStore; import com.bigdata.mdi.IMetadataIndex; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.mdi.IndexPartitionCause; @@ -79,7 +74,6 @@ import com.bigdata.util.httpd.AbstractHTTPD; //BTM -import com.bigdata.journal.TransactionService; import com.bigdata.service.IServiceShutdown; import com.bigdata.service.LoadBalancer; import com.bigdata.service.Service; @@ -692,7 +686,7 @@ * @param expected * @param actual */ - protected void assertSameResources(IRawStore[] expected, Set<UUID> actual) { + protected void assertSameResources(IJournal[] expected, Set<UUID> actual) { if(log.isInfoEnabled()) { Modified: branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/TestReleaseResources.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/TestReleaseResources.java 2010-09-22 20:21:19 UTC (rev 3611) +++ branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/resources/TestReleaseResources.java 2010-09-22 21:39:51 UTC (rev 3612) @@ -35,6 +35,7 @@ import java.util.concurrent.ExecutionException; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IJournal; import com.bigdata.rawstore.IRawStore; import com.bigdata.service.AbstractTransactionService; @@ -262,7 +263,7 @@ final Set<UUID> actual = resourceManager.getResourcesForTimestamp(commitTime); - assertSameResources(new IRawStore[] { j0, j1 }, actual); + assertSameResources(new IJournal[] { j0, j1 }, actual); } @@ -278,7 +279,7 @@ final Set<UUID> actual = resourceManager .getResourcesForTimestamp(commitTime); - assertSameResources(new IRawStore[] { j1 }, actual); + assertSameResources(new IJournal[] { j1 }, actual); } @@ -434,7 +435,7 @@ System.err.println("resources="+actualResourceUUIDs); - assertSameResources(new IRawStore[] { j1 }, // + assertSameResources(new IJournal[] { j1 }, // actualResourceUUIDs); } @@ -554,7 +555,7 @@ * Verify that the resources required for [A] after overflow are * exactly [j1]. */ - assertSameResources(new IRawStore[] { j1 }, // + assertSameResources(new IJournal[] { j1 }, // resourceManager.getResourcesForTimestamp(j1.getRootBlockView() .getFirstCommitTime())); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
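With getResourceMetadata() moved off IRawStore, code that previously asked a raw store to describe itself now builds the description from the store's UUID. A minimal sketch of the new pattern (the wrapper class is illustrative only, and it assumes SimpleMemoryRawStore's no-argument constructor):

import com.bigdata.btree.SimpleResourceMetadata;
import com.bigdata.mdi.IResourceMetadata;
import com.bigdata.rawstore.IRawStore;
import com.bigdata.rawstore.SimpleMemoryRawStore;

public class ResourceMetadataSketch {

    public static void main(final String[] args) {

        // IRawStore no longer exposes getResourceMetadata(); the UUID is
        // the only identity a raw store still reports.
        final IRawStore store = new SimpleMemoryRawStore();

        final IResourceMetadata md = new SimpleResourceMetadata(store.getUUID());

        // SimpleResourceMetadata is keyed purely by that UUID (see its
        // equals()/hashCode() in the added file above).
        System.out.println(md.getUUID().equals(store.getUUID())); // true

    }

}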
From: <tho...@us...> - 2010-09-22 20:21:25
Revision: 3611 http://bigdata.svn.sourceforge.net/bigdata/?rev=3611&view=rev Author: thompsonbry Date: 2010-09-22 20:21:19 +0000 (Wed, 22 Sep 2010) Log Message: ----------- Changes to BOpContext. I am committing these so MikeP can pull out a base class with getRelation(), getAccessPath(), getIndexManager(), and getExecutorService(). Modified Paths: -------------- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java Modified: branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-22 18:51:04 UTC (rev 3610) +++ branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/BOpContext.java 2010-09-22 20:21:19 UTC (rev 3611) @@ -27,14 +27,15 @@ */ package com.bigdata.bop; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; + import org.apache.log4j.Logger; import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IChunkMessage; import com.bigdata.bop.engine.IRunningQuery; -import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.engine.RunningQuery; -import com.bigdata.bop.solutions.SliceOp; import com.bigdata.btree.IIndex; import com.bigdata.btree.ILocalBTreeView; import com.bigdata.btree.IRangeQuery; @@ -115,6 +116,16 @@ return runningQuery.getIndexManager(); } + /** + * Return the {@link Executor} on to which the operator may submit tasks. + * <p> + * Note: The is the {@link ExecutorService} associated with the + * <em>local</em> {@link #getIndexManager() index manager}. + */ + public final Executor getExecutorService() { + return runningQuery.getIndexManager().getExecutorService(); + } + // /** // * The timestamp or transaction identifier against which the query is // * reading. @@ -386,7 +397,7 @@ if (predicate == null) throw new IllegalArgumentException(); - + // FIXME This should be as assigned by the query planner so the query is fully declarative. final IKeyOrder keyOrder = relation.getKeyOrder((IPredicate) predicate); if (keyOrder == null) @@ -617,7 +628,7 @@ * * @return <code>true</code> iff the constraints are satisfied. */ - private boolean isConsistent(final IConstraint[] constraints, + public boolean isConsistent(final IConstraint[] constraints, final IBindingSet bindingSet) { for (int i = 0; i < constraints.length; i++) { @@ -652,25 +663,27 @@ } - /** - * Cancel the running query (normal termination). - * <p> - * Note: This method provides a means for an operator to indicate that the - * query should halt immediately. It used used by {@link SliceOp}, which - * needs to terminate the entire query once the slice has been satisfied. - * (If {@link SliceOp} just jumped out of its own evaluation loop then the - * query would not produce more results, but it would continue to run and - * the over produced results would just be thrown away.) - * <p> - * Note: When an individual {@link BOp} evaluation throws an exception, the - * {@link QueryEngine} will catch that exception and halt query evaluation - * with that thrown cause. - */ - public void halt() { - - runningQuery.halt(); - - } +// /** +// * Cancel the running query (normal termination). +// * <p> +// * Note: This method provides a means for an operator to indicate that the +// * query should halt immediately. It used used by {@link SliceOp}, which +// * needs to terminate the entire query once the slice has been satisfied. 
+// * (If {@link SliceOp} just jumped out of its own evaluation loop then the
+// * query would not produce more results, but it would continue to run and
+// * the over-produced results would just be thrown away.)
+// * <p>
+// * Note: When an individual {@link BOp} evaluation throws an exception, the
+// * {@link QueryEngine} will catch that exception and halt query evaluation
+// * with that thrown cause.
+// *
+// * @see IRunningQuery#halt()
+// */
+// public void halt() {
+//
+// runningQuery.halt();
+//
+// }

 /*
 * I've replaced this with AbstractSplitter for the moment.
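The two visible API changes in this revision, the new getExecutorService() accessor and the now-public isConsistent(), are meant to be used together in operator code. A hypothetical fragment under those assumptions (the wrapper class, method, and the package locations of IConstraint and IBindingSet are assumed here, not shown in the diff):

import java.util.concurrent.Executor;

import com.bigdata.bop.BOpContext;
import com.bigdata.bop.IBindingSet;
import com.bigdata.bop.IConstraint;

final class ConstraintCheckSketch {

    /**
     * Run a constraint check for one binding set on the executor exposed by
     * the evaluation context (the local index manager's executor service).
     */
    static void submit(final BOpContext<?> context,
            final IConstraint[] constraints, final IBindingSet bindingSet) {

        final Executor executor = context.getExecutorService();

        executor.execute(new Runnable() {
            public void run() {
                if (context.isConsistent(constraints, bindingSet)) {
                    // ... emit the solution downstream ...
                }
            }
        });

    }

}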
From: <ble...@us...> - 2010-09-22 18:51:10
Revision: 3610 http://bigdata.svn.sourceforge.net/bigdata/?rev=3610&view=rev Author: blevine218 Date: 2010-09-22 18:51:04 +0000 (Wed, 22 Sep 2010) Log Message: ----------- Remove our Assert subclass and all references to it. Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestJiniCoreServicesProcessHelper.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationZNodeEnum.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarter.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java Removed Paths: ------------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -39,15 +39,17 @@ import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.data.ACL; +import org.junit.Assert; + import com.bigdata.jini.start.config.ZookeeperClientConfig; import com.bigdata.jini.start.process.ProcessHelper; import com.bigdata.jini.start.process.ZookeeperProcessHelper; import com.bigdata.resources.ResourceFileFilter; import com.bigdata.service.jini.JiniClient; import com.bigdata.service.jini.JiniFederation; -import com.bigdata.test.util.Assert; + /** * Abstract base class for unit tests requiring a running zookeeper and a * running federation as configured from a test resource. 
Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestJiniCoreServicesProcessHelper.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestJiniCoreServicesProcessHelper.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestJiniCoreServicesProcessHelper.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -31,6 +31,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -50,8 +51,8 @@ import com.bigdata.jini.util.ConfigMath; import com.bigdata.service.jini.JiniClientConfig; import com.bigdata.service.jini.util.JiniServicesHelper; -import com.bigdata.test.util.Assert; + /** * Test suite for the {@link JiniCoreServicesProcessHelper} * Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -36,7 +36,9 @@ import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooKeeper; + import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -46,8 +48,8 @@ import com.bigdata.jini.start.config.ServiceConfiguration; import com.bigdata.jini.start.config.TransactionServerConfiguration; import com.bigdata.service.jini.TransactionServer; -import com.bigdata.test.util.Assert; + /** * Test suite for monitoring state changes for a {@link ServiceConfiguration} * and creating a new physical service instance. Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationZNodeEnum.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationZNodeEnum.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationZNodeEnum.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -27,12 +27,13 @@ package com.bigdata.jini.start; +import org.junit.Assert; import org.junit.Test; import com.bigdata.service.jini.TransactionServer; -import com.bigdata.test.util.Assert; + /** * Test suite for {@link ServiceConfigurationZNodeEnum}. 
* Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarter.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarter.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarter.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -42,7 +42,9 @@ import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.Stat; + import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -57,7 +59,6 @@ import com.bigdata.service.Service; import com.bigdata.service.jini.RemoteDestroyAdmin; import com.bigdata.service.jini.TransactionServer; -import com.bigdata.test.util.Assert; import com.bigdata.zookeeper.ZNodeDeletedWatcher; import com.bigdata.zookeeper.ZooHelper; Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/AbstractServerTestCase.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -36,6 +36,8 @@ import net.jini.core.lookup.ServiceRegistrar; import net.jini.core.lookup.ServiceTemplate; +import org.junit.Assert; + import com.bigdata.journal.ITx; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.mdi.LocalPartitionMetadata; @@ -43,7 +45,6 @@ import com.bigdata.service.DataService; import com.bigdata.service.IDataService; import com.bigdata.service.MetadataService; -import com.bigdata.test.util.Assert; import com.sun.jini.tool.ClassServer; import com.bigdata.util.config.NicUtil; Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/service/jini/TestBigdataClient.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -32,6 +32,7 @@ import java.util.UUID; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -46,7 +47,6 @@ import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; import com.bigdata.service.jini.util.JiniServicesHelper; -import com.bigdata.test.util.Assert; import com.bigdata.test.util.Util; /** @@ -198,8 +198,8 @@ while (itr.hasNext()) { final ITuple<?> tuple = itr.next(); - Assert.assertEquals(keys[i], tuple.getKey()); - Assert.assertEquals(vals[i], tuple.getValue()); + Assert.assertArrayEquals(keys[i], tuple.getKey()); + Assert.assertArrayEquals(vals[i], tuple.getValue()); i++; } Deleted: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java 2010-09-22 18:23:59 UTC (rev 3609) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/test/util/Assert.java 2010-09-22 18:51:04 UTC (rev 3610) @@ -1,82 +0,0 @@ -/** - -Copyright (C) SYSTAP, 
LLC 2006-2007. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.test.util; - - -/** - * Utility class that extends JUnit's Assert class with additional utilities. - * - * @author blevine - * - */ -public class Assert extends org.junit.Assert -{ - public static void assertEquals(byte[] expected, byte[] actual) - { - assertEquals(null, expected, actual); - } - public static void assertEquals(String message, byte[] expected, byte[] actual) - { - if (expected == null && actual == null) - { - return; - } - - if ( (expected == null) && (actual != null) ) - { - assertNull(message, actual); - } - - if ( (expected != null) && (actual == null) ) - { - assertNotNull(message, actual); - } - - if (expected.length != actual.length) - { - String msg = "(array lengths do not match)."; - - if (message != null) - { - msg = message + " " + msg; - } - - fail(msg); - } - - for (int i = 0; i < expected.length; i++) - { - if (expected[i] != actual[i]) - { - String msg = "(index = i)."; - - if (message != null) - { - msg = message + " " + msg; - } - assertEquals(msg, expected[i], actual[i]); - } - } - } -} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
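The replacement pattern is visible in the TestBigdataClient hunk above: byte[] comparisons move from the removed helper to JUnit 4's built-in Assert.assertArrayEquals, which likewise reports the first differing index on failure. For reference, a self-contained test in that style:

import static org.junit.Assert.assertArrayEquals;

import org.junit.Test;

public class ByteArrayAssertExample {

    @Test
    public void byteArraysCompareElementwise() {

        final byte[] expected = new byte[] { 1, 2, 3 };
        final byte[] actual = new byte[] { 1, 2, 3 };

        // org.junit.Assert.assertArrayEquals replaces the custom
        // Assert.assertEquals(byte[], byte[]) helper deleted above.
        assertArrayEquals(expected, actual);

    }

}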
From: <tho...@us...> - 2010-09-22 18:24:05
Revision: 3609 http://bigdata.svn.sourceforge.net/bigdata/?rev=3609&view=rev Author: thompsonbry Date: 2010-09-22 18:23:59 +0000 (Wed, 22 Sep 2010) Log Message: ----------- Branch created for https://sourceforge.net/apps/trac/bigdata/ticket/166. Implement a feature for reporting the change set via the BigdataSail. The implementation will report on the delta in the statements materialized in the database between the previous commit point and the current commit point. This report will capture, as of the new commit point: (1) all given statements written onto the database, if they were not previously present in the database, and all inferences materialized as a result of the statements asserted, if they were not already present in the database; and (2) all given statements which were retracted from the database, if they can no longer be proven based on the remaining statements, and all materialized inferences which are no longer proven, given the statements which were retracted, and which were therefore retracted from the database. In order to signal the intention of the application, it might be best to create a second "commit()" method, e.g., named "commitAndReportChangeSet()", which provides the additional reporting mechanisms.

Added Paths: ----------- branches/CHANGE_SET_BRANCH/
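The log message names only the proposed entry point, so the following is a hypothetical sketch of what such a change-set report could look like, not a committed design; the ChangeSet shape and the use of the Sesame Statement type are assumptions:

import java.util.Set;

import org.openrdf.model.Statement;

/**
 * Illustrative only: the ticket proposes commitAndReportChangeSet(); this
 * report type is one possible shape for the delta it describes.
 */
interface ChangeSet {

    /** Statements (explicit and inferred) asserted at this commit point. */
    Set<Statement> asserted();

    /** Statements (explicit and inferred) retracted at this commit point. */
    Set<Statement> retracted();

}

interface ChangeSetReportingConnection {

    /** Like commit(), but also reports the delta from the prior commit point. */
    ChangeSet commitAndReportChangeSet() throws Exception;

}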
From: <res...@us...> - 2010-09-21 22:19:03
Revision: 3608 http://bigdata.svn.sourceforge.net/bigdata/?rev=3608&view=rev Author: resendes Date: 2010-09-21 22:18:53 +0000 (Tue, 21 Sep 2010) Log Message: ----------- Merge from maven_scaleout branch. Modified Paths: -------------- branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/build.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/build.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/build.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/build.properties branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties branches/bbb_cleanup/bigdata-core/pom.xml branches/bbb_cleanup/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/Banner.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/btree/IndexMetadata.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentStore.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/btree/Node.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/BCHMGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/BCHMGlobalLRU2.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRURecycler.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRURecyclerExplicitDeleteRequired.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/IGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/StoreAndAddressLRUCache.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/WeakReferenceGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/AbstractProcessCollector.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/httpd/DummyEventReportingService.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/linux/StatisticsCollectorForLinux.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/linux/SysstatUtil.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/win/StatisticsCollectorForWindows.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/journal/DiskOnlyStrategy.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/metadata/EmbeddedShardLocator.java 
branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rawstore/AbstractRawStore.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/resources/StoreManager.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/AbstractFederation.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/AbstractService.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/DataService.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/DefaultClientDelegate.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/EmbeddedFederation.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/Event.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/IBigdataClient.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/LoadBalancerService.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/service/jini/AbstractServer.java branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/cache/StressTestGlobalLRU.xml branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/test.xml branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/AbstractIndexSegmentTestCase.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestAll_IndexSegment.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithCompactingMerge.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithIncrementalBuild.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithLargeTrees.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentWithBloomFilter.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestNullValues.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/StressTestGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithStripedLocks.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithStripedLocksAndLIRS.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithThreadLocalBuffers.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithThreadLocalBuffersAndLIRS.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRUWithLIRS.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/jini/start/config/TestServiceConfiguration.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/journal/AbstractRestartSafeTestCase.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java 
branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/rdf/store/AbstractTestCase.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/service/AbstractEmbeddedFederationTestCase.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/service/StressTestConcurrent.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/service/TestMove.java branches/bbb_cleanup/bigdata-integ/pom.xml branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/JiniStartSuite.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/TestJiniCoreServicesProcessHelper.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoring.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceConfigurationMonitoringRemote.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarter.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/TestServiceStarterRemote.java branches/bbb_cleanup/pom.xml Added Paths: ----------- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/cache/LRUNexus.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/config/JiniStartConfigSuite.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/config/TestServiceConfiguration.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/config/TestServiceConfigurationRemote.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/jini/start/config/TestZookeeperServerEntry.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/ branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooBarrier.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooElection.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooQueue.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZookeeperAccessor.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/ZookeeperSuite.java Removed Paths: ------------- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/LRUNexus.java branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderCacheInteraction.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java 
branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooBarrier.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooElection.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooQueue.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZookeeperAccessor.java branches/bbb_cleanup/bigdata-integ/src/test/java/com/bigdata/zookeeper/ZookeeperSuite.java Property Changed: ---------------- branches/bbb_cleanup/ branches/bbb_cleanup/bigdata-core/ branches/bbb_cleanup/bigdata-core/bigdata-perf/ branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/src/resources/ branches/bbb_cleanup/bigdata-core/dsi-utils/LEGAL/ branches/bbb_cleanup/bigdata-core/dsi-utils/lib/ branches/bbb_cleanup/bigdata-core/dsi-utils/src/ branches/bbb_cleanup/bigdata-core/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/bbb_cleanup/bigdata-core/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/bbb_cleanup/bigdata-core/osgi/ branches/bbb_cleanup/bigdata-core/src/main/deploy/bin/ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/boot/ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging/ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties branches/bbb_cleanup/bigdata-core/src/main/java/ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/attr/ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/disco/ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rdf/sail/bench/ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/rdf/util/ branches/bbb_cleanup/bigdata-core/src/samples-sail/com/bigdata/samples/fastload.properties branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/conf/bigdataStandaloneTesting.config branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/config/testfed.config branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/jini/start/testfed.config branches/bbb_cleanup/bigdata-core/src/test/deploy/testing/data/com/bigdata/service/jini/master/TestMappedRDFDataLoadMaster.config branches/bbb_cleanup/bigdata-core/src/test/java/ Property changes on: branches/bbb_cleanup ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463,3469-3470 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438 /trunk:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + 
/branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-3440,3443,3463,3469-3470 /branches/fko:3150-3194 /branches/maven_scaleout:3379-3438,3588-3607 /trunk:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core ___________________________________________________________________ Modified: svn:mergeinfo - /trunk:3499 + /branches/maven_scaleout/bigdata-core:3588-3607 /trunk:3499 Property changes on: branches/bbb_cleanup/bigdata-core/bigdata-perf ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/bigdata-perf:3379-3541 + /branches/maven_scaleout/bigdata-core/bigdata-perf:3588-3607 /trunk/bigdata-perf:3379-3541 Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/build.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/build.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/build.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -182,13 +182,13 @@ # The record cache (empty for the default cache). #cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # -## -Dcom.bigdata.LRUNexus.percentHeap=.1 +## -Dcom.bigdata.cache.LRUNexus.percentHeap=.1 # all jvm args for query. queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! 
#log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/build.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/build.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/build.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -61,11 +61,11 @@ # The record cache (empty for the default cache). #cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. queryJvmArgs=-server -Xmx${maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! #log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/build.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/build.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/build.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -129,11 +129,11 @@ # The record cache (empty for the default cache). 
#cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. queryJvmArgs=-server -Xmx${lubm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties Property changes on: branches/bbb_cleanup/bigdata-core/bigdata-perf/lubm/src/resources ___________________________________________________________________ Modified: svn:mergeinfo - /branches/dev-btm/bigdata-perf/lubm/src/resources:2574-3440 /trunk/bigdata-perf/lubm/src/resources:3379-3541 + /branches/dev-btm/bigdata-perf/lubm/src/resources:2574-3440 /branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/src/resources:3588-3607 /trunk/bigdata-perf/lubm/src/resources:3379-3541 Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/build.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/build.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/build.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -60,11 +60,11 @@ # The record cache (empty for the default cache). #cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. 
queryJvmArgs=-server -Xmx${maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Modified: branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties =================================================================== --- branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties 2010-09-21 22:18:53 UTC (rev 3608) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! #log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Property changes on: branches/bbb_cleanup/bigdata-core/dsi-utils/LEGAL ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/LEGAL:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/dsi-utils/LEGAL:3588-3607 /trunk/dsi-utils/LEGAL:3379-3430,3499 Property changes on: branches/bbb_cleanup/bigdata-core/dsi-utils/lib ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/lib:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/dsi-utils/lib:3588-3607 /trunk/dsi-utils/lib:3379-3430,3499 Property changes on: branches/bbb_cleanup/bigdata-core/dsi-utils/src ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/dsi-utils/src:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/dsi-utils/src:3588-3607 /trunk/dsi-utils/src:3379-3430,3499 Property changes on: branches/bbb_cleanup/bigdata-core/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom:3588-3607 /trunk/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 Property changes on: branches/bbb_cleanup/bigdata-core/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom:3588-3607 /trunk/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom:3379-3430,3499 Property changes on: branches/bbb_cleanup/bigdata-core/osgi ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/osgi:3379-3430,3499 + /branches/maven_scaleout/bigdata-core/osgi:3588-3607 /trunk/osgi:3379-3430,3499 Modified: branches/bbb_cleanup/bigdata-core/pom.xml =================================================================== --- branches/bbb_cleanup/bigdata-core/pom.xml 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/pom.xml 2010-09-21 22:18:53 UTC (rev 3608) @@ -1,5 +1,4 @@ -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> +<project 
xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>com.bigdata</groupId> @@ -28,10 +27,8 @@ <configuration> <compilerArguments> <!-- - Apparently Javac may compile java source files inside jars put on the classpath. Weird. - Zookeeper 3.2.1 jar contained classes and sources, and under some circumstances, - the java files were getting recompiled and put into the bigdata jar. This setting - forces javac to only look for source in the current maven source directory. + Apparently Javac may compile java source files inside jars put on the classpath. Weird. Zookeeper 3.2.1 jar contained classes and sources, and under some circumstances, the + java files were getting recompiled and put into the bigdata jar. This setting forces javac to only look for source in the current maven source directory. --> <sourcepath>${project.build.sourceDirectory}</sourcepath> </compilerArguments> @@ -89,12 +86,12 @@ <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <configuration> - <!-- TODO: BLECH! All the tests are excluded from the regular unit test phase. - TODO: Need to split out the unit tests and the heavier integration tests, plus - TODO: get all the unit tests passing so regressions can properly fail the build. + <!-- + TODO: BLECH! All the tests are excluded from the regular unit test phase. TODO: Need to split out the unit tests and the heavier integration tests, plus TODO: get all the unit + tests passing so regressions can properly fail the build. --> <testFailureIgnore>true</testFailureIgnore> - <includes/> + <includes /> <excludes> <exclude>**/*</exclude> </excludes> @@ -102,8 +99,8 @@ </plugin> <plugin> - <!-- These are where the heavier tests can be run. Right now failsafe looks for tests starting or ending - with IT, aka FooIT.java or ITFoo.java, which don't exist yet, so nothing runs. + <!-- + These are where the heavier tests can be run. Right now failsafe looks for tests starting or ending with IT, aka FooIT.java or ITFoo.java, which don't exist yet, so nothing runs. --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-failsafe-plugin</artifactId> @@ -201,14 +198,14 @@ <dependencies> - + <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> <!-- TODO: look at maven-bundle-plugin from felix to provide osgi support. bndlib version 0.0.357 in central. --> - + <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>cweb-extser</artifactId> @@ -230,7 +227,7 @@ <groupId>${thirdParty.groupId}</groupId> <!-- TODO: An older version (5.0.9) is available in central. 
--> <artifactId>unimi-fastutil</artifactId> <version>5.1.5</version> - </dependency> + </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>lgpl-utils</artifactId> @@ -246,22 +243,22 @@ <artifactId>dsi-utils</artifactId> <version>1.0.6-020610</version> </dependency> - <dependency> + <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>high-scale-lib</artifactId> <version>1.1.2</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> + <groupId>${thirdParty.groupId}</groupId> <artifactId>iris</artifactId> <version>0.58</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> + <groupId>${thirdParty.groupId}</groupId> <artifactId>nxparser</artifactId> <version>6-22-2010</version> </dependency> - + <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>zookeeper</artifactId> @@ -312,7 +309,7 @@ <dependency> <groupId>org.apache.river</groupId> <artifactId>browser</artifactId> - <version>2.1</version> + <version>2.1</version> </dependency> <!-- Note that these are dl jars, so they are provided and have a dl classifier. --> @@ -397,10 +394,9 @@ <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.15</version> - <!-- These exclusions are to address the fact that 1.2.15 added new features that depends on Sun specific - jars, but these jars cannot be made available due to Sun's click-through requirement on them. - We aren't using the new features anyway, so they are safe to exclude. log4j should have made these - optional in their POM. + <!-- + These exclusions are to address the fact that 1.2.15 added new features that depends on Sun specific jars, but these jars cannot be made available due to Sun's click-through + requirement on them. We aren't using the new features anyway, so they are safe to exclude. log4j should have made these optional in their POM. 
--> <exclusions> <exclusion> @@ -551,10 +547,37 @@ <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>3.1</version> - </dependency> - + </dependency> + </dependencies> + <reporting> + <plugins> + + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-report-plugin</artifactId> + <version>2.5</version> + </plugin> + + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-checkstyle-plugin</artifactId> + <version>2.5</version> + <configuration> + <configLocation>${basedir}/src/main/config/checkstyle.xml</configLocation> + </configuration> + </plugin> + + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>findbugs-maven-plugin</artifactId> + <version>2.3</version> + </plugin> + + </plugins> + </reporting> + <profiles> <profile> <id>bigdata-clover</id> @@ -579,7 +602,7 @@ </configuration> </plugin> </plugins> - </build> + </build> </profile> </profiles> Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/bin ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/bin:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/bin:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/bin:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/bin:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/bin:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/bin:3463 /branches/fko/bigdata-core/src/main/deploy/bin:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/bin:3588-3607 /trunk/bigdata-core/src/main/deploy/bin:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Modified: branches/bbb_cleanup/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh 2010-09-21 22:18:53 UTC (rev 3608) @@ -4,11 +4,11 @@ # # usage: [interval [count]] # -# See com.bigdata.counters.AbstractStatisticsCollector#main(String[]) +# See com.bigdata.counters.httpd.AbstractStatisticsCollector#main(String[]) source `dirname $0`/bigdataenv java ${JAVA_OPTS} \ -cp ${CLASSPATH} \ - com.bigdata.counters.AbstractStatisticsCollector \ + com.bigdata.counters.httpd.AbstractStatisticsCollector \ $* Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini ___________________________________________________________________ Modified: svn:mergeinfo - /trunk/src/main/deploy/var/config/jini:3499 /trunk/src/resources/config:3516-3528 + /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini:3588-3607 
/trunk/src/main/deploy/var/config/jini:3499 /trunk/src/resources/config:3516-3528 Modified: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-21 22:18:53 UTC (rev 3608) @@ -629,15 +629,15 @@ "jini", "org.apache.zookeeper.server.quorum.QuorumPeerMain", -//BTM "com.bigdata.service.jini.TransactionServer", -//BTM "com.bigdata.service.jini.MetadataServer", + "com.bigdata.service.jini.TransactionServer", + "com.bigdata.service.jini.MetadataServer", "com.bigdata.service.jini.DataServer", -//BTM "com.bigdata.service.jini.LoadBalancerServer", + "com.bigdata.service.jini.LoadBalancerServer", "com.bigdata.service.jini.ClientServer", -"com.bigdata.transaction.ServiceImpl", -"com.bigdata.metadata.ServiceImpl", -"com.bigdata.loadbalancer.ServiceImpl" +//BTM "com.bigdata.transaction.ServiceImpl", +//BTM "com.bigdata.metadata.ServiceImpl", +//BTM "com.bigdata.loadbalancer.ServiceImpl" }; Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/config/bigdataCluster.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/src/resources/config/bigdataCluster.config:3516-3528 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/src/resources/config/bigdataCluster.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/src/resources/config/bigdataCluster.config:3516-3528 Modified: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config =================================================================== --- 
branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-21 22:18:53 UTC (rev 3608) @@ -322,15 +322,15 @@ /* * Override the LRU buffer capacity. * - * See com.bigdata.LRUNexus.Options for configuration info. Note that if + * See com.bigdata.cache.LRUNexus.Options for configuration info. Note that if * you disable the LRUNexus you will loose the leaf cache for the index * segments, which is a big penalty. - //"-Dcom.bigdata.LRUNexus.enabled=false", + //"-Dcom.bigdata.cache.LRUNexus.enabled=false", // option may be used to select the higher throughput impl. - "-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired", + "-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired", // option may be used to allocate more heap to the LRUNexus. - "-Dcom.bigdata.LRUNexus.percentHeap=.2", - "-Dcom.bigdata.LRUNexus.indexSegmentBuildPopulatesCache=true", // default true + "-Dcom.bigdata.cache.LRUNexus.percentHeap=.2", + "-Dcom.bigdata.cache.LRUNexus.indexSegmentBuildPopulatesCache=true", // default true */ }; Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/boot ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/boot:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/boot:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/boot:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/boot:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/boot:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/boot/config:3469-3470 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/boot:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot:3379-3438 /trunk/bigdata-core/src/main/deploy/var/config/jini/boot:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/boot:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/boot:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/boot:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/boot:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/boot:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/boot/config:3469-3470 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/boot:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/boot:3379-3438,3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/jini/boot:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2270-2546,2548-2782 
/branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/shardlocator.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2594-3237 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/shardlocator.config:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/jini/shardlocator.config:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging:2594-3237 /branches/dev-btm/bigdata/src/resources/logging:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging:2574-3440,3443,3463,3469-3470 /branches/fko/bigdata-core/src/main/deploy/var/config/logging:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging:2594-3237 /branches/dev-btm/bigdata/src/resources/logging:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging:2574-3440,3443,3463,3469-3470 /branches/fko/bigdata-core/src/main/deploy/var/config/logging:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging:3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/logging:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties ___________________________________________________________________ Modified: svn:mergeinfo - 
/branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/shardlocator-logging.properties:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/shardlocator-logging.properties:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/metadata/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/logging/shardlocator-logging.properties:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/transaction-logging.properties:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/transaction/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3150-3194 /trunk/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 + 
/branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2594-3237 /branches/dev-btm/bigdata/src/resources/logging/transaction-logging.properties:3463 /branches/dev-btm/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java/com/bigdata/transaction/config/logging.properties:3463 /branches/fko/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3588-3607 /trunk/bigdata-core/src/main/deploy/var/config/logging/transaction-logging.properties:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 Property changes on: branches/bbb_cleanup/bigdata-core/src/main/java ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/java:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/java:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/java:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/java:2594-3237 /branches/dev-btm/bigdata/src/java:3463 /branches/dev-btm/bigdata-core/src/main/java:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java:3463,3469-3470 /branches/dev-btm/bigdata-rdf/src/java:3463 /branches/dev-btm/bigdata-sails/src/java:3463 /branches/fko/bigdata-core/src/main/java:3150-3194 /trunk/bigdata/src/java:3507 /trunk/bigdata-core/src/main/java:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/bigdata-rdf/src/java:3542 + /branches/BTREE_BUFFER_BRANCH/bigdata-core/src/main/java:2004-2045 /branches/DEV_BRANCH_27_OCT_2009/bigdata-core/src/main/java:2270-2546,2548-2782 /branches/LEXICON_REFACTOR_BRANCH/bigdata-core/src/main/java:2633-3304 /branches/bugfix-btm/bigdata-core/src/main/java:2594-3237 /branches/dev-btm/bigdata/src/java:3463 /branches/dev-btm/bigdata-core/src/main/java:2574-3440,3443,3463,3469-3470 /branches/dev-btm/bigdata-jini/src/java:3463,3469-3470 /branches/dev-btm/bigdata-rdf/src/java:3463 /branches/dev-btm/bigdata-sails/src/java:3463 /branches/fko/bigdata-core/src/main/java:3150-3194 /branches/maven_scaleout/bigdata-core/src/main/java:3588-3607 /trunk/bigdata/src/java:3507 /trunk/bigdata-core/src/main/java:3379-3430,3432-3460,3476-3499,3503,3507,3516-3528,3542 /trunk/bigdata-rdf/src/java:3542 Modified: branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/Banner.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/Banner.java 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/Banner.java 2010-09-21 22:18:53 UTC (rev 3608) @@ -31,12 +31,12 @@ import java.lang.reflect.Method; import java.util.Date; +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.NicUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.system.SystemUtil; -import com.bigdata.counters.AbstractStatisticsCollector; - /** 
* Class has a static method which writes a copyright banner on stdout once per * JVM. This method is invoked from several core classes in order to ensure that @@ -48,6 +48,17 @@ public class Banner { private static boolean didBanner; + private static final String HOSTNAME; + static { + String val; + try { + val = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); + } catch(Throwable t) {//for now, maintain same failure logic as used previously + t.printStackTrace(); + val = NicUtil.getIpAddressByLocalHost(); + } + HOSTNAME = val; + } /** * Environment variables understood by the {@link Banner} class. @@ -153,7 +164,7 @@ "\n"+// "\nCopyright SYSTAP, LLC 2006-2010. All rights reserved."+// "\n"+// - "\n"+AbstractStatisticsCollector.fullyQualifiedHostName+// + "\n"+HOSTNAME+// "\n"+new Date()+// "\n"+SystemUtil.operatingSystem() + "/" + SystemUtil.osVersion() + " " + SystemUtil.architecture() + // Modified: branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java 2010-09-21 22:18:53 UTC (rev 3608) @@ -27,7 +27,6 @@ package com.bigdata; -import com.bigdata.jini.start.process.ProcessHelper; /** * A class for those few statics that it makes sense to reference from other @@ -50,9 +49,9 @@ * {@link System#out} when that child process is executed. This makes it * easy to track down why a child process dies during service start. If you * want to see more output from the child process, then you should set the - * log level for the {@link ProcessHelper} class to INFO. + * log level for the {@link com.bigdata.jini.start.process.ProcessHelper} class to INFO. * - * @see ProcessHelper + * @see com.bigdata.jini.start.process.ProcessHelper */ public static int echoProcessStartupLineCount = 20; Deleted: branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/LRUNexus.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-21 20:49:50 UTC (rev 3607) +++ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-21 22:18:53 UTC (rev 3608) @@ -1,950 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Sep 8, 2009 - */ - -package com.bigdata; - -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryPoolMXBean; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.Properties; - -import org.apache.log4j.Logger; - -import com.bigdata.io.BytesUtil; -import com.bigdata.btree.IndexMetadata; -import com.bigdata.btree.IndexSegment; -import com.bigdata.btree.IndexSegmentBuilder; -import com.bigdata.cache.BCHMGlobalLRU; -import com.bigdata.cache.BCHMGlobalLRU2; -import com.bigdata.cache.HardReferenceGlobalLRU; -import com.bigdata.cache.HardReferenceGlobalLRURecycler; -import com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired; -import com.bigdata.cache.IGlobalLRU; -import com.bigdata.cache.WeakReferenceGlobalLRU; -import com.bigdata.cache.IGlobalLRU.ILRUCache; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IBufferStrategy; -import com.bigdata.journal.TemporaryRawStore; -import com.bigdata.rawstore.AbstractRawStore; -import com.bigdata.rawstore.Bytes; -import com.bigdata.rawstore.IAddressManager; -import com.bigdata.rawstore.IRawStore; -import com.bigdata.rawstore.WormAddressManager; - -/** - * Static singleton factory used to configure the record level cache behavior - * for bigdata within the current JVM. The configuration is specified using - * system properties defined by {@link Options}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * ... [truncated message content] |
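A note on the LRUNexus renames in the diffs above: the record cache class moved from com.bigdata to com.bigdata.cache, and its system property prefix evidently moved with it, so every -Dcom.bigdata.LRUNexus.* flag in the launch scripts becomes -Dcom.bigdata.cache.LRUNexus.*. Below is a minimal sketch of probing the new keys from Java. The key names follow the pattern shown in the diffs above; the fallback values in the sketch are placeholders, not the cache's actual defaults.

    /**
     * Minimal sketch: reads the renamed LRUNexus options from system properties.
     * Run e.g.: java -Dcom.bigdata.cache.LRUNexus.enabled=false LRUNexusOptionCheck
     */
    public class LRUNexusOptionCheck {

        // New prefix after the move of LRUNexus into com.bigdata.cache.
        private static final String PREFIX = "com.bigdata.cache.LRUNexus";

        public static void main(String[] args) {
            // The "(unset)" fallbacks are illustrative only, not documented defaults.
            final String enabled = System.getProperty(PREFIX + ".enabled", "(unset)");
            final String impl = System.getProperty(PREFIX + ".class", "(unset)");
            System.out.println("LRUNexus enabled=" + enabled + ", class=" + impl);
        }
    }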
From: <sgo...@us...> - 2010-09-21 20:49:58
Revision: 3607 http://bigdata.svn.sourceforge.net/bigdata/?rev=3607&view=rev Author: sgossard Date: 2010-09-21 20:49:50 +0000 (Tue, 21 Sep 2010) Log Message: ----------- [maven_scaleout] : Moved AbstractStatisticsCollector into 'com.bigdata.counters.httpd' package to break dependency cycles with 'com.bigdata.counters' package. This has also broken out a few other packages that had transitive cycles, notably 'com.bigdata.io' and 'com.bigdata.util.concurrent'. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractProcessCollector.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/DummyEventReportingService.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/StatisticsCollectorForLinux.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/SysstatUtil.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/win/StatisticsCollectorForWindows.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/DiskOnlyStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/metadata/EmbeddedShardLocator.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/AbstractFederation.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/AbstractService.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/DataService.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/DefaultClientDelegate.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/EmbeddedFederation.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/Event.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/IBigdataClient.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/LoadBalancerService.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/service/jini/AbstractServer.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/StressTestConcurrent.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/TestMove.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/legacy/scripts/testStatisticsCollector.sh 2010-09-21 20:49:50 UTC (rev 3607) @@ -4,11 +4,11 @@ # # usage: [interval [count]] # -# See com.bigdata.counters.AbstractStatisticsCollector#main(String[]) +# See com.bigdata.counters.httpd.AbstractStatisticsCollector#main(String[]) source `dirname $0`/bigdataenv java ${JAVA_OPTS} \ -cp ${CLASSPATH} \ - com.bigdata.counters.AbstractStatisticsCollector \ + 
com.bigdata.counters.httpd.AbstractStatisticsCollector \ $* Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractProcessCollector.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractProcessCollector.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractProcessCollector.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -78,8 +78,8 @@ /** * Creates the {@link ActiveProcess} and the - * {@link ActiveProcess#start(com.bigdata.counters.AbstractStatisticsCollector.AbstractProcessReader)}s - * it passing in the value returned by the {@link #getProcessReader()} + * {@link ActiveProcess#start}s + * it, passing in the value returned by the {@link #getProcessReader()} */ public void start() { Deleted: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -1,720 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -/* - * Created on Mar 13, 2008 - */ - -package com.bigdata.counters; - -import java.io.IOException; -import java.lang.management.GarbageCollectorMXBean; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.Enumeration; -import java.util.List; -import java.util.Properties; -import java.util.UUID; - -import org.apache.log4j.Logger; -import org.apache.system.SystemUtil; - -import com.bigdata.counters.httpd.CounterSetHTTPD; -import com.bigdata.counters.linux.StatisticsCollectorForLinux; -import com.bigdata.counters.win.StatisticsCollectorForWindows; -import com.bigdata.io.DirectBufferPool; -import com.bigdata.rawstore.Bytes; -import com.bigdata.util.config.ConfigDeployUtil; -import com.bigdata.util.config.NicUtil; -import com.bigdata.util.httpd.AbstractHTTPD; - -/** - * Base class for collecting data on a host. The data are described by a - * hierarchical collection of {@link ICounterSet}s and {@link ICounter}s. A - * {@link IRequiredHostCounters minimum set of counters} is defined which SHOULD - * be available for decision-making. Implementations are free to report any - * additional data which they can make available. Reporting is assumed to be - * periodic, e.g., every 60 seconds or so. 
The purpose of these data is to - * support decision-making concerning the over- and under-utilization of hosts - * in support of load balancing of services deployed over those hosts. - * <p> - * An effort has been made to align the core set of counters for both Windows - * and Un*x platforms so as to support the declared counters on all platforms. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - */ -abstract public class AbstractStatisticsCollector implements IStatisticsCollector { - - protected static final String ps = ICounterSet.pathSeparator; - - final protected static Logger log = Logger - .getLogger(AbstractStatisticsCollector.class); - - /** {@link InetAddress#getCanonicalHostName()} for this host. */ - static final public String fullyQualifiedHostName; - - /** The path prefix under which all counters for this host are found. */ - static final public String hostPathPrefix; - - static { - - String s; - try { - s = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); - } catch(Throwable t) {//for now, maintain same failure logic as used previously - t.printStackTrace(); - s = NicUtil.getIpAddressByLocalHost(); - } - - fullyQualifiedHostName = s; - - hostPathPrefix = ICounterSet.pathSeparator + fullyQualifiedHostName - + ICounterSet.pathSeparator; - - if (log.isInfoEnabled()) { -// log.info("hostname : " + hostname); - log.info("FQDN : " + fullyQualifiedHostName); - log.info("hostPrefix: " + hostPathPrefix); - } - - } - - /** Reporting interval in seconds. */ - final protected int interval; - - /** - * The interval in seconds at which the counter values are read from the - * host platform. - */ - public int getInterval() { - - return interval; - - } - - protected AbstractStatisticsCollector(int interval) { - - if (interval <= 0) - throw new IllegalArgumentException(); - - if(log.isInfoEnabled()) log.info("interval=" + interval); - - this.interval = interval; - - } - -// /** -// * Return the load average for the last minute if available and -1 -// * otherwise. -// * <p> -// * Note: The load average is available on 1.6+ JVMs. -// * -// * @see OperatingSystemMXBean -// */ -// public double getSystemLoadAverage() -// { -// -//// double version = Double.parseDouble(System.getProperty("java.vm.version")); -//// if(version>=1.6) { -// -// double loadAverage = -1; -// -// final OperatingSystemMXBean mbean = ManagementFactory -// .getOperatingSystemMXBean(); -// -// /* -// * Use reflection since method is only available as of 1.6 -// */ -// Method method; -// try { -// method = mbean.getClass().getMethod("getSystemLoadAverage", -// new Class[] {}); -// loadAverage = (Double) method.invoke(mbean, new Object[] {}); -// } catch (SecurityException e) { -// log.warn(e.getMessage(), e); -// } catch (NoSuchMethodException e) { -// // Note: method is only defined since 1.6 -// log.warn(e.getMessage(), e); -// } catch (IllegalAccessException e) { -// log.warn(e.getMessage(), e); -// } catch (InvocationTargetException e) { -// log.warn(e.getMessage(), e); -// } -// -// return loadAverage; -// -// } - - /** - * {@link CounterSet} hierarchy. - */ - private CounterSet countersRoot; - - /** - * Return the counter hierarchy. The returned hierarchy only includes those - * counters whose values are available from the JVM. This collection is - * normally augmented with platform specific performance counters collected - * using an {@link AbstractProcessCollector}. - * <p> - * Note: Subclasses MUST extend this method to initialize their own - * counters. 
- */ - synchronized public CounterSet getCounters() { - - if (countersRoot == null) { - - countersRoot = new CounterSet(); - - // os.arch - countersRoot.addCounter(hostPathPrefix - + IHostCounters.Info_Architecture, - new OneShotInstrument<String>(System.getProperty("os.arch"))); - - // os.name - countersRoot.addCounter(hostPathPrefix - + IHostCounters.Info_OperatingSystemName, - new OneShotInstrument<String>(System.getProperty("os.name"))); - - // os.version - countersRoot.addCounter(hostPathPrefix - + IHostCounters.Info_OperatingSystemVersion, - new OneShotInstrument<String>(System.getProperty("os.version"))); - - // #of processors. - countersRoot.addCounter(hostPathPrefix - + IHostCounters.Info_NumProcessors, - new OneShotInstrument<Integer>(SystemUtil.numProcessors())); - - // processor info - countersRoot.addCounter(hostPathPrefix - + IHostCounters.Info_ProcessorInfo, - new OneShotInstrument<String>(SystemUtil.cpuInfo())); - - } - - return countersRoot; - - } - - /** - * Adds the Info and Memory counter sets under the <i>serviceRoot</i>. - * - * @param serviceRoot - * The {@link CounterSet} corresponding to the service (or - * client). - * @param serviceName - * The name of the service. - * @param serviceIface - * The class or interface that best represents the service or - * client. - * @param properties - * The properties used to configure that service or client. - */ - static public void addBasicServiceOrClientCounters(CounterSet serviceRoot, - String serviceName, Class serviceIface, Properties properties) { - - // Service info. - { - - final CounterSet serviceInfoSet = serviceRoot.makePath("Info"); - - serviceInfoSet.addCounter("Service Type", - new OneShotInstrument<String>(serviceIface.getName())); - - serviceInfoSet.addCounter("Service Name", - new OneShotInstrument<String>(serviceName)); - - AbstractStatisticsCollector.addServiceProperties(serviceInfoSet, - properties); - - } - - // Service per-process memory data - { - - serviceRoot.addCounter( - IProcessCounters.Memory_runtimeMaxMemory, - new OneShotInstrument<Long>(Runtime.getRuntime().maxMemory())); - - serviceRoot.addCounter(IProcessCounters.Memory_runtimeFreeMemory, - new Instrument<Long>() { - public void sample() { - setValue(Runtime.getRuntime().freeMemory()); - } - }); - - serviceRoot.addCounter(IProcessCounters.Memory_runtimeTotalMemory, - new Instrument<Long>() { - public void sample() { - setValue(Runtime.getRuntime().totalMemory()); - } - }); - - // add counters for garbage collection. - AbstractStatisticsCollector - .addGarbageCollectorMXBeanCounters(serviceRoot - .makePath(ICounterHierarchy.Memory_GarbageCollectors)); - - // Moved since counters must be dynamically reattached to reflect pool hierarchy. -// /* -// * Add counters reporting on the various DirectBufferPools. -// */ -// { -// -// serviceRoot.makePath( -// IProcessCounters.Memory + ICounterSet.pathSeparator -// + "DirectBufferPool").attach( -// DirectBufferPool.getCounters()); -// -// } - - } - - } - - /** - * Lists out all of the properties and then report each property using a - * {@link OneShotInstrument}. - * - * @param serviceInfoSet - * The {@link ICounterHierarchy#Info} {@link CounterSet} for the - * service. - * @param properties - * The properties to be reported out. 
- */ - static public void addServiceProperties(final CounterSet serviceInfoSet, - final Properties properties) { - - final CounterSet ptmp = serviceInfoSet.makePath("Properties"); - - final Enumeration<?> e = properties.propertyNames(); - - while (e.hasMoreElements()) { - - final String name; - final String value; - try { - - name = (String) e.nextElement(); - - value = (String) properties.getProperty(name); - - } catch (ClassCastException ex) { - - log.warn(ex.getMessage()); - - continue; - - } - - if (value == null) - continue; - - ptmp.addCounter(name, new OneShotInstrument<String>(value)); - - } - - } - - /** - * Adds/updates counters relating to JVM Garbage Collection. These counters - * should be located within a per-service path. - * - * @param counterSet - * The counters set that is the direct parent. - */ - static public void addGarbageCollectorMXBeanCounters(CounterSet counterSet) { - - final String name_pools = "Memory Pool Names"; - - final String name_count = "Collection Count"; - - final String name_time = "Cumulative Collection Time"; - - synchronized (counterSet) { - - final List<GarbageCollectorMXBean> list = ManagementFactory - .getGarbageCollectorMXBeans(); - - for (final GarbageCollectorMXBean bean : list) { - - final String name = bean.getName(); - - // counter set for this GC bean (may be pre-existing). - final CounterSet tmp = counterSet.makePath(name); - - synchronized (tmp) { - - // memory pool names. - { - if (tmp.getChild(name_pools) == null) { - - tmp.addCounter(name_pools, - new Instrument<String>() { - - @Override - protected void sample() { - - setValue(Arrays.toString(bean - .getMemoryPoolNames())); - - } - - }); - - } - - } - - // collection count. - { - if (tmp.getChild(name_count) == null) { - tmp.addCounter(name_count, new Instrument<Long>() { - - @Override - protected void sample() { - - setValue(bean.getCollectionCount()); - - } - }); - } - } - - // collection time. - { - if (tmp.getChild(name_time) == null) { - tmp.addCounter(name_time, new Instrument<Long>() { - - @Override - protected void sample() { - - setValue(bean.getCollectionTime()); - - } - }); - } - } - - } - - } - - } - - } - - /** - * Start collecting host performance data -- must be extended by the - * concrete subclass. - */ - public void start() { - - if (log.isInfoEnabled()) - log.info("Starting collection."); - - installShutdownHook(); - - } - - /** - * Stop collecting host performance data -- must be extended by the concrete - * subclass. - */ - public void stop() { - - if (log.isInfoEnabled()) - log.info("Stopping collection."); - - } - - /** - * Installs a {@link Runtime#addShutdownHook(Thread)} that executes - * {@link #stop()}. - * <p> - * Note: The runtime shutdown hook appears to be a robust way to handle ^C - * by providing a clean service termination. However, under eclipse (at - * least when running under Windows) you may find that the shutdown hook - * does not run when you terminate a Java application and that typedef - * process build up in the Task Manager as a result. This should not be the - * case during normal deployment. 
- */ - protected void installShutdownHook() { - - final Thread t = new Thread() { - - public void run() { - - AbstractStatisticsCollector.this.stop(); - - } - - }; - - t.setDaemon(true); - - Runtime.getRuntime().addShutdownHook(t); - - } - - /** - * Options for {@link AbstractStatisticsCollector} - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - */ - public interface Options { - - /** - * The interval in seconds at which the performance counters of the host - * platform will be sampled (default 60). - */ - public String PERFORMANCE_COUNTERS_SAMPLE_INTERVAL = AbstractStatisticsCollector.class - .getPackage().getName() - + ".interval"; - - public String DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL = "60"; - - /** - * The name of the process whose per-process performance counters are to - * be collected (required, no default). This causes the per-process - * counters to be reported using the path: - * - * <strong>/<i>fullyQualifiedHostname</i>/<i>processName</i>/...</strong> - * <p> - * Note: Services are generally associated with a {@link UUID} and that - * {@link UUID} is generally used as the service name. A single host may - * run many different services and will report the counters for each - * service using the path formed as described above. - */ - public String PROCESS_NAME = AbstractStatisticsCollector.class - .getPackage().getName() - + ".processName"; - - } - - /** - * Create an instance appropriate for the operating system on which the JVM - * is running. - * - * @param properties - * See {@link Options} - * - * @throws UnsupportedOperationException - * If there is no implementation available on the operating - * system on which you are running. - * - * @see Options - */ - public static AbstractStatisticsCollector newInstance( - final Properties properties) { - - final int interval = Integer.parseInt(properties.getProperty( - Options.PERFORMANCE_COUNTERS_SAMPLE_INTERVAL, - Options.DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL)); - - if (interval <= 0) - throw new IllegalArgumentException(); - - final String processName = properties.getProperty(Options.PROCESS_NAME); - - if (processName == null) - throw new IllegalArgumentException( - "Required option not specified: " + Options.PROCESS_NAME); - - final String osname = System.getProperty("os.name").toLowerCase(); - - if(osname.equalsIgnoreCase("linux")) { - - return new StatisticsCollectorForLinux(interval, processName); - - } else if(osname.contains("windows")) { - - return new StatisticsCollectorForWindows(interval); - - } else { - - throw new UnsupportedOperationException( - "No implementation available on " - + System.getProperty("os.getname")); - - } - - } - - /** - * Utility runs the {@link AbstractStatisticsCollector} appropriate for your - * operating system. Before performance counter collection starts the static - * counters will be written on stdout. The appropriate process(es) are then - * started to collect the dynamic performance counters. Collection will - * occur every {@link Options#PERFORMANCE_COUNTERS_SAMPLE_INTERVAL} seconds. - * The program will make 10 collections by default and will write the - * updated counters on stdout every - * {@link Options#PERFORMANCE_COUNTERS_SAMPLE_INTERVAL} seconds. - * <p> - * Parameters also may be specified using <code>-D</code>. See - * {@link Options}. 
- * - * @param args <code>[<i>interval</i> [<i>count</i>]]</code> - * <p> - * <i>interval</i> is the collection interval in seconds and - * defaults to - * {@link Options#DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL}. - * <p> - * <i>count</i> is the #of collections to be made and defaults - * to <code>10</code>. Specify zero (0) to run until halted. - * - * @throws InterruptedException - * @throws RuntimeException - * if the arguments are not valid. - * @throws UnsupportedOperationException - * if no implementation is available for your operating system. - */ - public static void main(final String[] args) throws InterruptedException { - - final int DEFAULT_COUNT = 10; - final int nargs = args.length; - final int interval; - final int count; - if (nargs == 0) { - interval = Integer.parseInt(Options.DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL); - count = DEFAULT_COUNT; - } else if (nargs == 1) { - interval = Integer.parseInt(args[0]); - count = DEFAULT_COUNT; - } else if (nargs == 2) { - interval = Integer.parseInt(args[0]); - count = Integer.parseInt(args[1]); - } else { - throw new RuntimeException("usage: [interval [count]]"); - } - - if (interval <= 0) - throw new RuntimeException("interval must be positive"); - - if (count < 0) - throw new RuntimeException("count must be non-negative"); - - Properties properties = new Properties(System.getProperties()); - - if (nargs != 0) { - - // Override the interval property from the command line. - properties.setProperty(Options.PERFORMANCE_COUNTERS_SAMPLE_INTERVAL,""+interval); - - } - - if(properties.getProperty(Options.PROCESS_NAME)==null) { - - /* - * Set a default process name if none was specified in the - * environment. - * - * Note: Normally the process name is specified explicitly by the - * service which instantiates the performance counter collection for - * that process. We specify a default here since main() is used for - * testing purposes only. - */ - - properties.setProperty(Options.PROCESS_NAME,"testService"); - - } - - final AbstractStatisticsCollector client = AbstractStatisticsCollector - .newInstance( properties ); - - // write counters before we start the client - System.out.println(client.getCounters().toString()); - - System.err.println("Starting performance counter collection: interval=" - + client.interval + ", count=" + count); - - client.start(); - - /* - * HTTPD service reporting out statistics. - */ - AbstractHTTPD httpd = null; - { - final int port = 8080; - if (port != 0) { - try { - httpd = new CounterSetHTTPD(port,client.countersRoot); - } catch (IOException e) { - log.warn("Could not start httpd: port=" + port+" : "+e); - } - } - - } - - int n = 0; - - final long begin = System.currentTimeMillis(); - - // Note: runs until killed when count==0. - while (count == 0 || n < count) { - - Thread.sleep(client.interval * 1000/*ms*/); - - final long elapsed = System.currentTimeMillis() - begin; - - System.err.println("Report #"+n+" after "+(elapsed/1000)+" seconds "); - - System.out.println(client.getCounters().toString()); - - n++; - - } - - System.err.println("Stopping performance counter collection"); - - client.stop(); - - if (httpd != null) - httpd.shutdown(); - - System.err.println("Done"); - - } - - /** - * Converts KB to bytes. - * - * @param kb - * The #of kilobytes. - * - * @return The #of bytes. 
- */ - static public Double kb2b(final String kb) { - - final double d = Double.parseDouble(kb); - - final double x = d * Bytes.kilobyte32; - - return x; - - } - -} Copied: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java (from rev 3601, branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java) =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java (rev 0) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -0,0 +1,718 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +/* + * Created on Mar 13, 2008 + */ + +package com.bigdata.counters.httpd; + +import java.io.IOException; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.util.Arrays; +import java.util.Enumeration; +import java.util.List; +import java.util.Properties; +import java.util.UUID; + +import com.bigdata.counters.*; +import org.apache.log4j.Logger; +import org.apache.system.SystemUtil; + +import com.bigdata.counters.linux.StatisticsCollectorForLinux; +import com.bigdata.counters.win.StatisticsCollectorForWindows; +import com.bigdata.rawstore.Bytes; +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.NicUtil; +import com.bigdata.util.httpd.AbstractHTTPD; + +/** + * Base class for collecting data on a host. The data are described by a + * hierarchical collection of {@link com.bigdata.counters.ICounterSet}s and {@link com.bigdata.counters.ICounter}s. A + * {@link com.bigdata.counters.IRequiredHostCounters minimum set of counters} is defined which SHOULD + * be available for decision-making. Implementations are free to report any + * additional data which they can make available. Reporting is assumed to be + * periodic, e.g., every 60 seconds or so. The purpose of these data is to + * support decision-making concerning the over- and under-utilization of hosts + * in support of load balancing of services deployed over those hosts. + * <p> + * An effort has been made to align the core set of counters for both Windows + * and Un*x platforms so as to support the declared counters on all platforms. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +abstract public class AbstractStatisticsCollector implements IStatisticsCollector { + + protected static final String ps = ICounterSet.pathSeparator; + + final protected static Logger log = Logger + .getLogger(AbstractStatisticsCollector.class); + + /** {@link InetAddress#getCanonicalHostName()} for this host. */ + static final public String fullyQualifiedHostName; + + /** The path prefix under which all counters for this host are found. */ + static final public String hostPathPrefix; + + static { + + String s; + try { + s = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); + } catch(Throwable t) {//for now, maintain same failure logic as used previously + t.printStackTrace(); + s = NicUtil.getIpAddressByLocalHost(); + } + + fullyQualifiedHostName = s; + + hostPathPrefix = ICounterSet.pathSeparator + fullyQualifiedHostName + + ICounterSet.pathSeparator; + + if (log.isInfoEnabled()) { +// log.info("hostname : " + hostname); + log.info("FQDN : " + fullyQualifiedHostName); + log.info("hostPrefix: " + hostPathPrefix); + } + + } + + /** Reporting interval in seconds. */ + final protected int interval; + + /** + * The interval in seconds at which the counter values are read from the + * host platform. + */ + public int getInterval() { + + return interval; + + } + + protected AbstractStatisticsCollector(int interval) { + + if (interval <= 0) + throw new IllegalArgumentException(); + + if(log.isInfoEnabled()) log.info("interval=" + interval); + + this.interval = interval; + + } + +// /** +// * Return the load average for the last minute if available and -1 +// * otherwise. +// * <p> +// * Note: The load average is available on 1.6+ JVMs. +// * +// * @see OperatingSystemMXBean +// */ +// public double getSystemLoadAverage() +// { +// +//// double version = Double.parseDouble(System.getProperty("java.vm.version")); +//// if(version>=1.6) { +// +// double loadAverage = -1; +// +// final OperatingSystemMXBean mbean = ManagementFactory +// .getOperatingSystemMXBean(); +// +// /* +// * Use reflection since method is only available as of 1.6 +// */ +// Method method; +// try { +// method = mbean.getClass().getMethod("getSystemLoadAverage", +// new Class[] {}); +// loadAverage = (Double) method.invoke(mbean, new Object[] {}); +// } catch (SecurityException e) { +// log.warn(e.getMessage(), e); +// } catch (NoSuchMethodException e) { +// // Note: method is only defined since 1.6 +// log.warn(e.getMessage(), e); +// } catch (IllegalAccessException e) { +// log.warn(e.getMessage(), e); +// } catch (InvocationTargetException e) { +// log.warn(e.getMessage(), e); +// } +// +// return loadAverage; +// +// } + + /** + * {@link com.bigdata.counters.CounterSet} hierarchy. + */ + private CounterSet countersRoot; + + /** + * Return the counter hierarchy. The returned hierarchy only includes those + * counters whose values are available from the JVM. This collection is + * normally augmented with platform specific performance counters collected + * using an {@link com.bigdata.counters.AbstractProcessCollector}. + * <p> + * Note: Subclasses MUST extend this method to initialize their own + * counters. 
+ */ + synchronized public CounterSet getCounters() { + + if (countersRoot == null) { + + countersRoot = new CounterSet(); + + // os.arch + countersRoot.addCounter(hostPathPrefix + + IHostCounters.Info_Architecture, + new OneShotInstrument<String>(System.getProperty("os.arch"))); + + // os.name + countersRoot.addCounter(hostPathPrefix + + IHostCounters.Info_OperatingSystemName, + new OneShotInstrument<String>(System.getProperty("os.name"))); + + // os.version + countersRoot.addCounter(hostPathPrefix + + IHostCounters.Info_OperatingSystemVersion, + new OneShotInstrument<String>(System.getProperty("os.version"))); + + // #of processors. + countersRoot.addCounter(hostPathPrefix + + IHostCounters.Info_NumProcessors, + new OneShotInstrument<Integer>(SystemUtil.numProcessors())); + + // processor info + countersRoot.addCounter(hostPathPrefix + + IHostCounters.Info_ProcessorInfo, + new OneShotInstrument<String>(SystemUtil.cpuInfo())); + + } + + return countersRoot; + + } + + /** + * Adds the Info and Memory counter sets under the <i>serviceRoot</i>. + * + * @param serviceRoot + * The {@link CounterSet} corresponding to the service (or + * client). + * @param serviceName + * The name of the service. + * @param serviceIface + * The class or interface that best represents the service or + * client. + * @param properties + * The properties used to configure that service or client. + */ + static public void addBasicServiceOrClientCounters(CounterSet serviceRoot, + String serviceName, Class serviceIface, Properties properties) { + + // Service info. + { + + final CounterSet serviceInfoSet = serviceRoot.makePath("Info"); + + serviceInfoSet.addCounter("Service Type", + new OneShotInstrument<String>(serviceIface.getName())); + + serviceInfoSet.addCounter("Service Name", + new OneShotInstrument<String>(serviceName)); + + AbstractStatisticsCollector.addServiceProperties(serviceInfoSet, + properties); + + } + + // Service per-process memory data + { + + serviceRoot.addCounter( + IProcessCounters.Memory_runtimeMaxMemory, + new OneShotInstrument<Long>(Runtime.getRuntime().maxMemory())); + + serviceRoot.addCounter(IProcessCounters.Memory_runtimeFreeMemory, + new Instrument<Long>() { + public void sample() { + setValue(Runtime.getRuntime().freeMemory()); + } + }); + + serviceRoot.addCounter(IProcessCounters.Memory_runtimeTotalMemory, + new Instrument<Long>() { + public void sample() { + setValue(Runtime.getRuntime().totalMemory()); + } + }); + + // add counters for garbage collection. + AbstractStatisticsCollector + .addGarbageCollectorMXBeanCounters(serviceRoot + .makePath(ICounterHierarchy.Memory_GarbageCollectors)); + + // Moved since counters must be dynamically reattached to reflect pool hierarchy. +// /* +// * Add counters reporting on the various DirectBufferPools. +// */ +// { +// +// serviceRoot.makePath( +// IProcessCounters.Memory + ICounterSet.pathSeparator +// + "DirectBufferPool").attach( +// DirectBufferPool.getCounters()); +// +// } + + } + + } + + /** + * Lists out all of the properties and then report each property using a + * {@link OneShotInstrument}. + * + * @param serviceInfoSet + * The {@link ICounterHierarchy#Info} {@link CounterSet} for the + * service. + * @param properties + * The properties to be reported out. 
+ */ + static public void addServiceProperties(final CounterSet serviceInfoSet, + final Properties properties) { + + final CounterSet ptmp = serviceInfoSet.makePath("Properties"); + + final Enumeration<?> e = properties.propertyNames(); + + while (e.hasMoreElements()) { + + final String name; + final String value; + try { + + name = (String) e.nextElement(); + + value = (String) properties.getProperty(name); + + } catch (ClassCastException ex) { + + log.warn(ex.getMessage()); + + continue; + + } + + if (value == null) + continue; + + ptmp.addCounter(name, new OneShotInstrument<String>(value)); + + } + + } + + /** + * Adds/updates counters relating to JVM Garbage Collection. These counters + * should be located within a per-service path. + * + * @param counterSet + * The counters set that is the direct parent. + */ + static public void addGarbageCollectorMXBeanCounters(CounterSet counterSet) { + + final String name_pools = "Memory Pool Names"; + + final String name_count = "Collection Count"; + + final String name_time = "Cumulative Collection Time"; + + synchronized (counterSet) { + + final List<GarbageCollectorMXBean> list = ManagementFactory + .getGarbageCollectorMXBeans(); + + for (final GarbageCollectorMXBean bean : list) { + + final String name = bean.getName(); + + // counter set for this GC bean (may be pre-existing). + final CounterSet tmp = counterSet.makePath(name); + + synchronized (tmp) { + + // memory pool names. + { + if (tmp.getChild(name_pools) == null) { + + tmp.addCounter(name_pools, + new Instrument<String>() { + + @Override + protected void sample() { + + setValue(Arrays.toString(bean + .getMemoryPoolNames())); + + } + + }); + + } + + } + + // collection count. + { + if (tmp.getChild(name_count) == null) { + tmp.addCounter(name_count, new Instrument<Long>() { + + @Override + protected void sample() { + + setValue(bean.getCollectionCount()); + + } + }); + } + } + + // collection time. + { + if (tmp.getChild(name_time) == null) { + tmp.addCounter(name_time, new Instrument<Long>() { + + @Override + protected void sample() { + + setValue(bean.getCollectionTime()); + + } + }); + } + } + + } + + } + + } + + } + + /** + * Start collecting host performance data -- must be extended by the + * concrete subclass. + */ + public void start() { + + if (log.isInfoEnabled()) + log.info("Starting collection."); + + installShutdownHook(); + + } + + /** + * Stop collecting host performance data -- must be extended by the concrete + * subclass. + */ + public void stop() { + + if (log.isInfoEnabled()) + log.info("Stopping collection."); + + } + + /** + * Installs a {@link Runtime#addShutdownHook(Thread)} that executes + * {@link #stop()}. + * <p> + * Note: The runtime shutdown hook appears to be a robust way to handle ^C + * by providing a clean service termination. However, under eclipse (at + * least when running under Windows) you may find that the shutdown hook + * does not run when you terminate a Java application and that typedef + * process build up in the Task Manager as a result. This should not be the + * case during normal deployment. 
+ */ + protected void installShutdownHook() { + + final Thread t = new Thread() { + + public void run() { + + AbstractStatisticsCollector.this.stop(); + + } + + }; + + t.setDaemon(true); + + Runtime.getRuntime().addShutdownHook(t); + + } + + /** + * Options for {@link AbstractStatisticsCollector} + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + public interface Options { + + /** + * The interval in seconds at which the performance counters of the host + * platform will be sampled (default 60). + */ + public String PERFORMANCE_COUNTERS_SAMPLE_INTERVAL = AbstractStatisticsCollector.class + .getPackage().getName() + + ".interval"; + + public String DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL = "60"; + + /** + * The name of the process whose per-process performance counters are to + * be collected (required, no default). This causes the per-process + * counters to be reported using the path: + * + * <strong>/<i>fullyQualifiedHostname</i>/<i>processName</i>/...</strong> + * <p> + * Note: Services are generally associated with a {@link UUID} and that + * {@link UUID} is generally used as the service name. A single host may + * run many different services and will report the counters for each + * service using the path formed as described above. + */ + public String PROCESS_NAME = AbstractStatisticsCollector.class + .getPackage().getName() + + ".processName"; + + } + + /** + * Create an instance appropriate for the operating system on which the JVM + * is running. + * + * @param properties + * See {@link Options} + * + * @throws UnsupportedOperationException + * If there is no implementation available on the operating + * system on which you are running. + * + * @see Options + */ + public static AbstractStatisticsCollector newInstance( + final Properties properties) { + + final int interval = Integer.parseInt(properties.getProperty( + Options.PERFORMANCE_COUNTERS_SAMPLE_INTERVAL, + Options.DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL)); + + if (interval <= 0) + throw new IllegalArgumentException(); + + final String processName = properties.getProperty(Options.PROCESS_NAME); + + if (processName == null) + throw new IllegalArgumentException( + "Required option not specified: " + Options.PROCESS_NAME); + + final String osname = System.getProperty("os.name").toLowerCase(); + + if(osname.equalsIgnoreCase("linux")) { + + return new StatisticsCollectorForLinux(interval, processName); + + } else if(osname.contains("windows")) { + + return new StatisticsCollectorForWindows(interval); + + } else { + + throw new UnsupportedOperationException( + "No implementation available on " + + System.getProperty("os.getname")); + + } + + } + + /** + * Utility runs the {@link AbstractStatisticsCollector} appropriate for your + * operating system. Before performance counter collection starts the static + * counters will be written on stdout. The appropriate process(es) are then + * started to collect the dynamic performance counters. Collection will + * occur every {@link Options#PERFORMANCE_COUNTERS_SAMPLE_INTERVAL} seconds. + * The program will make 10 collections by default and will write the + * updated counters on stdout every + * {@link Options#PERFORMANCE_COUNTERS_SAMPLE_INTERVAL} seconds. + * <p> + * Parameters also may be specified using <code>-D</code>. See + * {@link Options}. 
+ * + * @param args <code>[<i>interval</i> [<i>count</i>]]</code> + * <p> + * <i>interval</i> is the collection interval in seconds and + * defaults to + * {@link Options#DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL}. + * <p> + * <i>count</i> is the #of collections to be made and defaults + * to <code>10</code>. Specify zero (0) to run until halted. + * + * @throws InterruptedException + * @throws RuntimeException + * if the arguments are not valid. + * @throws UnsupportedOperationException + * if no implementation is available for your operating system. + */ + public static void main(final String[] args) throws InterruptedException { + + final int DEFAULT_COUNT = 10; + final int nargs = args.length; + final int interval; + final int count; + if (nargs == 0) { + interval = Integer.parseInt(Options.DEFAULT_PERFORMANCE_COUNTERS_SAMPLE_INTERVAL); + count = DEFAULT_COUNT; + } else if (nargs == 1) { + interval = Integer.parseInt(args[0]); + count = DEFAULT_COUNT; + } else if (nargs == 2) { + interval = Integer.parseInt(args[0]); + count = Integer.parseInt(args[1]); + } else { + throw new RuntimeException("usage: [interval [count]]"); + } + + if (interval <= 0) + throw new RuntimeException("interval must be positive"); + + if (count < 0) + throw new RuntimeException("count must be non-negative"); + + Properties properties = new Properties(System.getProperties()); + + if (nargs != 0) { + + // Override the interval property from the command line. + properties.setProperty(Options.PERFORMANCE_COUNTERS_SAMPLE_INTERVAL,""+interval); + + } + + if(properties.getProperty(Options.PROCESS_NAME)==null) { + + /* + * Set a default process name if none was specified in the + * environment. + * + * Note: Normally the process name is specified explicitly by the + * service which instantiates the performance counter collection for + * that process. We specify a default here since main() is used for + * testing purposes only. + */ + + properties.setProperty(Options.PROCESS_NAME,"testService"); + + } + + final AbstractStatisticsCollector client = AbstractStatisticsCollector + .newInstance( properties ); + + // write counters before we start the client + System.out.println(client.getCounters().toString()); + + System.err.println("Starting performance counter collection: interval=" + + client.interval + ", count=" + count); + + client.start(); + + /* + * HTTPD service reporting out statistics. + */ + AbstractHTTPD httpd = null; + { + final int port = 8080; + if (port != 0) { + try { + httpd = new CounterSetHTTPD(port,client.countersRoot); + } catch (IOException e) { + log.warn("Could not start httpd: port=" + port+" : "+e); + } + } + + } + + int n = 0; + + final long begin = System.currentTimeMillis(); + + // Note: runs until killed when count==0. + while (count == 0 || n < count) { + + Thread.sleep(client.interval * 1000/*ms*/); + + final long elapsed = System.currentTimeMillis() - begin; + + System.err.println("Report #"+n+" after "+(elapsed/1000)+" seconds "); + + System.out.println(client.getCounters().toString()); + + n++; + + } + + System.err.println("Stopping performance counter collection"); + + client.stop(); + + if (httpd != null) + httpd.shutdown(); + + System.err.println("Done"); + + } + + /** + * Converts KB to bytes. + * + * @param kb + * The #of kilobytes. + * + * @return The #of bytes. 
+ */ + static public Double kb2b(final String kb) { + + final double d = Double.parseDouble(kb); + + final double x = d * Bytes.kilobyte32; + + return x; + + } + +} Property changes on: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/AbstractStatisticsCollector.java ___________________________________________________________________ Added: svn:keywords + Id Date Revision Author HeadURL Added: svn:eol-style + native Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/DummyEventReportingService.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/DummyEventReportingService.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/httpd/DummyEventReportingService.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -8,7 +8,6 @@ import java.util.List; import java.util.UUID; -import com.bigdata.counters.AbstractStatisticsCollector; import com.bigdata.service.Event; import com.bigdata.service.EventReceiver; import com.bigdata.service.IEventReportingService; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/StatisticsCollectorForLinux.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/StatisticsCollectorForLinux.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/StatisticsCollectorForLinux.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -2,7 +2,7 @@ import java.util.UUID; -import com.bigdata.counters.AbstractStatisticsCollector; +import com.bigdata.counters.httpd.AbstractStatisticsCollector; import com.bigdata.counters.CounterSet; import com.bigdata.counters.PIDUtil; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/SysstatUtil.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/SysstatUtil.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/linux/SysstatUtil.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -34,10 +34,9 @@ import java.util.Arrays; import java.util.Map; +import com.bigdata.counters.httpd.AbstractStatisticsCollector; import org.apache.log4j.Logger; -import com.bigdata.counters.AbstractStatisticsCollector; - /** * Some utility methods related to integration with <code>sysstat</code>. 
* Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/win/StatisticsCollectorForWindows.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/win/StatisticsCollectorForWindows.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/win/StatisticsCollectorForWindows.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -1,6 +1,6 @@ package com.bigdata.counters.win; -import com.bigdata.counters.AbstractStatisticsCollector; +import com.bigdata.counters.httpd.AbstractStatisticsCollector; import com.bigdata.counters.CounterSet; /** Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/DiskOnlyStrategy.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/DiskOnlyStrategy.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -39,7 +39,7 @@ import com.bigdata.BigdataStatics; import com.bigdata.btree.BTree.Counter; -import com.bigdata.counters.AbstractStatisticsCollector; +import com.bigdata.counters.httpd.AbstractStatisticsCollector; import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; import com.bigdata.counters.OneShotInstrument; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java 2010-09-21 20:49:50 UTC (rev 3607) @@ -40,7 +40,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import com.bigdata.btree.BTree.Counter; -import com.bigdata.counters.AbstractStatisticsCollector; import com.bigdata.counters.CounterSet; import com.bigdata.counters.Instrument; import com.bigdata.counters.OneShotInstrument; @@ -85,7 +84,7 @@ * </pre> * * @todo report whether or not the on-disk write cache is enabled for each - * platform in {@link AbstractStatisticsCollector}. offer guidence on how + * platform in {@link com.bigdata.counters.httpd.AbstractStatisticsCollector}. offer guidence on how * to disable that write cache. * * @todo The flush of the write cache could be made asynchronous if we had two Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java 2010-09-21 18:45:41 UTC (rev 3606) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/loadbalancer/EmbeddedLoadBalancer.java ... [truncated message content] |
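For readers following the r3607 package move, the relocated collector is driven exactly as in the main() utility shown in the diff above. Below is a minimal sketch of programmatic use; the CollectorDemo class and the "demoService" process name are invented for illustration, and a Linux or Windows host is assumed since newInstance() throws UnsupportedOperationException on other platforms.

import java.util.Properties;

import com.bigdata.counters.httpd.AbstractStatisticsCollector;

public class CollectorDemo { // hypothetical driver class.

    public static void main(final String[] args) throws InterruptedException {

        final Properties properties = new Properties();

        // sample the host platform every 10 seconds.
        properties.setProperty(
                AbstractStatisticsCollector.Options.PERFORMANCE_COUNTERS_SAMPLE_INTERVAL,
                "10");

        // required option: the process name under which counters are reported.
        properties.setProperty(
                AbstractStatisticsCollector.Options.PROCESS_NAME, "demoService");

        // selects the Linux or Windows implementation for this platform.
        final AbstractStatisticsCollector collector = AbstractStatisticsCollector
                .newInstance(properties);

        collector.start();
        try {
            Thread.sleep(30 * 1000/* ms */);
            // dump the counter hierarchy collected so far.
            System.out.println(collector.getCounters().toString());
        } finally {
            collector.stop();
        }
    }
}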
From: <res...@us...> - 2010-09-21 18:45:47
Revision: 3606 http://bigdata.svn.sourceforge.net/bigdata/?rev=3606&view=rev Author: resendes Date: 2010-09-21 18:45:41 +0000 (Tue, 21 Sep 2010) Log Message: ----------- Test coverage for BootStateUtil Modified Paths: -------------- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/util/BootStateUtil.java branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestAll.java Added Paths: ----------- branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestBootStateUtil.java Modified: branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/util/BootStateUtil.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/util/BootStateUtil.java 2010-09-21 16:47:18 UTC (rev 3605) +++ branches/bbb_cleanup/bigdata-core/src/main/java/com/bigdata/util/BootStateUtil.java 2010-09-21 18:45:41 UTC (rev 3606) @@ -45,7 +45,6 @@ import com.sun.jini.config.Config; import net.jini.config.Configuration; import net.jini.config.ConfigurationException; -import net.jini.config.NoSuchEntryException; import net.jini.core.lookup.ServiceID; /** @@ -80,7 +79,7 @@ private File persistenceDir = null; private UUID proxyId = null; private ServiceID serviceId = null; - private String stateKey = null; + private String stateKey = null; //TODO -- not used? public BootStateUtil(final Configuration config, final String componentName, @@ -128,22 +127,6 @@ recoverBootState(defaultServiceId); } - - public BootStateUtil(File persistenceDir, - Class entityImplType, - ServiceID defaultServiceId) - throws IOException, ClassNotFoundException - { - if(entityImplType == null) { - throw new NullPointerException("entityImplType null"); - } - this.entityImplType = entityImplType; - this.logger = Logger.getLogger(this.getClass()); - this.persistenceDir = persistenceDir; - - recoverBootState(defaultServiceId); - } - /** * Returns the entity's unique <i>proxy id</i> that is generated/recoverd * as part of the boot state maintained by this class. @@ -161,44 +144,6 @@ return serviceId; } - /** - * Returns the <code>String</code> representing the path of the - * directory in which the entity's <i>boot state</i> is located. - * If the value returned is <code>null</code>, then the entity - * was configured to run in <i>transient</i> mode. - */ - public String getPersistenceDirectory() { - return (persistenceDir != null) ? persistenceDir.toString() : null; - } - - /** - * Returns the <code>true</code> if the entity was configured to - * run in <i>persistent</i> mode; <code>false</code> otherwise. - */ - public boolean isPersistent() { - return (persistenceDir != null); - } - - /** - * If the entity is currently configured to run in <i>persistent</i> - * mode, returns the name-based key under which an entity's (non-boot) - * state was persisted during previous runs of the entity. If the - * entity is currently configured to run in <i>transient</i> mode, - * a non-<code>null</code>, randomly-generated key value is returned. - */ - public String getStateKey() { - return stateKey; - } - - /** - * Returns the <code>Class</code> type of the entity whose boot state - * is associated with the current instance of this utility. - */ - public Class getType() { - return entityImplType; - } - - /* Performs the actual retrieval of the entity's boot state. This * method is called only once, in this utility's constructor. 
* It recovers the entity's boot state from local persistence storage, @@ -338,6 +283,7 @@ /** * @see java.io.ObjectInputStream#resolveClass */ + @Override protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { @@ -354,7 +300,7 @@ * An interface is specified here to support evolution of new * versions of BootState. */ - interface BootState { + private interface BootState { Class getType(); UUID getProxyId(); String getKey(); Modified: branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestAll.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestAll.java 2010-09-21 16:47:18 UTC (rev 3605) +++ branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestAll.java 2010-09-21 18:45:41 UTC (rev 3606) @@ -76,7 +76,7 @@ suite.addTestSuite(TestByteBufferBitVector.class); suite.addTestSuite( TestCSVReader.class ); - + suite.addTestSuite( TestBootStateUtil.class ); return suite; } Added: branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestBootStateUtil.java =================================================================== --- branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestBootStateUtil.java (rev 0) +++ branches/bbb_cleanup/bigdata-core/src/test/java/com/bigdata/util/TestBootStateUtil.java 2010-09-21 18:45:41 UTC (rev 3606) @@ -0,0 +1,354 @@ +package com.bigdata.util; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.io.StreamCorruptedException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +import net.jini.config.Configuration; +import net.jini.config.ConfigurationFile; +import net.jini.config.EmptyConfiguration; +import net.jini.config.ConfigurationException; +import net.jini.config.NoSuchEntryException; +import net.jini.core.lookup.ServiceID; + +import org.apache.log4j.Logger; + +import junit.framework.TestCase; +import junit.framework.TestCase2; + +public class TestBootStateUtil extends TestCase2 { + + public TestBootStateUtil(String name) { + super(name); + } + + public void testBootStateUtilNullConsArgs() throws SecurityException, NoSuchMethodException { + Configuration dummyConfiguration = EmptyConfiguration.INSTANCE; + String dummyString = "dummy"; + Class<? 
extends TestBootStateUtil> dummyClass = this.getClass(); + Logger dummyLogger = Logger.getLogger(dummyClass); + + // Command lines with a null in first three args are invalid + Object [][] badCommandLines = { + {null, null, null, null}, + {null, null, null, dummyLogger}, + {null, null, dummyClass, null}, + {null, null, dummyClass, dummyLogger}, + {null, dummyString, null, null}, + {null, dummyString, null, dummyLogger}, + {null, dummyString, dummyClass, null}, + {null, dummyString, dummyClass, dummyLogger}, + {dummyConfiguration, null, null, null}, + {dummyConfiguration, null, null, dummyLogger}, + {dummyConfiguration, null, dummyClass, null}, + {dummyConfiguration, null, dummyClass, dummyLogger}, + {dummyConfiguration, dummyString, null, null}, + {dummyConfiguration, dummyString, null, dummyLogger}, + }; + + Constructor cons = + BootStateUtil.class.getConstructor(Configuration.class, + String.class, Class.class, Logger.class); + for (int i=0; i < badCommandLines.length; i++) { + try { + cons.newInstance(badCommandLines[i]); + fail("Successfully called constructor with null arg: " + + Arrays.asList(badCommandLines[i])); + } catch (IllegalArgumentException e) { + fail("unexpected exception: " + e.toString()); + } catch (InstantiationException e) { + fail("unexpected exception: " + e.toString()); + } catch (IllegalAccessException e) { + fail("unexpected exception: " + e.toString()); + } catch (InvocationTargetException e) { + if (! (e.getCause() instanceof NullPointerException)){ + fail("unexpected exception: " + e.getCause().toString()); + } + //Otherwise ignore -- expected + } + } + } + + public void testBootStateUtilGoodConsArgs() throws SecurityException, NoSuchMethodException { + Configuration dummyConfiguration = EmptyConfiguration.INSTANCE; + String dummyString = "dummy"; + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + Logger dummyLogger = Logger.getLogger(dummyClass); + + // Command lines with a non-null in first three args are valid + Object [][] goodCommandLines = { + {dummyConfiguration, dummyString, dummyClass, null}, + {dummyConfiguration, dummyString, dummyClass, dummyLogger}, + }; + + testGoodConsArgs(goodCommandLines); + } + + public void testBootStateUtilNoEntries() + throws SecurityException, NoSuchMethodException, IOException, + ConfigurationException, ClassNotFoundException + { + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + Configuration dummyConfiguration = builder.buildConfiguration(); + Logger dummyLogger = Logger.getLogger(className); + + BootStateUtil bsu = + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + + assertTrue(bsu.getProxyId() != null); + assertTrue(bsu.getServiceId() != null); + + } + + public void testBootStateUtilNonExistentPersistenceDir() + throws SecurityException, NoSuchMethodException, IOException, ConfigurationException + { + Class<? 
extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + builder.setPersistenceDir(persistenceDir); + Configuration dummyConfiguration = builder.buildConfiguration(); + Logger dummyLogger = Logger.getLogger(className); + + // Command lines with a non-null in first three args are valid + Object [][] goodCommandLines = { + {dummyConfiguration, className, dummyClass, null}, + {dummyConfiguration, className, dummyClass, dummyLogger}, + }; + testGoodConsArgs(goodCommandLines); + + assertTrue(persistenceDir.exists()); + persistenceDir.delete(); + } + + public void testBootStateUtilExistentPersistenceDir() + throws SecurityException, NoSuchMethodException, IOException, ConfigurationException + { + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + assertTrue(persistenceDir.mkdirs()); + persistenceDir.deleteOnExit(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + builder.setPersistenceDir(persistenceDir); + Configuration dummyConfiguration = builder.buildConfiguration(); + Logger dummyLogger = Logger.getLogger(className); + + // Command lines with a non-null in first three args are valid + Object [][] goodCommandLines = { + {dummyConfiguration, className, dummyClass, null}, + {dummyConfiguration, className, dummyClass, dummyLogger}, + }; + testGoodConsArgs(goodCommandLines); + persistenceDir.delete(); + } + + public void testBootStateUtilWithEmptyBootState() + throws SecurityException, NoSuchMethodException, IOException, + ConfigurationException, ClassNotFoundException + { + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + assertTrue(persistenceDir.mkdirs()); + persistenceDir.deleteOnExit(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + builder.setPersistenceDir(persistenceDir); + File bootStateFile = new File(persistenceDir, "boot.state"); + bootStateFile.createNewFile(); + Configuration dummyConfiguration = builder.buildConfiguration(); + + try { + BootStateUtil bsu = + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + fail("Created boot state instance with emtpy state information file"); + } catch (IOException e) { + //ignore -- expected + } + + } + + public void testBootStateUtilWithInvalidBootState() + throws SecurityException, NoSuchMethodException, IOException, + ConfigurationException, ClassNotFoundException + { + //Create boot state dir/file with default information + Class<? 
extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + assertTrue(persistenceDir.mkdirs()); + persistenceDir.deleteOnExit(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + builder.setPersistenceDir(persistenceDir); + Configuration dummyConfiguration = builder.buildConfiguration(); + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + // Mangle boot state file by writing junk into the beginning of the file + File bootStateFile = new File(persistenceDir, "boot.state"); + RandomAccessFile raf = new RandomAccessFile(bootStateFile, "rws"); + raf.seek(0); + raf.writeUTF("Bogus data"); + raf.close(); + + //Try to recover from bogus data + try { + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + } catch (StreamCorruptedException e) { + //ignore -- expected + } + } + + public void testBootStateUtilConsWithDefaultServiceId() + throws SecurityException, NoSuchMethodException, IOException, + ConfigurationException, ClassNotFoundException + { + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + ServiceID defaultServiceId = new ServiceID(1L, 2L); + builder.setDefaultServiceId(defaultServiceId); + Configuration dummyConfiguration = builder.buildConfiguration(); + Logger dummyLogger = Logger.getLogger(className); + + BootStateUtil bsu = + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + assertTrue(bsu.getServiceId().equals(defaultServiceId)); + UUID defaultProxyID = + new UUID( + defaultServiceId.getMostSignificantBits(), + defaultServiceId.getLeastSignificantBits()); + assertTrue(bsu.getProxyId().equals(defaultProxyID)); + } + + public void testBootStateUtilDefaultServiceId() + throws SecurityException, NoSuchMethodException, IOException, + ConfigurationException, ClassNotFoundException + { + Class<? extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + File persistenceDir = File.createTempFile(className, ".tmp"); + persistenceDir.delete(); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + ServiceID defaultServiceID = new ServiceID(1L, 1L); + builder.setDefaultServiceId(defaultServiceID); + Configuration dummyConfiguration = builder.buildConfiguration(); + + BootStateUtil bsu = + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + assertTrue(bsu.getServiceId().equals(defaultServiceID)); + UUID proxyID = bsu.getProxyId(); + assertTrue(proxyID.getLeastSignificantBits() == defaultServiceID.getLeastSignificantBits()); + assertTrue(proxyID.getMostSignificantBits() == defaultServiceID.getMostSignificantBits()); + } + + public void testBootStateUtilBadPersistenceDir() + throws SecurityException, NoSuchMethodException, IOException, ConfigurationException, ClassNotFoundException + { + Class<? 
extends TestBootStateUtil> dummyClass = this.getClass(); + String className = dummyClass.getName(); + //create temp file -- should fail dir creation, below + File persistenceDir = File.createTempFile(className, ".tmp"); + ConfigurationBuilder builder = new ConfigurationBuilder(); + builder.setComponentName(className); + builder.setPersistenceDir(persistenceDir); + Configuration dummyConfiguration = builder.buildConfiguration(); + + try { + new BootStateUtil(dummyConfiguration, className, dummyClass, null); + fail("Created BootStateUtil with bad dir file: " + + persistenceDir.getAbsolutePath()); + } catch (IOException e) { + //ignore -- expected + } + } + + + private static void testGoodConsArgs(Object[][] goodCommandLines) + throws SecurityException, NoSuchMethodException + { + Constructor<BootStateUtil> cons = + BootStateUtil.class.getConstructor(Configuration.class, + String.class, Class.class, Logger.class); + for (int i=0; i < goodCommandLines.length; i++) { + try { + cons.newInstance(goodCommandLines[i]); + } catch (IllegalArgumentException e) { + fail("unexpected exception: " + e.toString()); + } catch (InstantiationException e) { + fail("unexpected exception: " + e.toString()); + } catch (IllegalAccessException e) { + fail("unexpected exception: " + e.toString()); + } catch (InvocationTargetException e) { + fail("unexpected exception: " + e.getCause().toString()); + } + } + } + + private static class ConfigurationBuilder { + private String componentName = null; + private File persistenceDir = null; + private ServiceID defaultServiceId = null; + + ConfigurationBuilder() { + + } + + void setComponentName(String componentName) { + this.componentName = componentName; + } + + void setPersistenceDir(File persistenceDirFile) { + this.persistenceDir = persistenceDirFile; + } + + void setDefaultServiceId(ServiceID defaultServiceId) { + this.defaultServiceId = defaultServiceId; + } + + Configuration buildConfiguration() throws ConfigurationException { + List<String> args = new ArrayList<String>(); + args.add("-"); + if(persistenceDir != null) { + args.add( + componentName + ".persistenceDirectory=" + + "\"" + persistenceDir.getAbsolutePath().replace("\\", "\\\\") + + "\""); + } + if (defaultServiceId != null){ + args.add( + componentName + ".defaultServiceId=" + + "new net.jini.core.lookup.ServiceID(" + + defaultServiceId.getMostSignificantBits() + + ", " + + defaultServiceId.getLeastSignificantBits() + + ")"); + } + ConfigurationFile dummyConfiguration = + new ConfigurationFile(args.toArray(new String[0])); + return dummyConfiguration; + + } + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
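As a companion to the new test coverage in r3606, the following sketch shows the construction pattern that TestBootStateUtil exercises through its ConfigurationBuilder. The component name, persistence directory, and BootStateDemo class are invented for illustration; the ".persistenceDirectory" configuration entry and the boot.state file recovered under it are as shown in the tests above.

import java.io.File;
import java.util.UUID;

import net.jini.config.Configuration;
import net.jini.config.ConfigurationFile;
import net.jini.core.lookup.ServiceID;

import org.apache.log4j.Logger;

import com.bigdata.util.BootStateUtil;

public class BootStateDemo { // hypothetical service class.

    public static void main(final String[] args) throws Exception {

        final String component = "com.example.MyService"; // hypothetical component name.
        final File persistenceDir = new File("/var/tmp/myservice"); // hypothetical directory.

        final String[] overrides = new String[] {
                "-", // no configuration source file; entries follow as overrides.
                component + ".persistenceDirectory="
                        + "\"" + persistenceDir.getAbsolutePath() + "\"",
        };

        final Configuration config = new ConfigurationFile(overrides);

        // Generates a proxy id / service id on the first run and recovers them
        // from <persistenceDir>/boot.state on subsequent runs.
        final BootStateUtil bootState = new BootStateUtil(config, component,
                BootStateDemo.class, Logger.getLogger(BootStateDemo.class));

        final UUID proxyId = bootState.getProxyId();
        final ServiceID serviceId = bootState.getServiceId();

        System.out.println("proxyId=" + proxyId + ", serviceId=" + serviceId);
    }
}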
From: <ble...@us...> - 2010-09-21 16:47:28
Revision: 3605 http://bigdata.svn.sourceforge.net/bigdata/?rev=3605&view=rev Author: blevine218 Date: 2010-09-21 16:47:18 +0000 (Tue, 21 Sep 2010) Log Message: ----------- Adding zookeeper suite of tests All tests now run without failures Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/JiniStartSuite.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/config/JiniStartConfigSuite.java Added Paths: ----------- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooBarrier.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooElection.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZooQueue.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZookeeperAccessor.java branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/ZookeeperSuite.java Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-21 15:00:39 UTC (rev 3604) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-21 16:47:18 UTC (rev 3605) @@ -108,7 +108,11 @@ <include>**/Test*Remote.java</include> </includes> <excludes> + <!-- Don't include suites as part of the normal test run --> <exclude>**/*Suite.java</exclude> + + <!-- Don't include anonymous inner classes that happen to match the test pattern --> + <exclude>**/*$*.java</exclude> </excludes> <systemPropertyVariables> Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java 2010-09-21 15:00:39 UTC (rev 3604) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/AbstractFedZooTestCase.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -38,9 +38,8 @@ import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.data.ACL; -import org.junit.Rule; -import org.junit.rules.TestName; + import com.bigdata.jini.start.config.ZookeeperClientConfig; import com.bigdata.jini.start.process.ProcessHelper; import com.bigdata.jini.start.process.ZookeeperProcessHelper; Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/JiniStartSuite.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/JiniStartSuite.java 
2010-09-21 15:00:39 UTC (rev 3604) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/JiniStartSuite.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -1,17 +1,4 @@ -/*********************************************************************** - * - * $Id$ - * - * Copyright (c) 2000-2010 Nokia Corporation. - * - * This material, including documentation and any related computer - * programs, is protected by copyright controlled by Nokia Corporation. - * All rights are reserved. Copying, including reproducing, storing, - * adapting or translating, any or all of this material requires the prior - * written consent of Nokia Corporation. This material also contains - * confidential information which may not be disclosed to others without - * the prior written consent of Nokia Corporation. - **********************************************************************/ + package com.bigdata.jini.start; import org.junit.runner.RunWith; Modified: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/config/JiniStartConfigSuite.java =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/config/JiniStartConfigSuite.java 2010-09-21 15:00:39 UTC (rev 3604) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/jini/start/config/JiniStartConfigSuite.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -1,17 +1,4 @@ -/*********************************************************************** - * - * $Id$ - * - * Copyright (c) 2000-2010 Nokia Corporation. - * - * This material, including documentation and any related computer - * programs, is protected by copyright controlled by Nokia Corporation. - * All rights are reserved. Copying, including reproducing, storing, - * adapting or translating, any or all of this material requires the prior - * written consent of Nokia Corporation. This material also contains - * confidential information which may not be disclosed to others without - * the prior written consent of Nokia Corporation. - **********************************************************************/ + package com.bigdata.jini.start.config; import org.junit.runner.RunWith; Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/AbstractZooTestCase.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -0,0 +1,528 @@ +/* + + Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + + Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * Created on Jan 4, 2009 + */ + +package com.bigdata.zookeeper; + +import java.io.File; +import java.io.IOException; +import java.net.BindException; +import java.net.ServerSocket; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +import com.bigdata.DataFinder; +import net.jini.config.Configuration; +import net.jini.config.ConfigurationProvider; + +import org.apache.log4j.Logger; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.KeeperException.NodeExistsException; +import org.apache.zookeeper.KeeperException.SessionExpiredException; +import org.apache.zookeeper.ZooDefs.Ids; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.server.quorum.QuorumPeerMain; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import com.bigdata.jini.start.MockListener; +import com.bigdata.jini.start.config.ZookeeperServerConfiguration; +import com.bigdata.jini.start.process.ProcessHelper; +import com.bigdata.jini.start.process.ZookeeperProcessHelper; +import com.bigdata.jini.util.ConfigMath; +import com.bigdata.resources.ResourceFileFilter; + +/** + * Abstract base class for zookeeper integration tests. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public abstract class AbstractZooTestCase { + + public final static Logger log = Logger.getLogger(AbstractZooTestCase.class); + + /** + * Constructor + */ + public AbstractZooTestCase() { + + } + + /** + * Return an open port on current machine. Try the suggested port first. If + * suggestedPort is zero, just select a random port + */ + protected static int getPort(final int suggestedPort) throws IOException { + + ServerSocket openSocket; + + try { + openSocket = new ServerSocket(suggestedPort); + } catch (BindException ex) { + // the port is busy, so look for a random open port + openSocket = new ServerSocket(0); + } + + final int port = openSocket.getLocalPort(); + + openSocket.close(); + + return port; + } + + /** + * A configuration file used by some of the unit tests in this package. It + * contains a description of the zookeeper server instance in case we need + * to start one. + */ + protected final String configFile = DataFinder.bestURI("testing/data/com/bigdata/zookeeper/testzoo.config").toASCIIString(); + + /** + * Note: The sessionTimeout is computed as 2x the tickTime as read out of + * the configuration file by {@link #setUp()}. This corresponds to the + * actual sessionTimeout for the client rather than a requested value. + */ + int sessionTimeout; + + /** + * The initial {@link ZooKeeper} instance obtained from the + * {@link #zookeeperAccessor} when the test was setup. + * <p> + * Note: Some unit tests use {@link #expireSession(ZooKeeper)} to expire the + * session associated with this {@link ZooKeeper} instance. + * + * @see ZooKeeperAccessor + */ + protected ZooKeeper zookeeper; + + /** + * Factory for {@link ZooKeeper} instances using the configured hosts and + * session timeout. + */ + protected ZooKeeperAccessor zookeeperAccessor; + + /** + * ACL used by the unit tests. 
+ */ + protected final List<ACL> acl = Ids.OPEN_ACL_UNSAFE; + + protected final MockListener listener = new MockListener(); + + private File dataDir = null; + + // the chosen client port. + int clientPort = -1; + + @Before + public void setUp() throws Exception { + + try { + // find ports that are not in use. + clientPort = getPort(2181/* suggestedPort */); + final int peerPort = getPort(2888/* suggestedPort */); + final int leaderPort = getPort(3888/* suggestedPort */); + final String servers = "1=localhost:" + peerPort + ":" + leaderPort; + + // create a temporary file for zookeeper's state. + dataDir = File.createTempFile("test", ".zoo"); + // delete the file so that it can be re-created as a directory. + dataDir.delete(); + // recreate the file as a directory. + dataDir.mkdirs(); + + final String[] args = new String[] { + // The configuration file (overrides follow). + configFile, + // overrides the clientPort to be unique. + QuorumPeerMain.class.getName() + "." + + ZookeeperServerConfiguration.Options.CLIENT_PORT + "=" + + clientPort, + // overrides servers declaration. + QuorumPeerMain.class.getName() + "." + + ZookeeperServerConfiguration.Options.SERVERS + "=\"" + + servers + "\"", + // overrides the dataDir + QuorumPeerMain.class.getName() + "." + + ZookeeperServerConfiguration.Options.DATA_DIR + + "=new java.io.File(" + + ConfigMath.q(dataDir.toString()) + ")"// + }; + + System.err.println("args=" + Arrays.toString(args)); + + final Configuration config = ConfigurationProvider.getInstance(args); + + final int tickTime = (Integer) config.getEntry(QuorumPeerMain.class + .getName(), ZookeeperServerConfiguration.Options.TICK_TIME, + Integer.TYPE); + + /* + * Note: This is the actual session timeout that the zookeeper service + * will impose on the client. + */ + this.sessionTimeout = tickTime * 2; + + // if necessary, start zookeeper (a server instance). + ZookeeperProcessHelper.startZookeeper(config, listener); + + zookeeperAccessor = new ZooKeeperAccessor("localhost:" + clientPort, sessionTimeout); + + zookeeper = zookeeperAccessor.getZookeeper(); + + try { + + /* + * Since all unit tests use children of this node we must make sure + * that it exists. + */ + zookeeper + .create("/test", new byte[] {}, acl, CreateMode.PERSISTENT); + + } catch (NodeExistsException ex) { + + if (log.isInfoEnabled()) + log.info("/test already exits."); + + } + + } catch (Throwable t) { + + // don't leave around the dataDir if the setup fails. + recursiveDelete(dataDir); + + throw new Exception(t); + + } + + } + + @After + public void tearDown() throws Exception { + try { + if (zookeeperAccessor != null) { + zookeeperAccessor.close(); + } + + for (ProcessHelper h : listener.running) { + // destroy zookeeper service iff we started it. + h.kill(true/* immediateShutdown */); + } + + if (dataDir != null) { + // clean out the zookeeper data dir. + recursiveDelete(dataDir); + } + + } catch (Throwable t) { + log.error(t, t); + } + } + + /** + * Return a new {@link Zookeeper} instance that is connected to the same + * zookeeper ensemble but which has a distinct session. + * + * @return + * @throws IOException + * @throws InterruptedException + */ + protected ZooKeeper getDistinctZooKeeperWithDistinctSession() throws IOException, InterruptedException { + + final ZooKeeper zookeeper2 = new ZooKeeper(zookeeperAccessor.hosts, + zookeeperAccessor.sessionTimeout, new Watcher() { + public void process(WatchedEvent e) { + + } + }); + + /* + * Wait until this instance is connected. 
+ */ + final long timeout = TimeUnit.MILLISECONDS.toNanos(1000/* ms */); + + final long begin = System.nanoTime(); + + while (zookeeper2.getState() != ZooKeeper.States.CONNECTED + && zookeeper2.getState().isAlive()) { + + final long elapsed = System.nanoTime() - begin; + + if (elapsed > timeout) { + Assert.fail("ZooKeeper session did not connect? elapsed="+ TimeUnit.NANOSECONDS.toMillis(elapsed)); + } + + if (log.isInfoEnabled()) { + log.info("Awaiting connected."); + } + + Thread.sleep(100/* ms */); + + } + + if (!zookeeper2.getState().isAlive()) { + Assert.fail("Zookeeper died?"); + } + + if(log.isInfoEnabled()) + log.info("Zookeeper connected."); + + return zookeeper2; + + } + + /** + * Return a new {@link ZooKeeper} instance that is connected to the same + * zookeeper ensemble as the given instance and is using the same session + * but is nevertheless a distinct instance. + * <p> + * Note: This is used by some unit tests to force the given + * {@link ZooKeeper} to report a {@link SessionExpiredException} by closing + * the returned instance. + * + * @param zookeeper + * A zookeeper instance. + * + * @return A distinct instance associated with the same session. + * + * @throws IOException + * @throws InterruptedException + */ + protected ZooKeeper getDistinctZooKeeperForSameSession(ZooKeeper zookeeper1) + throws IOException, InterruptedException { + + final ZooKeeper zookeeper2 = new ZooKeeper(zookeeperAccessor.hosts, + zookeeperAccessor.sessionTimeout, new Watcher() { + public void process(WatchedEvent e) { + + } + }, zookeeper1.getSessionId(), zookeeper1.getSessionPasswd()); + + /* + * Wait until this instance is connected. + */ + final long timeout = TimeUnit.MILLISECONDS.toNanos(1000/* ms */); + final long begin = System.nanoTime(); + + while (zookeeper2.getState() != ZooKeeper.States.CONNECTED + && zookeeper2.getState().isAlive()) { + final long elapsed = System.nanoTime() - begin; + + if (elapsed > timeout) { + Assert.fail("ZooKeeper session did not connect? elapsed=" + TimeUnit.NANOSECONDS.toMillis(elapsed)); + } + + if (log.isInfoEnabled()) { + log.info("Awaiting connected."); + } + + Thread.sleep(100/* ms */); + } + + if (!zookeeper2.getState().isAlive()) { + Assert.fail("Zookeeper died?"); + } + + if(log.isInfoEnabled()) + log.info("Zookeeper connected."); + + return zookeeper2; + } + + /** + * Expires the session associated with the {@link Zookeeper} client + * instance. + * + * @param zookeeper + * + * @throws IOException + * @throws InterruptedException + */ + protected void expireSession(ZooKeeper zookeeper) throws IOException,InterruptedException { + + /* + * Obtain a distinct ZooKeeper instance associated with the _same_ + * session. + */ + final ZooKeeper zookeeper2 = getDistinctZooKeeperForSameSession(zookeeper); + + /* + * Close this instance, forcing the original instance to report a + * SessionExpiredException. Note that this is not synchronous so we need + * to wait until the original ZooKeeper instance notices that its + * session is expired. + */ + zookeeper2.close(); + + /* + * Wait up to the session timeout and then wait some more so that the + * events triggered by that timeout have time to propagate. + */ + final long timeout = TimeUnit.MILLISECONDS.toNanos(sessionTimeout * 2); + final long begin = System.nanoTime(); + + while (zookeeper.getState().isAlive()) { + final long elapsed = System.nanoTime() - begin; + + if (elapsed > timeout) { + Assert.fail("ZooKeeper session did not expire? 
elapsed=" + + TimeUnit.NANOSECONDS.toMillis(elapsed) + + ", sessionTimeout=" + sessionTimeout); + } + + if(log.isInfoEnabled()) { + log.info("Awaiting session expired."); + } + + Thread.sleep(500/* ms */); + } + + if (log.isInfoEnabled()) { + final long elapsed = System.nanoTime() - begin; + + log.info("Session was expired: elapsed=" + + TimeUnit.NANOSECONDS.toMillis(elapsed) + + ", sessionTimeout=" + sessionTimeout); + } + } + + /** + * Class used to test concurrency primitives. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ + abstract protected class ClientThread extends Thread { + + private final Thread main; + protected final ReentrantLock lock; + + /** + * + * @param main + * The thread in which the test is running. + * @param lock + * A lock. + */ + public ClientThread(final Thread main, final ReentrantLock lock) { + + if (main == null) + throw new IllegalArgumentException(); + + if (lock == null) + throw new IllegalArgumentException(); + + this.main = main; + this.lock = lock; + setDaemon(true); + } + + public void run() { + try { + run2(); + } catch (Throwable t) { + + // log error since won't be seen otherwise. + log.error(t.getLocalizedMessage(), t); + + // interrupt the main thread. + main.interrupt(); + } + } + + abstract void run2() throws Exception; + } + + /** + * Recursively removes any files and subdirectories and then removes the + * file (or directory) itself. + * <p> + * Note: Files that are not recognized will be logged by the + * {@link ResourceFileFilter}. + * + * @param f + * A file or directory. + */ + private void recursiveDelete(final File f) { + + if (f.isDirectory()) { + + final File[] children = f.listFiles(); + + if (children == null) { + // The directory does not exist. + return; + } + + for (int i = 0; i < children.length; i++) { + recursiveDelete(children[i]); + } + } + + if(log.isInfoEnabled()) + log.info("Removing: " + f); + + if (f.exists() && !f.delete()) { + log.warn("Could not remove: " + f); + } + } + + /** + * Recursive delete of znodes. + * + * @param zpath + * + * @throws KeeperException + * @throws InterruptedException + */ + protected void destroyZNodes(final ZooKeeper zookeeper, final String zpath) + throws KeeperException, InterruptedException { + + // System.err.println("enter : " + zpath); + + final List<String> children = zookeeper.getChildren(zpath, false); + + for (String child : children) { + destroyZNodes(zookeeper, zpath + "/" + child); + } + + if(log.isInfoEnabled()) + log.info("delete: " + zpath); + + zookeeper.delete(zpath, -1/* version */); + } + + protected String getName() { + return getClass().getSimpleName(); + } +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestHierarchicalZNodeWatcher.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -0,0 +1,317 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jan 12, 2009 + */ + +package com.bigdata.zookeeper; + +import java.util.concurrent.TimeUnit; + +import junit.framework.AssertionFailedError; + +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher.Event; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test suite for {@link HierarchicalZNodeWatcher}. + * <p> + * Note: Zookeeper has other events that could appear during these unit tests, + * such as the connection status change events. However the unit test are not + * expecting such events during testing. If they appear, those events could + * cause test failures when we examine the queue. Basically, the tests are not + * robust if your zookeeper client is flakey. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestHierarchicalZNodeWatcher extends AbstractZooTestCase implements + HierarchicalZNodeWatcherFlags { + + /** + * + */ + public TestHierarchicalZNodeWatcher() { + + } + + + protected String zroot; + + /** + * Sets up a unique {@link #zroot}. + */ + @Before + public void setUp() throws Exception { + super.setUp(); + zroot = zookeeper.create("/test", new byte[0], acl, CreateMode.PERSISTENT_SEQUENTIAL); + } + + /** + * Destroys the {@link #zroot} and its children. + */ + @After + public void tearDown() throws Exception { + if (zroot != null) { + destroyZNodes(zookeeperAccessor.getZookeeper(), zroot); + } + super.tearDown(); + } + + /** + * Test when the node at the root of the hierarchy does not exist when we + * setup the watcher, then create the znode and verify that we see the event + * in the queue. + * + * @throws KeeperException + * @throws InterruptedException + */ + @Test + public void test_noticeCreate() throws KeeperException, + InterruptedException { + + WatchedEvent e; + + final String zroot = this.zroot + "/" + "a"; + final HierarchicalZNodeWatcher watcher = new HierarchicalZNodeWatcher( + zookeeper, zroot, EXISTS) { + + @Override + protected int watch(String path, String child) { + return NONE; + } + }; + + Assert.assertTrue(watcher.queue.isEmpty()); + Assert.assertTrue(watcher.isWatched(zroot)); + + /* + * Create the zroot and verify the event is placed into the queue. + */ + + zookeeper.create(zroot, new byte[0], acl, CreateMode.PERSISTENT); + + // look for the create event. + e= watcher.queue.poll(1000, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(e); + Assert.assertEquals(zroot,e.getPath()); + Assert.assertEquals(Event.EventType.NodeCreated,e.getType()); + Assert.assertTrue(watcher.queue.isEmpty()); + + /* + * Delete the znode and verify the event is placed into the queue. + */ + + zookeeper.delete(zroot, -1/*version*/); + + // look for the delete event. 
+ e = watcher.queue.poll(1000, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(e); + Assert.assertEquals(zroot,e.getPath()); + Assert.assertEquals(Event.EventType.NodeDeleted, e.getType()); + Assert.assertTrue(watcher.queue.isEmpty()); + + /* + * Re-create the zroot and verify the event is placed into the queue + * (this makes sure that we are keeping the watch in place). + */ + + zookeeper.create(zroot, new byte[0], acl, CreateMode.PERSISTENT); + + // look for the create event. + e = watcher.queue.poll(1000, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(e); + Assert.assertEquals(zroot, e.getPath()); + Assert.assertEquals(Event.EventType.NodeCreated, e.getType()); + Assert.assertTrue(watcher.queue.isEmpty()); + + /* + * cancel the watcher and verify that it does not notice a delete of the + * zroot after it was cancelled. + */ + watcher.cancel(); + + /* + * Delete the znode - no event should appear. + */ + + zookeeper.delete(zroot, -1/*version*/); + + // look for the delete event. + e = watcher.queue.poll(1000, TimeUnit.MILLISECONDS); + + Assert.assertNull(e); + + } + + /** + * Unit test verifies that we notice specific children as they are created + * and destroyed. "red" znodes are ignored. if the znode is "blue" then we + * extend the watch over its children as well. + * + * @throws KeeperException + * @throws InterruptedException + * + * @todo test queue when data is changed. + */ + @Test + public void test_noticeChildren() throws InterruptedException, KeeperException { + + WatchedEvent e; + + HierarchicalZNodeWatcher watcher = new HierarchicalZNodeWatcher(zookeeper, zroot, EXISTS | CHILDREN) { + + @Override + protected int watch(String path, String child) { + + if (child.equals("red")) + return NONE; + + if (child.equals("blue")) + return EXISTS | CHILDREN; + + if (child.equals("green")) + return DATA; + + throw new AssertionFailedError("Not expecting: path=" + path + + ", child=" + child); + + } + + }; + + zookeeper.create(zroot + "/" + "red", new byte[0], acl, CreateMode.PERSISTENT); + + e = watcher.queue.poll(1000,TimeUnit.MILLISECONDS); + Assert.assertNotNull(e); + Assert.assertEquals(zroot,e.getPath()); + Assert.assertEquals(Event.EventType.NodeChildrenChanged,e.getType()); + + zookeeper.create(zroot + "/" + "blue", new byte[0], acl, + CreateMode.PERSISTENT); + +// e = watcher.queue.poll(1000,TimeUnit.MILLISECONDS); +// assertNotNull(e); +// assertEquals(zroot+"/"+"red",e.getPath()); +// assertEquals(Event.EventType.NodeCreated,e.getType()); + + zookeeper.create(zroot + "/" + "blue" + "/" + "green", new byte[0], + acl, CreateMode.PERSISTENT); + + Assert.assertEquals(NONE, watcher.getFlags(zroot + "/" + "red")); + Assert.assertEquals(EXISTS | CHILDREN, watcher.getFlags(zroot + "/" + "blue")); + Assert.assertEquals(DATA, watcher.getFlags(zroot + "/" + "blue" + "/" + "green")); + + // clear any events in the queue. + watcher.queue.clear(); + + // update the data. + zookeeper.setData(zroot + "/" + "blue" + "/" + "green", new byte[] { 1 }, -1/* version */); + + // verify event. + e = watcher.queue.poll(1000,TimeUnit.MILLISECONDS); + Assert.assertNotNull(e); + Assert.assertEquals(zroot + "/" + "blue" + "/" + "green",e.getPath()); + Assert.assertEquals(Event.EventType.NodeDataChanged,e.getType()); + + // won't been seen since a "red" path. 
+ zookeeper.create(zroot + "/" + "red" + "/" + "blue", new byte[0], acl, CreateMode.PERSISTENT); + + Assert.assertEquals(NONE, watcher.getFlags(zroot + "/" + "red" + "/" + "blue")); + + /* + * There should be three watched znodes: zroot; zroot/blue; and + * zroot/blue/green + */ + Assert.assertEquals(3,watcher.getWatchedSize()); + + watcher.cancel(); + + Assert.assertEquals(0,watcher.getWatchedSize()); + Assert.assertFalse(watcher.isWatched(zroot)); + Assert.assertFalse(watcher.isWatched(zroot+"/"+"blue")); + Assert.assertFalse(watcher.isWatched(zroot+"/"+"blue"+"/"+"green")); + + Assert.assertTrue(watcher.queue.isEmpty()); + + /* + * Setup a new watcher that wathes all paths but the red ones. The + * znodes already exist. Now verify that we receive various notices when + * the watcher is created. + */ + watcher = new HierarchicalZNodeWatcher(zookeeper, zroot, ALL, true/* pumpMockEventsDuringStartup */) { + + @Override + protected int watch(String path, String child) { + + return ALL; + + } + +// @Override +// protected void addedWatch(String path, int flags) { +// +// placeMockEventInQueue(path, flags); +// +// } + + }; + + /* + * We created 4 znodes plus the pre-existing zroot, so there should be + * five nodes picked up by the new watcher. + */ + final String[] nodes = new String[] { + zroot, + zroot + "/" + "red", + zroot + "/" + "red" + "/" + "blue", + zroot + "/" + "blue", + zroot + "/" + "green" + "/" + "green", + }; + + // verify new watched size. + Assert.assertEquals(nodes.length, watcher.getWatchedSize()); + + /* + * Verify mock events were pumped into the queue. Since we specified + * ALL, there should be three events for each znode. + */ + Assert.assertEquals(3 * 5, watcher.queue.size()); + + while ((e = watcher.queue.poll()) != null) { + System.err.println("mockEvent: "+e); + } + + watcher.cancel(); + } +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestUnknownChildrenWatcher.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -0,0 +1,53 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jan 22, 2009 + */ + +package com.bigdata.zookeeper; + +import org.junit.Test; + +/** + * Test suite for watcher for unknown children (those not previously seen). 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + */ +public class TestUnknownChildrenWatcher extends AbstractZooTestCase { + + /** + * + */ + public TestUnknownChildrenWatcher() { + } + + + /** @todo place holder for unit tests. */ + @Test + public void test_nothing() { + + } + +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZLockImpl.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -0,0 +1,626 @@ +/* + + Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + + Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * Created on Jan 7, 2009 + */ + +package com.bigdata.zookeeper; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.KeeperException.NoNodeException; +import org.apache.zookeeper.KeeperException.SessionExpiredException; +import org.junit.Assert; +import org.junit.Test; + +import com.bigdata.util.concurrent.DaemonThreadFactory; + +/** + * Test suite for {@link ZLockImpl}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + * + * @todo do test w/ ensemble where we kill the server to which the client is + * connected and verify that the client transparently reconnects to + * another server and continues to await the lock. + */ +public class TestZLockImpl extends AbstractZooTestCase { + + /** + * + */ + public TestZLockImpl() { + } + + + /** + * Simple lock protocol test. + * + * @todo test w/ timeout. + * + * @throws KeeperException + * @throws InterruptedException + */ + @Test + public void test_lock() throws KeeperException, InterruptedException { + + final Thread mainThread = Thread.currentThread(); + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + try { + /* + * verify no such node (should be unique and therefore not + * preexist). + */ + zookeeper.getChildren(zpath, false); + Assert.fail("zpath exists: " + zpath); + } catch (NoNodeException ex) { + // ignore. 
+ } + + // instances that can contend for the lock. + final ZLockImpl lock1 = ZLockImpl.getLock(zookeeper, zpath, acl); + + // znode not created during ctor (more robust this way). + Assert.assertNull(zookeeper.exists(zpath, false)); + + final ZLockImpl lock2 = ZLockImpl.getLock(zookeeper, zpath, acl); + + // znode not created during ctor (more robust this way). + Assert.assertNull(zookeeper.exists(zpath, false)); + + // obtain the lock. + lock1.lock(); + + log.info("lock1 was granted in main thread"); + + // one child in the queue - the one that holds the lock. + Assert.assertEquals(1, zookeeper.getChildren(zpath, false).size()); + + Assert.assertTrue(lock1.isLockHeld()); + + // run a thread that will contend for the lock. + final Thread t2 = new Thread() { + + public void run() { + + try { + + log.info("Starting 2nd thread."); + + Assert.assertTrue(lock1.isLockHeld()); + + log.info("Should block seeking lock2 in 2nd thread."); + + lock2.lock(); + + log.info("lock2 was granted"); + + // one child in the queue - the one that holds the lock. + Assert.assertEquals(1, zookeeper.getChildren(zpath, false).size()); + + } catch (Throwable t) { + + // log error + log.error(t, t); + + // interrupt the main thread. + mainThread.interrupt(); + + } + } + + }; + + t2.setDaemon(true); + + t2.start(); + + // wait until the other child is also contending for the lock + for (int i = 0; i < 10; i++) { + + final int n = zookeeper.getChildren(zpath, false).size(); + + log.info("nchildren=" + n); + + if (n == 2) + break; + + Thread.sleep(10/* ms */); + + } + + // should be exactly two children in the queue. + Assert.assertEquals(2, zookeeper.getChildren(zpath, false).size()); + + log.info("Will release lock1."); + + // release the lock. + lock1.unlock(); + + log.info("Released lock1."); + + // wait until the other thread gains the lock. + for (int i = 0; i < 10 && !lock2.isLockHeld(); i++) { + + Thread.sleep(10/* ms */); + + } + + log.info("Verifying lock2 is held."); + + // verify lock is held. + Assert.assertTrue(lock2.isLockHeld()); + + log.info("Verifying queue contains only lock2."); + + // verify one child in the queue. + Assert.assertEquals(1, zookeeper.getChildren(zpath, false).size()); + + log.info("Releasing lock2 from main thread."); + + // release the lock. + lock2.unlock(); + + log.info("Verifying queue is empty."); + + // queue is empty. + Assert.assertEquals(0, zookeeper.getChildren(zpath, false).size()); + + log.info("Test done."); + + } + + /** + * Unit test explores behavior when someone stomps on the zchild while a + * lock is held and another lock is in the queue (note that you can not + * delete the parent without deleting the children in zookeeper, so you will + * always see a queue purged of children before the queue node itself is + * deleted). + * + * @throws InterruptedException + * @throws KeeperException + */ + @Test + public void test_breakLock() throws KeeperException, InterruptedException { + + final Thread mainThread = Thread.currentThread(); + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + try { + /* + * verify no such node (should be unique and therefore not + * preexist). + */ + zookeeper.getChildren(zpath, false); + Assert.fail("zpath exists: " + zpath); + } catch (NoNodeException ex) { + // ignore. + } + + // instances that can contend for the lock. + final ZLockImpl lock1 = ZLockImpl.getLock(zookeeper, zpath, acl); + + // znode not created during ctor (more robust this way). 
+ Assert.assertNull(zookeeper.exists(zpath, false)); + + final ZLockImpl lock2 = ZLockImpl.getLock(zookeeper, zpath, acl); + + // znode not created during ctor (more robust this way). + Assert.assertNull(zookeeper.exists(zpath, false)); + + // obtain the lock. + lock1.lock(); + + log.info("lock1 was granted"); + + // one child in the queue - the one that holds the lock. + Assert.assertEquals(1, zookeeper.getChildren(zpath, false).size()); + + Assert.assertTrue(lock1.isLockHeld()); + + // run a thread that will contend for the lock. + final Thread t2 = new Thread() { + + public void run() { + + try { + + Assert.assertTrue(lock1.isLockHeld()); + + lock2.lock(); + + log.info("lock2 granted."); + + } catch (Throwable t) { + + // log error + log.error(t, t); + + // interrupt the main thread. + mainThread.interrupt(); + + } + + } + + }; + + t2.setDaemon(true); + + t2.start(); + + // wait until the other child is also contending for the lock + for (int i = 0; i < 10 + && zookeeper.getChildren(zpath, false).size() != 2; i++) { + + Thread.sleep(10/* ms */); + + } + + // should be exactly two children in the queue. + Assert.assertEquals(2, zookeeper.getChildren(zpath, false).size()); + + // break the lock. + { + final String z = zpath + "/" + + ((ZLockImpl) lock1).getLockRequestZNode(); + log.info("breaking lock: deleting " + z); + zookeeper.delete(z, -1/* version */); + log.info("broke lock: deleted " + z); + } + + Assert.assertTrue(!lock1.isLockHeld()); + + Assert.assertTrue(lock2.isLockHeld()); + + log.info("lock1.unlock() - begin"); + + lock1.unlock(); + + log.info("lock1.unlock() - done"); + + Assert.assertFalse(lock1.isLockHeld()); + + } + + /** + * Unit test verifies that a {@link Thread} holding a {@link ZLock} may NOT + * acquire it again. + * + * @throws InterruptedException + * @throws KeeperException + */ + @Test + public void test_notReentrant() throws KeeperException, + InterruptedException { + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + final ZLockImpl zlock = ZLockImpl.getLock(zookeeper, zpath, acl); + + zlock.lock(); + + Assert.assertTrue(zlock.isLockHeld()); + + try { + zlock.lock(500, TimeUnit.MILLISECONDS); + Assert.fail("Expecting: " + TimeoutException.class); + } catch (TimeoutException ex) { + log.info("Expected exception: " + ex); + } + + // Assert.assertTrue(zlock.isLockHeld()); + // + // zlock.unlock(); + // + // Assert.assertTrue(zlock.isLockHeld()); + // + // zlock.unlock(); + // + // Assert.assertFalse(zlock.isLockHeld()); + + } + + /** + * Unit test where the session is expired before the lock is requested. + * lock() should throw out the {@link SessionExpiredException}. We then + * verify that we can obtain a new {@link ZooKeeper} instance associated + * with a new session and request and obtain the zlock. + * + * @throws IOException + * @throws InterruptedException + * @throws KeeperException + */ + @Test + public void test_sessionExpiredBeforeLockRequest() throws IOException, + KeeperException, InterruptedException { + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + expireSession(zookeeper); + + { + + // obtain a lock object. 
+ final ZLockImpl zlock = ZLockImpl.getLock(zookeeper, zpath, acl); + + try { + + zlock.lock(); + + Assert.fail("Expecting: " + SessionExpiredException.class); + + } catch (SessionExpiredException ex) { + + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + + } + + } + + // get a new instance associated with a new session. + zookeeper = zookeeperAccessor.getZookeeper(); + + // obtain a lock object. + final ZLockImpl zlock = ZLockImpl.getLock(zookeeper, zpath, acl); + zlock.lock(); + try { + + } finally { + zlock.unlock(); + } + + } + + /** + * Unit test where the session is expired while the caller is holding the + * lock. The test verifies that isLockHeld() throws a + * {@link SessionExpiredException}. + * + * @throws IOException + * @throws InterruptedException + * @throws KeeperException + */ + @Test + public void test_sessionExpiredWhileHoldingLock() throws IOException, + KeeperException, InterruptedException { + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + // obtain a lock object. + final ZLockImpl zlock = ZLockImpl.getLock(zookeeper, zpath, acl); + zlock.lock(); + try { + + Assert.assertTrue(zlock.isLockHeld()); + + expireSession(zookeeper); + + try { + + zlock.isLockHeld(); + + Assert.fail("Expecting: " + SessionExpiredException.class); + + } catch (SessionExpiredException ex) { + + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + + } + + } finally { + /* + * Note: This also verifies that NO exception is thrown here even + * though the session has been expired. This is done to avoid have + * the expired session problem appear to arise from unlock() when it + * matters more that people see if when testing to verify that they + * hold the lock. + */ + zlock.unlock(); + } + + } + + /** + * Unit test for destroying a lock which is actively contended by other + * processes. + * + * @throws InterruptedException + * @throws KeeperException + * @throws ExecutionException + */ + @Test + public void test_destroyLock() throws KeeperException, + InterruptedException, ExecutionException { + + // a node that is guarenteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + final int ntasks = 4; + + final ExecutorService service = Executors.newFixedThreadPool(ntasks, + DaemonThreadFactory.defaultThreadFactory()); + + final LinkedList<Callable<Void>> tasks = new LinkedList<Callable<Void>>(); + + for (int i = 0; i < ntasks; i++) { + + tasks.add(new Callable<Void>() { + + /** + * Contends for the zlock. + * <p> + * Note: Task uses a distinct ZooKeeper having a distinct + * session. + */ + public Void call() throws Exception { + + final ZooKeeper zookeeper2 = getDistinctZooKeeperWithDistinctSession(); + + // obtain a lock object. + final ZLockImpl zlock = ZLockImpl.getLock(zookeeper2, + zpath, acl); + + zlock.lock(); + try { + + Assert.fail("Should not have obtained the lock."); + + } finally { + + zlock.unlock(); + + } + + return null; + + } + + }); + + } + + final List<Future<Void>> futures = new LinkedList<Future<Void>>(); + try { + + // obtain a lock object. + final ZLockImpl zlock = ZLockImpl.getLock(zookeeper, zpath, acl); + + zlock.lock(); + try { + + // verify that the main thread holds the zlock. + Assert.assertTrue(zlock.isLockHeld()); + + // start the other tasks. they will contend for the same zlock. 
+ for (Callable<Void> task : tasks) { + + futures.add(service.submit(task)); + + } + + // wait until everyone is contending for the lock. + int queueSize; + while ((queueSize = zlock.getQueue().length) < ntasks + 1) { + + if (log.isInfoEnabled()) + log.info("Waiting for other processes: queueSize=" + + queueSize); + + Thread.sleep(100/* ms */); + + } + + if (log.isInfoEnabled()) + log.info("Main thread will now destroy the lock."); + + zlock.destroyLock(); + + // verify lock no longer held. + Assert.assertFalse(zlock.isLockHeld()); + + } finally { + + // note: should quitely succeed if the lock was destroyed. + zlock.unlock(); + + } + + } finally { + + service.shutdownNow(); + + } + + // verify all tasks started. + Assert.assertEquals(ntasks, futures.size()); + + // check their futures. + for (Future<Void> f : futures) { + + try { + + f.get(); + + } catch (ExecutionException ex) { + + final Throwable cause = ex.getCause(); + + if (cause != null && cause instanceof InterruptedException) { + + /* + * When the lock znode is destroyed, the other processes + * contending for the zlock will notice in their + * ZLockWatcher. The ZLockWatcher will be set its + * [cancelled] flag and an InterruptedException will be + * thrown out of lock(). + */ + + if (log.isInfoEnabled()) { + + log.info("Ignoring expected exception: " + cause); + + } + + continue; + + } + + /* + * Rethrow the execption. + * + * Note: If any of the tasks gains the lock, then it will throw + * an AssertionFailedError. + */ + + throw ex; + + } + + } + } +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeCreatedWatcher.java 2010-09-21 16:47:18 UTC (rev 3605) @@ -0,0 +1,113 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Jan 7, 2009 + */ + +package com.bigdata.zookeeper; + +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooDefs.Ids; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test suite for {@link ZNodeCreatedWatcher}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id$ + * + * @todo do test where we kill and then restart the server while awaiting the + * event and verify that we reconnect to the server and continue to await + * the event. 
+ * + * @todo do test w/ ensemble where we kill the server to which the client is + * connected and verify that reconnect to another server and continue to + * await the event. + */ +public class TestZNodeCreatedWatcher extends AbstractZooTestCase { + + /** + * + */ + public TestZNodeCreatedWatcher() { + } + + + /** + * Verify that we can detect the create of a znode. + * + * @throws KeeperException + * @throws InterruptedException + */ + @Test + public void test_awaitCreate() throws KeeperException, InterruptedException { + + // a node that is guaranteed to be unique w/in the test namespace. + final String zpath = "/test/" + getName() + UUID.randomUUID(); + + final Thread mainThread = Thread.currentThread(); + + final Thread t = new Thread() { + + public void run() { + + try { + + Thread.sleep(100/*ms*/); + + zookeeper.create(zpath, new byte[0], Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + + } catch (Throwable t) { + + // log error + log.error(t, t); + + // interrupt the main thread. + mainThread.interrupt(); + + } + } + + }; + + t.setDaemon(true); + + t.start(); + + ZNodeCreatedWatcher.awaitCreate(zookeeper, zpath, 250, + TimeUnit.MILLISECONDS); + +// ZNodeCreatedWatcher.awaitCreate(zookeeper, zpath, 250, +// TimeUnit.MILLISECONDS); + + // verify znode was created. + Assert.assertNotNull(zookeeper.exists(zpath, false)); + } +} Copied: branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java (from rev 3580, branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java) =================================================================== --- branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDeletedWatcher.java (rev 0) +++ branches/maven_scaleout/bigdata-integ/src/test/java/com/bigdata/zookeeper/TestZNodeDele... [truncated message content] |
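The most reusable trick in the AbstractZooTestCase diff above is expireSession(): a ZooKeeper session can be expired on demand by opening a second client handle bound to the same session id and password and then closing that handle, which ends the shared session on the server. A condensed sketch of the pattern follows; the class and method names here are illustrative rather than part of the committed code, and it assumes a reachable ensemble plus the stock org.apache.zookeeper client API:

    import java.util.concurrent.TimeUnit;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class SessionExpiryUtil {

        /**
         * Expires the session of [zk] by binding a second handle to the SAME
         * session (same id and password) and closing it. Closing the second
         * handle ends the shared session server-side; the original handle
         * then observes the expiry asynchronously.
         */
        public static void expireSession(final ZooKeeper zk, final String hosts,
                final int sessionTimeout) throws Exception {

            final ZooKeeper sameSession = new ZooKeeper(hosts, sessionTimeout,
                    new Watcher() {
                        public void process(final WatchedEvent e) {
                            // events are ignored in this sketch.
                        }
                    }, zk.getSessionId(), zk.getSessionPasswd());

            // Wait until the second handle is actually connected, otherwise
            // close() may never reach the server.
            while (sameSession.getState() != ZooKeeper.States.CONNECTED
                    && sameSession.getState().isAlive()) {
                Thread.sleep(50/* ms */);
            }

            // Closing the second handle closes the shared session.
            sameSession.close();

            // Expiry propagates asynchronously: poll the original handle.
            final long deadline = System.nanoTime()
                    + TimeUnit.MILLISECONDS.toNanos(2L * sessionTimeout);

            while (zk.getState().isAlive()) {
                if (System.nanoTime() > deadline)
                    throw new IllegalStateException("Session did not expire.");
                Thread.sleep(50/* ms */);
            }
        }
    }

This is exactly what tests such as test_sessionExpiredWhileHoldingLock rely on: every handle bound to the expired session sees a SessionExpiredException on its next operation.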
From: <ble...@us...> - 2010-09-21 15:00:46
Revision: 3604 http://bigdata.svn.sourceforge.net/bigdata/?rev=3604&view=rev Author: blevine218 Date: 2010-09-21 15:00:39 +0000 (Tue, 21 Sep 2010) Log Message: ----------- added surefire reporting Modified Paths: -------------- branches/maven_scaleout/bigdata-integ/pom.xml Modified: branches/maven_scaleout/bigdata-integ/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-21 14:28:02 UTC (rev 3603) +++ branches/maven_scaleout/bigdata-integ/pom.xml 2010-09-21 15:00:39 UTC (rev 3604) @@ -220,4 +220,29 @@ </dependency> </dependencies> + <reporting> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-report-plugin</artifactId> + <version>2.6</version> + <reportSets> + <reportSet> + <id>integration-tests</id> + <reports> + <report>report-only</report> + </reports> + <configuration> + <outputName>failsafe-report</outputName> + <reportsDirectories> + <reportsDirectory>${project.build.directory}/failsafe-reports</reportsDirectory> + </reportsDirectories> + </configuration> + </reportSet> + </reportSets> + </plugin> + </plugins> + </reporting> + + </project> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-21 14:28:08
Revision: 3603 http://bigdata.svn.sourceforge.net/bigdata/?rev=3603&view=rev Author: blevine218 Date: 2010-09-21 14:28:02 +0000 (Tue, 21 Sep 2010) Log Message: ----------- move checkstyle and findbugs out of parent POM and into bigdata-core POM so that bigdata-integ project does not inherit these configs. Modified Paths: -------------- branches/maven_scaleout/pom.xml Modified: branches/maven_scaleout/pom.xml =================================================================== --- branches/maven_scaleout/pom.xml 2010-09-21 14:27:27 UTC (rev 3602) +++ branches/maven_scaleout/pom.xml 2010-09-21 14:28:02 UTC (rev 3603) @@ -102,28 +102,11 @@ <reporting> <plugins> - <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-report-plugin</artifactId> <version>2.5</version> </plugin> - - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-checkstyle-plugin</artifactId> - <version>2.5</version> - <configuration> - <configLocation>${basedir}/src/main/config/checkstyle.xml</configLocation> - </configuration> - </plugin> - - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>findbugs-maven-plugin</artifactId> - <version>2.3</version> - </plugin> - </plugins> </reporting> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ble...@us...> - 2010-09-21 14:27:33
Revision: 3602 http://bigdata.svn.sourceforge.net/bigdata/?rev=3602&view=rev Author: blevine218 Date: 2010-09-21 14:27:27 +0000 (Tue, 21 Sep 2010) Log Message: ----------- move checkstyle and findbugs out of parent POM and into bigdata-core POM so that bigdata-integ project does not inherit these configs. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/pom.xml Modified: branches/maven_scaleout/bigdata-core/pom.xml =================================================================== --- branches/maven_scaleout/bigdata-core/pom.xml 2010-09-20 23:46:12 UTC (rev 3601) +++ branches/maven_scaleout/bigdata-core/pom.xml 2010-09-21 14:27:27 UTC (rev 3602) @@ -1,5 +1,4 @@ -<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>com.bigdata</groupId> @@ -28,10 +27,8 @@ <configuration> <compilerArguments> <!-- - Apparently Javac may compile java source files inside jars put on the classpath. Weird. - Zookeeper 3.2.1 jar contained classes and sources, and under some circumstances, - the java files were getting recompiled and put into the bigdata jar. This setting - forces javac to only look for source in the current maven source directory. + Apparently Javac may compile java source files inside jars put on the classpath. Weird. Zookeeper 3.2.1 jar contained classes and sources, and under some circumstances, the + java files were getting recompiled and put into the bigdata jar. This setting forces javac to only look for source in the current maven source directory. --> <sourcepath>${project.build.sourceDirectory}</sourcepath> </compilerArguments> @@ -89,12 +86,12 @@ <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <configuration> - <!-- TODO: BLECH! All the tests are excluded from the regular unit test phase. - TODO: Need to split out the unit tests and the heavier integration tests, plus - TODO: get all the unit tests passing so regressions can properly fail the build. + <!-- + TODO: BLECH! All the tests are excluded from the regular unit test phase. TODO: Need to split out the unit tests and the heavier integration tests, plus TODO: get all the unit + tests passing so regressions can properly fail the build. --> <testFailureIgnore>true</testFailureIgnore> - <includes/> + <includes /> <excludes> <exclude>**/*</exclude> </excludes> @@ -102,8 +99,8 @@ </plugin> <plugin> - <!-- These are where the heavier tests can be run. Right now failsafe looks for tests starting or ending - with IT, aka FooIT.java or ITFoo.java, which don't exist yet, so nothing runs. + <!-- + These are where the heavier tests can be run. Right now failsafe looks for tests starting or ending with IT, aka FooIT.java or ITFoo.java, which don't exist yet, so nothing runs. 
--> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-failsafe-plugin</artifactId> @@ -201,14 +198,14 @@ <dependencies> - + <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> <!-- ************************ Start of non-public dependencies ************************ --> <!-- TODO: look at maven-bundle-plugin from felix to provide osgi support. bndlib version 0.0.357 in central. --> - + <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>cweb-extser</artifactId> @@ -230,7 +227,7 @@ <groupId>${thirdParty.groupId}</groupId> <!-- TODO: An older version (5.0.9) is available in central. --> <artifactId>unimi-fastutil</artifactId> <version>5.1.5</version> - </dependency> + </dependency> <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>lgpl-utils</artifactId> @@ -246,22 +243,22 @@ <artifactId>dsi-utils</artifactId> <version>1.0.6-020610</version> </dependency> - <dependency> + <dependency> <groupId>${thirdParty.groupId}</groupId> <artifactId>high-scale-lib</artifactId> <version>1.1.2</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> + <groupId>${thirdParty.groupId}</groupId> <artifactId>iris</artifactId> <version>0.58</version> </dependency> <dependency> - <groupId>${thirdParty.groupId}</groupId> + <groupId>${thirdParty.groupId}</groupId> <artifactId>nxparser</artifactId> <version>6-22-2010</version> </dependency> - + <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>zookeeper</artifactId> @@ -312,7 +309,7 @@ <dependency> <groupId>org.apache.river</groupId> <artifactId>browser</artifactId> - <version>2.1</version> + <version>2.1</version> </dependency> <!-- Note that these are dl jars, so they are provided and have a dl classifier. --> @@ -397,10 +394,9 @@ <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.15</version> - <!-- These exclusions are to address the fact that 1.2.15 added new features that depends on Sun specific - jars, but these jars cannot be made available due to Sun's click-through requirement on them. - We aren't using the new features anyway, so they are safe to exclude. log4j should have made these - optional in their POM. + <!-- + These exclusions are to address the fact that 1.2.15 added new features that depends on Sun specific jars, but these jars cannot be made available due to Sun's click-through + requirement on them. We aren't using the new features anyway, so they are safe to exclude. log4j should have made these optional in their POM. 
--> <exclusions> <exclusion> @@ -551,10 +547,37 @@ <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>3.1</version> - </dependency> - + </dependency> + </dependencies> + <reporting> + <plugins> + + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-surefire-report-plugin</artifactId> + <version>2.5</version> + </plugin> + + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-checkstyle-plugin</artifactId> + <version>2.5</version> + <configuration> + <configLocation>${basedir}/src/main/config/checkstyle.xml</configLocation> + </configuration> + </plugin> + + <plugin> + <groupId>org.codehaus.mojo</groupId> + <artifactId>findbugs-maven-plugin</artifactId> + <version>2.3</version> + </plugin> + + </plugins> + </reporting> + <profiles> <profile> <id>bigdata-clover</id> @@ -579,7 +602,7 @@ </configuration> </plugin> </plugins> - </build> + </build> </profile> </profiles> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
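As the comment in the diff notes, maven-failsafe-plugin only picks up classes whose names start or end with IT (FooIT.java or ITFoo.java), and none exist in the tree yet. A minimal, hypothetical example of a class that a failsafe run would match; the class name and assertion are placeholders, not committed code:

    package com.bigdata.integ;

    import org.junit.Assert;
    import org.junit.Test;

    // Hypothetical placeholder: the *IT suffix is what failsafe matches.
    public class ServiceStartupIT {

        @Test
        public void test_startup() {
            // A real integration test would start a federation and assert on
            // its state; this stub only demonstrates the naming convention.
            Assert.assertTrue(true);
        }
    }

Classes like this run during the integration-test phase rather than the unit-test phase, which is why the surefire configuration above excludes everything.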
From: <sgo...@us...> - 2010-09-20 23:46:21
Revision: 3601 http://bigdata.svn.sourceforge.net/bigdata/?rev=3601&view=rev Author: sgossard Date: 2010-09-20 23:46:12 +0000 (Mon, 20 Sep 2010) Log Message: ----------- [maven_scaleout] : Broke all direct dependency cycles with package 'com.bigdata'. Major change involves most calls to com.bigdata.LRUNexus and moving it into the 'com.bigdata.cache' package. This class was a singleton used for accessing a global cache, and was disabled by default. Future caching work should use the decorator pattern to avoid static knowledge of a cache. Modified Paths: -------------- branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/build.properties branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties branches/maven_scaleout/bigdata-core/bigdata-perf/btc/build.properties branches/maven_scaleout/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/build.properties branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/build.properties branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/Banner.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexMetadata.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentBuilder.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/IndexSegmentStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/Node.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/BCHMGlobalLRU.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/BCHMGlobalLRU2.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRU.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRURecycler.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/HardReferenceGlobalLRURecyclerExplicitDeleteRequired.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/IGlobalLRU.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/StoreAndAddressLRUCache.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/WeakReferenceGlobalLRU.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/counters/AbstractStatisticsCollector.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/AbstractJournal.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/TemporaryRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/journal/WORMStrategy.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/AbstractRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/IRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rawstore/SimpleMemoryRawStore.java branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/rdf/sail/bench/NanoSparqlServer.java 
branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/resources/StoreManager.java branches/maven_scaleout/bigdata-core/src/test/deploy/testing/data/com/bigdata/cache/StressTestGlobalLRU.xml branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/AbstractIndexSegmentTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestAll_IndexSegment.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithCompactingMerge.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithIncrementalBuild.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithLargeTrees.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderWithSmallTree.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentWithBloomFilter.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestNullValues.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/StressTestGlobalLRU.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithStripedLocks.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithStripedLocksAndLIRS.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithThreadLocalBuffers.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRU2WithThreadLocalBuffersAndLIRS.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/cache/TestBCHMGlobalLRUWithLIRS.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/AbstractRestartSafeTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/journal/TestAbort.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rawstore/SimpleFileRawStore.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/rdf/store/AbstractTestCase.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/service/AbstractEmbeddedFederationTestCase.java Added Paths: ----------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/cache/LRUNexus.java Removed Paths: ------------- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java branches/maven_scaleout/bigdata-core/src/test/java/com/bigdata/btree/TestIndexSegmentBuilderCacheInteraction.java Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/build.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/build.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/build.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -182,13 +182,13 @@ # The record cache (empty for the default cache). 
#cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # -## -Dcom.bigdata.LRUNexus.percentHeap=.1 +## -Dcom.bigdata.cache.LRUNexus.percentHeap=.1 # all jvm args for query. queryJvmArgs=-server -Xmx${bsbm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/bsbm/src/resources/logging/log4j.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! #log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/btc/build.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/btc/build.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/btc/build.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -61,11 +61,11 @@ # The record cache (empty for the default cache). #cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. 
queryJvmArgs=-server -Xmx${maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/btc/src/resources/logging/log4j.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! #log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/build.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/build.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/lubm/build.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -129,11 +129,11 @@ # The record cache (empty for the default cache). #cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. queryJvmArgs=-server -Xmx${lubm.maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=log4j.properties Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/build.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/build.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/build.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -60,11 +60,11 @@ # The record cache (empty for the default cache). 
#cache= -cache=-Dcom.bigdata.LRUNexus.enabled=false -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.LRUNexus.threadLocalBuffers=true -#cache=-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.LRUNexus.limitingCapacity=2000000 -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache -#-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler +cache=-Dcom.bigdata.cache.LRUNexus.enabled=false +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU2 -Dcom.bigdata.cache.LRUNexus.threadLocalBuffers=true +#cache=-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.BCHMGlobalLRU -Dcom.bigdata.cache.LRUNexus.accessPolicy=LIRS -Dcom.bigdata.cache.LRUNexus.limitingCapacity=2000000 +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.StoreAndAddressLRUCache +#-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecycler # all jvm args for query. queryJvmArgs=-server -Xmx${maxMem} ${gcopts} ${gcdebug} ${profiler} ${cache} -Dlog4j.configuration=file:src/resources/logging/log4j.properties Modified: branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties =================================================================== --- branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/bigdata-perf/uniprot/src/resources/logging/log4j.properties 2010-09-20 23:46:12 UTC (rev 3601) @@ -13,7 +13,7 @@ # Note: logging here at INFO or DEBUG will significantly impact throughput! #log4j.logger.com.bigdata=INFO -log4j.logger.com.bigdata.LRUNexus=INFO +log4j.logger.com.bigdata.cache.LRUNexus=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataSail=INFO #log4j.logger.com.bigdata.rdf.sail.BigdataEvaluationStrategyImpl2=INFO Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster.config 2010-09-20 23:46:12 UTC (rev 3601) @@ -629,15 +629,15 @@ "jini", "org.apache.zookeeper.server.quorum.QuorumPeerMain", -//BTM "com.bigdata.service.jini.TransactionServer", -//BTM "com.bigdata.service.jini.MetadataServer", + "com.bigdata.service.jini.TransactionServer", + "com.bigdata.service.jini.MetadataServer", "com.bigdata.service.jini.DataServer", -//BTM "com.bigdata.service.jini.LoadBalancerServer", + "com.bigdata.service.jini.LoadBalancerServer", "com.bigdata.service.jini.ClientServer", -"com.bigdata.transaction.ServiceImpl", -"com.bigdata.metadata.ServiceImpl", -"com.bigdata.loadbalancer.ServiceImpl" +//BTM "com.bigdata.transaction.ServiceImpl", +//BTM "com.bigdata.metadata.ServiceImpl", +//BTM "com.bigdata.loadbalancer.ServiceImpl" }; Modified: branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/deploy/var/config/jini/bigdataCluster16.config 2010-09-20 23:46:12 UTC (rev 3601) @@ -322,15 +322,15 @@ 
/* * Override the LRU buffer capacity. * - * See com.bigdata.LRUNexus.Options for configuration info. Note that if + * See com.bigdata.cache.LRUNexus.Options for configuration info. Note that if * you disable the LRUNexus you will lose the leaf cache for the index * segments, which is a big penalty. - //"-Dcom.bigdata.LRUNexus.enabled=false", + //"-Dcom.bigdata.cache.LRUNexus.enabled=false", // option may be used to select the higher throughput impl. - "-Dcom.bigdata.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired", + "-Dcom.bigdata.cache.LRUNexus.class=com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired", // option may be used to allocate more heap to the LRUNexus. - "-Dcom.bigdata.LRUNexus.percentHeap=.2", + "-Dcom.bigdata.cache.LRUNexus.percentHeap=.2", - "-Dcom.bigdata.LRUNexus.indexSegmentBuildPopulatesCache=true", // default true + "-Dcom.bigdata.cache.LRUNexus.indexSegmentBuildPopulatesCache=true", // default true */ }; Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/Banner.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/Banner.java 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/Banner.java 2010-09-20 23:46:12 UTC (rev 3601) @@ -31,12 +31,12 @@ import java.lang.reflect.Method; import java.util.Date; +import com.bigdata.util.config.ConfigDeployUtil; +import com.bigdata.util.config.NicUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.system.SystemUtil; -import com.bigdata.counters.AbstractStatisticsCollector; - /** * Class has a static method which writes a copyright banner on stdout once per * JVM. This method is invoked from several core classes in order to ensure that @@ -48,6 +48,17 @@ public class Banner { private static boolean didBanner; + private static final String HOSTNAME; + static { + String val; + try { + val = NicUtil.getIpAddress("default.nic", ConfigDeployUtil.getString("node.serviceNetwork"), false); + } catch(Throwable t) {//for now, maintain same failure logic as used previously + t.printStackTrace(); + val = NicUtil.getIpAddressByLocalHost(); + } + HOSTNAME = val; + } /** * Environment variables understood by the {@link Banner} class. @@ -153,7 +164,7 @@ "\n"+// "\nCopyright SYSTAP, LLC 2006-2010. All rights reserved."+// "\n"+// - "\n"+AbstractStatisticsCollector.fullyQualifiedHostName+// + "\n"+HOSTNAME+// "\n"+new Date()+// "\n"+SystemUtil.operatingSystem() + "/" + SystemUtil.osVersion() + " " + SystemUtil.architecture() + // Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/BigdataStatics.java 2010-09-20 23:46:12 UTC (rev 3601) @@ -27,7 +27,6 @@ package com.bigdata; -import com.bigdata.jini.start.process.ProcessHelper; /** * A class for those few statics that it makes sense to reference from other @@ -50,9 +49,9 @@ * {@link System#out} when that child process is executed. This makes it * easy to track down why a child process dies during service start. If you * want to see more output from the child process, then you should set the - * log level for the {@link ProcessHelper} class to INFO. 
+ * log level for the {@link com.bigdata.jini.start.process.ProcessHelper} class to INFO. * - * @see ProcessHelper + * @see com.bigdata.jini.start.process.ProcessHelper */ public static int echoProcessStartupLineCount = 20; Deleted: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/LRUNexus.java 2010-09-20 23:46:12 UTC (rev 3601) @@ -1,950 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -/* - * Created on Sep 8, 2009 - */ - -package com.bigdata; - -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryPoolMXBean; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.Properties; - -import org.apache.log4j.Logger; - -import com.bigdata.io.BytesUtil; -import com.bigdata.btree.IndexMetadata; -import com.bigdata.btree.IndexSegment; -import com.bigdata.btree.IndexSegmentBuilder; -import com.bigdata.cache.BCHMGlobalLRU; -import com.bigdata.cache.BCHMGlobalLRU2; -import com.bigdata.cache.HardReferenceGlobalLRU; -import com.bigdata.cache.HardReferenceGlobalLRURecycler; -import com.bigdata.cache.HardReferenceGlobalLRURecyclerExplicitDeleteRequired; -import com.bigdata.cache.IGlobalLRU; -import com.bigdata.cache.WeakReferenceGlobalLRU; -import com.bigdata.cache.IGlobalLRU.ILRUCache; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IBufferStrategy; -import com.bigdata.journal.TemporaryRawStore; -import com.bigdata.rawstore.AbstractRawStore; -import com.bigdata.rawstore.Bytes; -import com.bigdata.rawstore.IAddressManager; -import com.bigdata.rawstore.IRawStore; -import com.bigdata.rawstore.WormAddressManager; - -/** - * Static singleton factory used to configure the record level cache behavior - * for bigdata within the current JVM. The configuration is specified using - * system properties defined by {@link Options}. - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ - * - * FIXME LRUNexus : writes MUST be "isolated" until the commit. - * Isolated indices MUST have their own cache backed by the shared LRU - * (actually, they are on the shared temporary store so that helps). - * Unisolated indices SHOULD have their own cache backed by the shared - * LRU. At commit, any records in the "isolated" cache for a B+Tree - * should be putAll() onto the unisolated cache for the backing store. - * This way, we do not need to do anything if there is an abort(). 
- * <p> - * There are two quick fixes: (1) Disable the Global LRU; and (2) - * discard the cache if there is an abort on a store. The latter is - * pretty easy since we only have one store with abort semantics, which - * is the {@link AbstractJournal}, so that is how this is being handled - * right now by {@link AbstractJournal#abort()}. - * <p> - * An optimization would essentially isolate the writes on the cache - * per BTree or between commits. At the commit point, the written - * records would be migrated into the "committed" cache for the store. - * The caller would read on the uncommitted cache, which would read - * through to the "committed" cache. This would prevent incorrect reads - * without requiring us to throw away valid records in the cache. This - * could be a significant performance gain if aborts are common on a - * machine with a lot of RAM. - * - * @todo Test w/ G1 <code>-XX:+UnlockExperimentalVMOptions -XX:+UseG1GC</code> - * <p> - * G1 appears faster for query, but somewhat slower for load. This is - * probably related to the increased memory demand during load (more of - * the data winds up buffered). G1 might work for both use cases with a - * smaller portion of the heap given over to buffers. - * <p> - * G1 can also trip a crash, at least during load. There is a Sun incident - * ID# 1609804 for this. - * - * @todo Look into the memory pool threshold notification mechanism. See - * {@link ManagementFactory#getMemoryPoolMXBeans()} and - * {@link MemoryPoolMXBean}. TonyP suggests that tracking the old - * generation occupancy may be a better metric (more stable). The tricky - * part is to identify which pool(s?) correspond(s) to the old generation. - * Once that is done, the idea is to set a notification threshold using - * {@link MemoryPoolMXBean#setUsageThreshold(long)} and to only clear - * references from the tail of the global LRU when we have exceeded that - * threshold. Reading the javadoc, it seems that threshold notification - * would probably come after a (full) GC. The goal would have to be - * something like reducing the bytesInMemory to some percentage of its - * value at threshold notification (e.g., 80%). Since we can't directly - * control that and the feedback from the JVM is only at full GC - * intervals, we need to simply discard some percentage of the references - * from the tail of the global LRU. We could actually adjust the desired - * #of references on the LRU if that metric appears to be relatively - * stable. However, note that the average #of bytes per reference and the - * average #of instances of a reference on the LRU are not necessarily - * stable values. We could also examine the recordCount (total cache size - * across all caches). If weak references are cleared on an ongoing basis - * rather than during the full GC mark phase, then that will be very close - * to the real hard reference count. - * - * @todo Does it make sense to both buffer the index segment nodes region and - * buffer the nodes and leaves? [buffering the nodes region is an option.] - * - * @todo Note that a r/w store will require an approach in which addresses are - * PURGED from the store's cache during the commit protocol. That might be - * handled at the tx layer. - * - * @todo Better ergonomics! Perhaps keep some minimum amount for the JVM and - * then set a trigger on the GC time and if it crosses 5-10% of the CPU - * time for the application, then reduce the maximum bytes allowed for the - * global LRU buffer. 
- * - * @see Options - */ -public class LRUNexus { - - protected static final transient Logger log = Logger - .getLogger(LRUNexus.class); - - /** - * These options MUST BE specified as <em>ENVIRONMENT</em> variables on - * the command line when you start the JVM. The options control the - * existence of and behavior of the {@link LRUNexus#INSTANCE}. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - * @version $Id$ - */ - public static interface Options { - - /** - * Option may be used to (en|dis)able the {@link LRUNexus#INSTANCE} - * (default {@value #DEFAULT_ENABLED}). - */ - String ENABLED = LRUNexus.class.getName() + ".enabled"; - - /** - * Note: The {@link LRUNexus} is now disabled by default. With the - * removal of the synchronization bottlenecks in the low levels of the - * RW and WORM Journal modes, it is now more efficient to NOT use the - * embedded concurrent non-blocking caches and to pass through the - * burden of cache operations to the OS. Also, due to the large number - * of records retained by the embedded cache and the length of time that - * those records are retained, the embedded concurrent non-blocking - * caches have proven to be a challenge to the garbage collector. If - * enabled, it is advisable to use the - * <code>-XX:+UseParallelOldGC</code> GC option in order to avoid JVM - * hangs or crashes. - */ - String DEFAULT_ENABLED = "false"; - - /** - * The maximum heap capacity as a percentage of the JVM heap expressed - * as a value in <code>[0.0:1.0]</code>. This is used IFF - * {@link #MAX_HEAP} is not specified or is ZERO (0), which is its - * default value. If both options are zero, then the maximum heap is - * understood to be zero and the {@link LRUNexus#INSTANCE} will be - * disabled. - */ - String PERCENT_HEAP = LRUNexus.class.getName() + ".percentHeap"; - - /** - * The default ({@value #DEFAULT_PERCENT_HEAP}) is a bit conservative. - * It is designed to leave some room for application data objects and - * GC. You may be able to get away with significantly more on machines - * with large RAM, or just specify the buffer heap size directly using - * {@link #MAX_HEAP}. - */ - String DEFAULT_PERCENT_HEAP = ".1"; - - /** - * This option overrides {@link #PERCENT_HEAP} and directly specifies - * the maximum capacity of the {@link LRUNexus#INSTANCE} in bytes. If - * both options are zero, then the maximum heap is understood to be zero - * and the {@link LRUNexus#INSTANCE} will be disabled. Legal examples - * include: - * - * <pre> - * 30000000 - * 400m - * 2Gb - * </pre> - * - * @see BytesUtil#getByteCount(String) - */ - String MAX_HEAP = LRUNexus.class.getName() + ".maxHeap"; - - String DEFAULT_MAX_HEAP = "0"; - - /** - * The percent of the maximum bytes which the LRU may buffer to be - * cleared from the LRU when evicting the LRU entry (default - * {@value #DEFAULT_PERCENT_CLEARED}). This parameter provides some - * "batching" of evictions but is not used by all {@link IGlobalLRU} - * implementations. - */ - String PERCENT_CLEARED = LRUNexus.class.getName() + ".percentCleared"; - - String DEFAULT_PERCENT_CLEARED = ".01"; - - /** - * The name of the {@link IGlobalLRU} implementation class. - * - * @see #DEFAULT_CLASS - */ - String CLASS = LRUNexus.class.getName() + ".class"; - - /** - * The default {@link IGlobalLRU} implementation class ( - * {@value #DEFAULT_CLASS}). 
- * - * FIXME The {@link HardReferenceGlobalLRURecycler} has less throughput - * than the {@link HardReferenceGlobalLRU} but I want to test the - * {@link HardReferenceGlobalLRU} more thoroughly on high throughput - * cluster data loads to make sure that it is performing correctly. - * <p> - * Note: It is also possible that the {@link HardReferenceGlobalLRU} - * causes problems with the tenured generation since the Entry instances - * are always new, but they could last quite a while before eviction - * from the LRU position if there is a large heap. - * <p> - * Scale-out should use the - * {@link HardReferenceGlobalLRURecyclerExplicitDeleteRequired} to avoid - * giving away the cached index segment records when an index segment - * store is closed by a timeout. - * <p> - * For scale-up, the {@link HardReferenceGlobalLRURecycler} is fine. We - * are not re-opening stores all the time so the weak value reference - * semantics of that class do not cause a problem. - * <p> - * The recently written {@link BCHMGlobalLRU2} implementation should be - * ideal for both scale-out and scale-up once it has been tested more - * thoroughly. Even better would be a LIRS access policy for that class. - */ - String DEFAULT_CLASS = HardReferenceGlobalLRURecycler.class.getName(); -// String DEFAULT_CLASS = BCHMGlobalLRU2.class.getName(); - - /** - * The load factor for the cache instances. - */ - String LOAD_FACTOR = LRUNexus.class.getName() + ".loadFactor"; - - String DEFAULT_LOAD_FACTOR = ".75"; - - /** - * The concurrency level for the backing hash map(s). This property is - * not understood by all implementations. A value of ZERO (0) is - * interpreted in a special manner by {@link BCHMGlobalLRU2}. - */ - String CONCURRENCY_LEVEL = LRUNexus.class.getName() - + ".concurrencyLevel"; - - String DEFAULT_CONCURRENCY_LEVEL = "16"; - - /** - * When <code>true</code> the cache will use true <em>per-thread</em> - * buffers to absorb access policy updates. When <code>false</code>, the - * cache will use striped locks protecting a fixed array of buffers. - * This property is not understood by all implementations. - * */ - String THREAD_LOCAL_BUFFERS = LRUNexus.class.getName() - + ".threadLocalBuffers"; - - String DEFAULT_THREAD_LOCAL_BUFFERS = "false"; - - /** - * The initial capacity for the cache instances. - */ - String INITIAL_CAPACITY = LRUNexus.class.getName() + ".initialCapacity"; - - String DEFAULT_INITIAL_CAPACITY = "16"; - - /** - * The limiting cache capacity across all cache instances. - * - * @see #DEFAULT_LIMITING_CAPACITY - * - * @deprecated At the moment, this option is only understood by the - * {@link BCHMGlobalLRU}. The option may or may not survive - * as that feature is alpha. - */ - String LIMITING_CAPACITY = LRUNexus.class.getName()+".limitingCapacity"; - - String DEFAULT_LIMITING_CAPACITY = "" + (2 * Bytes.megabyte); - - /** - * The capacity of the thread-local buffer used to amortize the cost of - * updating the access policy. This option is only understood by select - * {@link IGlobalLRU} implementations. - */ - String THREAD_LOCAL_BUFFER_CAPACITY = LRUNexus.class.getName() - + ".threadLocalBufferCapacity"; - - String DEFAULT_THREAD_LOCAL_BUFFER_CAPACITY = "128"; - - /** - * The access policy (LIRS, LRU, etc). At the moment, this option is - * only understood by the {@link BCHMGlobalLRU}. 
- * - * @see #DEFAULT_ACCESS_POLICY_ENUM - */ - String ACCESS_POLICY = LRUNexus.class.getName() + ".accessPolicy"; - - String DEFAULT_ACCESS_POLICY = AccessPolicyEnum.LRU.toString(); - - /** - * The minimum #of per-{@link IRawStore} cache instances that will be - * retained by hard references when using an {@link IGlobalLRU} based on - * a weak value hash map such as {@link WeakReferenceGlobalLRU}. This - * controls the size of a hard reference ring buffer backing a weak - * value hash map. The actual number of cache instances will be less if - * fewer stores have been opened or if open stores have been - * {@link IRawStore#deleteResources() destroyed}. More cache instances - * will exist if there are hard references to more {@link IRawStore} - * instances. - */ - String MIN_CACHE_SET_SIZE = LRUNexus.class.getName()+".minCacheSetSize"; - - String DEFAULT_MIN_CACHE_SET_SIZE = "5"; - - /** - * When <code>true</code>, the {@link IndexSegmentBuilder} will - * pre-populate the {@link IGlobalLRU} cache with the nodes and leaves - * of the new index segment during the build or merge operation (default - * {@value #DEFAULT_INDEX_SEGMENT_BUILD_POPULATES_CACHE}). - */ - String INDEX_SEGMENT_BUILD_POPULATES_CACHE = LRUNexus.class.getName() - + ".indexSegmentBuildPopulatesCache"; - - /** - * FIXME Verify that [true] is the right behavior for scale-out. This is - * being tested in combination with the - * {@link HardReferenceGlobalLRURecyclerExplicitDeleteRequired}. - */ - String DEFAULT_INDEX_SEGMENT_BUILD_POPULATES_CACHE = "true"; - - } - - - /** - * Global instance. - * <p> - * Note: A <a href="http://bugs.sun.com/view_bug.do?bug_id=6880903">Sun G1 - * bug in JDK 1.6.0_16</a> provides a false estimate of the available - * memory. - * - * @see Options - */ - public static final IGlobalLRU<Long, Object> INSTANCE; - - /** - * The access policy. Not all {@link IGlobalLRU} implementations support - * multiple access policies. Check the specific implementation to see which - * policies it supports. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - * @version $Id$ - */ - public static enum AccessPolicyEnum { - - /** - * Least-recently used access policy. - */ - LRU, - - /** - * An access policy designed to avoid displacement of frequently used - * cache entries by scans of infrequently used items. - * - * @see <a - * href="http://portal.acm.org/citation.cfm?doid=511334.511340">LIRS: - * an efficient low inter-reference recency set replacement policy - * to improve buffer cache performance</a> and <a - * href="http://www.ece.eng.wayne.edu/~sjiang/Projects/LIRS/sig02.ppt" - * >LIRS : An Efficient Replacement Policy to Improve Buffer Cache - * Performance.</a> - */ - LIRS; - - private AccessPolicyEnum() { - } - - } - - /** - * A class which reflects the configuration {@link Options}. - * - * @author <a href="mailto:tho...@us...">Bryan - * Thompson</a> - * @version $Id$ - */ - public static class CacheSettings { - - /** - * @see Options#ENABLED - */ - public final boolean enabled; - - /** - * @see Options#INDEX_SEGMENT_BUILD_POPULATES_CACHE - */ - public final boolean indexSegmentBuildPopulatesCache; - - /** - * The {@link IGlobalLRU} implementation class. - * - * @see Options#CLASS - */ - public final Class<? extends IGlobalLRU> cls; - - /** - * The load factor for the backing hash map(s). - * - * @see Options#LOAD_FACTOR - */ - public final float loadFactor; - - /** - * The concurrency level for the backing hash map(s). This property is - * not understood by all implementations. 
- * - * @see Options#CONCURRENCY_LEVEL - */ - public final int concurrencyLevel; - - /** - * When <code>true</code> the cache will use true <em>per-thread</em> - * buffers to absorb access policy updates. When <code>false</code>, the - * cache will use striped locks protecting a fixed array of buffers. - * This property is not understood by all implementations. - * - * @see Options#THREAD_LOCAL_BUFFERS - */ - public final boolean threadLocalBuffers; - - /** - * The initial capacity for the backing {@link ILRUCache} hash map for - * each {@link IRawStore}. - * - * @see Options#INITIAL_CAPACITY - */ - public final int initialCacheCapacity; - - /** - * The limiting cache capacity across all cache instances. - * - * @see Options#LIMITING_CAPACITY - */ - public final int limitingCacheCapacity; - - /** - * The percentage of the JVM heap to use for bigdata buffers. - * - * @see Options#PERCENT_HEAP - */ - public final float percentHeap; - - /** - * The maximum heap size in bytes (optional). - * - * @see Options#MAX_HEAP - */ - public final long maxHeap; - - /** - * The maximum bytesInMemory to retain across the caches. This is - * computed based on {@link #percentHeap} and {@link #maxHeap} and - * represents the actual limit which will be imposed on the shared LRU. - * - * @see Options#MAX_HEAP - * @see Options#PERCENT_HEAP - */ - public final long maximumBytesInMemory; - - /** - * @see Options#PERCENT_CLEARED - */ - public final double percentCleared; - - /** - * {@link #percentCleared} TIMES {@link #maximumBytesInMemory}. - */ - public final long minCleared; - - /** - * The minimum #of caches to keep open for an {@link IGlobalLRU} based - * on a weak value hash map. - * - * @see Options#MIN_CACHE_SET_SIZE - */ - public final int minCacheSetSize; - - /** - * @see WeakReferenceGlobalLRU - */ - public final int queueCapacity; - - /** - * @see WeakReferenceGlobalLRU - */ - public final int nscan; - - /** - * The capacity of the thread-local buffer used to amortize the cost of - * updating the access policy. This option is only recognized by some - * {@link IGlobalLRU} implementations. - * - * @see BCHMGlobalLRU2 - * - * @see Options#THREAD_LOCAL_BUFFER_CAPACITY - */ - public final int threadLocalBufferCapacity; - - /** - * The access policy algorithm (LRU, LIRS, etc). - * - * @see Options#ACCESS_POLICY - */ - public final AccessPolicyEnum accessPolicy; - - /** - * Parses the {@link Options} found in the caller's {@link Properties} - * to populate the fields of this {@link CacheSettings} object. - * - * @param properties - * The properties. - * @throws ClassNotFoundException - */ - public CacheSettings(final Properties properties) - throws ClassNotFoundException { - - if (properties == null) - throw new IllegalArgumentException(); - - enabled = Boolean.valueOf(properties.getProperty( - Options.ENABLED, Options.DEFAULT_ENABLED)); - - indexSegmentBuildPopulatesCache = Boolean.valueOf(properties.getProperty( - Options.INDEX_SEGMENT_BUILD_POPULATES_CACHE, - Options.DEFAULT_INDEX_SEGMENT_BUILD_POPULATES_CACHE)); - - cls = (Class<? 
extends IGlobalLRU>) LRUNexus.class - .forName(properties.getProperty(Options.CLASS, - Options.DEFAULT_CLASS)); - - final boolean validClass = IGlobalLRU.class.isAssignableFrom(cls); - - if (!validClass) { - - throw new RuntimeException("Class does not implement " - + IGlobalLRU.class.getName() + " : class=" + cls); - - } - - loadFactor = Float.valueOf(properties.getProperty( - Options.LOAD_FACTOR, Options.DEFAULT_LOAD_FACTOR)); - - concurrencyLevel = Integer.valueOf(properties.getProperty( - Options.CONCURRENCY_LEVEL, Options.DEFAULT_CONCURRENCY_LEVEL)); - - threadLocalBuffers = Boolean.valueOf(properties.getProperty( - Options.THREAD_LOCAL_BUFFERS, - Options.DEFAULT_THREAD_LOCAL_BUFFERS)); - - initialCacheCapacity = Integer.valueOf(System - .getProperty(Options.INITIAL_CAPACITY, - Options.DEFAULT_INITIAL_CAPACITY)); - - limitingCacheCapacity = Integer.valueOf(System - .getProperty(Options.LIMITING_CAPACITY, - Options.DEFAULT_LIMITING_CAPACITY)); - - threadLocalBufferCapacity = Integer.valueOf(properties.getProperty( - Options.THREAD_LOCAL_BUFFER_CAPACITY, - Options.DEFAULT_THREAD_LOCAL_BUFFER_CAPACITY)); - - accessPolicy = AccessPolicyEnum.valueOf(properties.getProperty( - Options.ACCESS_POLICY, Options.DEFAULT_ACCESS_POLICY)); - - percentHeap = Float.valueOf(properties.getProperty( - Options.PERCENT_HEAP, Options.DEFAULT_PERCENT_HEAP)); - - if (percentHeap < 0f || percentHeap > 1f) { - - throw new IllegalArgumentException(Options.PERCENT_HEAP - + " : must be in [0:1]."); - - } - - maxHeap = BytesUtil.getByteCount(properties.getProperty( - Options.MAX_HEAP, Options.DEFAULT_MAX_HEAP)); - - if (maxHeap < 0) - throw new IllegalArgumentException(Options.MAX_HEAP - + "=" - + properties.getProperty(Options.MAX_HEAP, - Options.DEFAULT_MAX_HEAP)); - - if (maxHeap == 0 && percentHeap != 0f) { - // compute based on the percentage of the heap. - maximumBytesInMemory = (long) (Runtime.getRuntime() - .maxMemory() * percentHeap); - } else if (maxHeap != 0) { - // directly given. - maximumBytesInMemory = maxHeap; - } else { - // disabled. - maximumBytesInMemory = 0L; - } - - percentCleared = Double.valueOf(properties.getProperty( - Options.PERCENT_CLEARED, Options.DEFAULT_PERCENT_CLEARED)); - - if (percentCleared < 0f || percentCleared > 1f) { - - throw new IllegalArgumentException(Options.PERCENT_CLEARED - + " : must be in [0:1]."); - - } - - minCleared = (long) (percentCleared * maximumBytesInMemory); - - minCacheSetSize = Integer.valueOf(properties.getProperty( - Options.MIN_CACHE_SET_SIZE, - Options.DEFAULT_MIN_CACHE_SET_SIZE)); - - /* - * Note: Values below this point are specific to the - * WeakReferenceGlobalLRU. - */ - - /* - * Estimate of the average record size. - * - * Note: 1024 is not a bad value for a WORM journal, but 4096 or - * 8192 are better values for the RW store and the index segment - * files. - */ - // The average record size. - final int baseAverageRecordSize = 1024; - - final int averageRecordSize = (int) (baseAverageRecordSize * (Integer - .valueOf(IndexMetadata.Options.DEFAULT_BTREE_BRANCHING_FACTOR) / 32.)); - - /* - * The target capacity for that expected record size. - * - * Note: This parameter can get you into trouble with too much GC if - * too much gets buffered on the queue (this is the reason this LRU - * implementation is not recommended!) - * - * 4x may be a bit aggressive. Try 3x. - * - * TestTripleStoreLoadRateLocal: 4x yields 38s GC time with 1G heap. - * - * TestTripleStoreLoadRateLocal: 3x yields 36s GC time with 1G heap. 
- */ - final long maximumQueueCapacityEstimate = maximumBytesInMemory - / averageRecordSize * 2; - - if (BigdataStatics.debug) - System.err.println(// - "averageRecordSize=" - + averageRecordSize// - + ", maximumQueueCapacityEstimate=" - + maximumQueueCapacityEstimate// - ); - - if (true) { - - queueCapacity = (int) Math.min(Integer.MAX_VALUE, - maximumQueueCapacityEstimate); - - } else if (maximumBytesInMemory < Bytes.gigabyte * 2) { - - // capacity is no more than X - queueCapacity = (int) Math.min( - maximumQueueCapacityEstimate, 200000/* - * 200k - */); - - } else { - - // capacity is no more than Y - queueCapacity = (int) Math.min( - maximumQueueCapacityEstimate, 1000000/* - * 1M - */); - - } - - nscan = 20; -// Integer.valueOf(properties.getProperty(Options.NSCAN, -// Options.DEFAULT_NSCAN)); - - } - - /** - * Create a new {@link IGlobalLRU} instance from the - * {@link CacheSettings}. The {@link IGlobalLRU} MUST define a public - * constructor with the following method signature. - * - * <pre> - * public FooGlobalLRU(CacheSettings) - * </pre> - * - * @return The new instance -or- <code>null</code> if the cache is - * disabled. - * - * @throws NoSuchMethodException - * @throws SecurityException - * @throws InvocationTargetException - * @throws IllegalAccessException - * @throws InstantiationException - * @throws IllegalArgumentException - * @throws UnsupportedOperationException - * if something is not supported.... - * - * @todo Instead of returning <code>null</code> if the cache is not - * enabled or if something goes wrong we could return a - * NOPGlobalLRU. That could simplify conditional logic. The - * implementation would have to support per-store caches but would - * not retain any records in those caches. - */ - public IGlobalLRU<Long, Object> newInstance() throws SecurityException, - NoSuchMethodException, IllegalArgumentException, - InstantiationException, IllegalAccessException, - InvocationTargetException { - - if (enabled) { - - if (maximumBytesInMemory > 0) { - - final Constructor<?> ctor = cls - .getConstructor(new Class[] { CacheSettings.class }); - - return (IGlobalLRU<Long, Object>) ctor - .newInstance(new Object[] { this }); - - } - - } - - // Not enabled. - return null; - - } - - public String toString() { - - return super.toString() - + "{"// - + "maxPercent=" + percentHeap// - + ", maxHeap=" + maxHeap// - + ", maximumBytesInMemory=" + maximumBytesInMemory// - + ", percentCleared=" + percentCleared// - + ", minCleared=" + minCleared// - + ", maxMemory=" + Runtime.getRuntime().maxMemory()// - + ", loadFactor=" + loadFactor// - + ", concurrencyLevel=" + concurrencyLevel// - + ", threadLocalBuffers=" + threadLocalBuffers// - + ", threadLocalBufferCapacity=" + threadLocalBufferCapacity// - + ", initialCacheCapacity=" + initialCacheCapacity// - + ", limitingCacheCapacity=" + limitingCacheCapacity// - + ", minCacheSetSize=" + minCacheSetSize// - + ", queueCapacity=" + queueCapacity// - + ", nscan=" + nscan// - + ", cls=" + cls.getName()// - + ", indexSegmentBuildPopulatesCache=" + indexSegmentBuildPopulatesCache + // - "}"; - - } - - } - - /** - * The configuration in use. - */ - private static final CacheSettings settings; - - static { - - IGlobalLRU<Long, Object> tmp = null; - CacheSettings s = null; - - try { - - // parse the options. - s = new CacheSettings(System.getProperties()); - - if (BigdataStatics.debug || log.isInfoEnabled()) { - - final String msg = s.enabled ? 
s.toString() - : "LRUNexus is disabled"; - - if (BigdataStatics.debug) - System.err.println(msg); - - if (log.isInfoEnabled()) - log.info(msg); - - } - - // create the cache object. - tmp = s.newInstance(); - - } catch (Throwable t) { - - log.error("LRUNexus disabled", t); - - } finally { - - // Note: MAY be null. - INSTANCE = tmp; - - // Note: MAY be null. - settings = s; - - } - - } - - /** - * Return <code>true</code> if the {@link IndexSegmentBuilder} will populate - * the {@link IGlobalLRU} with records for the new {@link IndexSegment} - * during the build. - * - * @see Options#INDEX_SEGMENT_BUILD_POPULATES_CACHE - */ - public static final boolean getIndexSegmentBuildPopulatesCache() { - - return settings != null && settings.indexSegmentBuildPopulatesCache; - - } - - /** - * Factory returns the {@link ILRUCache} for the store iff the - * {@link LRUNexus} is enabled. - * - * @param store - * The store. - * - * @return The cache for that store if the {@link LRUNexus} is enabled and - * otherwise <code>null</code>. - * - * @throws IllegalArgumentException - * if the store is <code>null</code>. - */ - public static ILRUCache<Long, Object> getCache(final IRawStore store) { - - if (store == null) - throw new IllegalArgumentException(); - - if (INSTANCE == null) - return null; - - final IAddressManager am; - - if (store instanceof AbstractJournal) { - - /* - * This avoids a hard reference to the journal (it winds up using a - * clone of the address manager instead). - */ - - am = ((IBufferStrategy) ((AbstractJournal) store) - .getBufferStrategy()).getAddressManager(); - - } else if (store instanceof TemporaryRawStore) { - - /* - * This avoids using a hard reference to the temporary store (it - * basically clones the address manager instead). - */ - - am = new WormAddressManager(((TemporaryRawStore) store) - .getOffsetBits()); - - } else if (store instanceof AbstractRawStore) { - - /* - * Note: this covers the IndexSegmentStore. - */ - am = ((AbstractRawStore) store).getAddressManager(); - - } else { - - // @todo which cases come through here? SimpleMemoryStore, - // SimpleFileStore, - am = null; - - } - - if (am instanceof IRawStore) { - - /* - * This would cause the IRawStore to be retained by a hard - * reference! - */ - - throw new AssertionError(am.getClass().getName() + " implements " - + IRawStore.class.getName()); - - } - - return INSTANCE.getCache(store.getUUID(), am); - - } - - /** - * Command line utility may be used to confirm the environment settings. - * - * @param args - * Ignored. All parameters are specified either in the - * environment or using JVM - * <code>-Dcom.bigdata.LRUNexus.foo=bar</code> arguments on the - * command line. 
- * - * @throws ClassNotFoundException - */ - public static void main(String[] args) throws ClassNotFoundException { - - System.out.println(new CacheSettings(System.getProperties()).toString()); - - } - -} Modified: branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java =================================================================== --- branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-20 21:37:58 UTC (rev 3600) +++ branches/maven_scaleout/bigdata-core/src/main/java/com/bigdata/btree/AbstractBTree.java 2010-09-20 23:46:12 UTC (rev 3601) @@ -47,7 +47,6 @@ import com.bigdata.Banner; import com.bigdata.BigdataStatics; -import com.bigdata.LRUNexus; import com.bigdata.btree.AbstractBTreeTupleCursor.MutableBTreeTupleCursor; import com.bigdata.btree.AbstractBTreeTupleCursor.ReadOnlyBTreeTupleCursor; import com.bigdata.btree.IndexMetadata.Options; @@ -244,12 +243,6 @@ final protected IRawStore store; /** - * Optional cache for {@link INodeData} and {@link ILeafData} instances and - * always <code>null</code> if the B+Tree is transient. - */ - protected final ILRUCache<Long, Object> storeCache; - - /** * The branching factor for the btree. */ final protected int branchingFactor; @@ -645,12 +638,6 @@ * held). This means that lookup in a map is not required for top-down * navigation. * <p> - * The {@link LRUNexus} provides an {@link INodeData} / {@link ILeafData} - * data record cache based on a hash map with lookup by the address of the - * node or leaf. This is tested when the child {@link WeakReference} was - * never set or has been cleared. This cache is also used by the - * {@link IndexSegment} for the linked-leaf traversal pattern, which does - * not use top-down navigation. * * @todo consider a policy that dynamically adjusts the queue capacities * based on the height of the btree in order to maintain a cache that @@ -952,11 +939,7 @@ * the child nodes and the parents are connected using hard links * rather than weak references. */ - - this.storeCache = null; -// this.globalLRU = null; - // this.readRetentionQueue = null; } else { @@ -972,16 +955,6 @@ * support concurrent read operations. The INodeData or ILeafData * will be wrapped as a Node or Leaf by the owning B+Tree instance. */ - - ... [truncated message content] |
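The r3601 log message above recommends the decorator pattern as the replacement for this kind of static singleton. A minimal sketch of that idea follows; the RecordReader and CachingRecordReader types are hypothetical illustrations, not classes from the bigdata codebase, and a production cache would also need the eviction policy (percentHeap budget, LRU ordering) that LRUNexus supplied:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Anything that can resolve a record from its address on a store.
    interface RecordReader {
        Object read(long addr);
    }

    // Decorator: adds read-through caching to any RecordReader. The cache is
    // an instance field, so each store (or test fixture) composes its own
    // cache instead of consulting a JVM-wide singleton such as
    // LRUNexus.INSTANCE.
    class CachingRecordReader implements RecordReader {

        private final RecordReader delegate;

        private final ConcurrentMap<Long, Object> cache = new ConcurrentHashMap<Long, Object>();

        CachingRecordReader(final RecordReader delegate) {
            if (delegate == null)
                throw new IllegalArgumentException();
            this.delegate = delegate;
        }

        public Object read(final long addr) {
            Object rec = cache.get(addr);
            if (rec == null) {
                rec = delegate.read(addr);
                if (rec == null)
                    return null; // miss on the backing store; do not cache.
                // Another thread may have raced us; keep the winner.
                final Object tmp = cache.putIfAbsent(addr, rec);
                if (tmp != null)
                    rec = tmp;
            }
            return rec;
        }
    }

Because callers only see the RecordReader interface, the wiring decision (RecordReader reader = new CachingRecordReader(diskReader);) lives at a single composition point, and dependency cycles of the kind broken in this commit cannot reappear through a static cache reference.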
From: <ble...@us...> - 2010-09-20 21:38:05
Revision: 3600 http://bigdata.svn.sourceforge.net/bigdata/?rev=3600&view=rev Author: blevine218 Date: 2010-09-20 21:37:58 +0000 (Mon, 20 Sep 2010) Log Message: ----------- Don't set federation.name system property when running JUnit tests Modified Paths: -------------- branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml Modified: branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml =================================================================== --- branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml 2010-09-20 20:14:28 UTC (rev 3599) +++ branches/maven_scaleout/bigdata-core/src/test/deploy/testing/test.xml 2010-09-20 21:37:58 UTC (rev 3600) @@ -216,10 +216,12 @@ <sysproperty key="log4j.path" value="${log4j.configuration}" /> <sysproperty key="default.nic" value="${default.nic}" /> <!-- Jini group name --> + <!-- <sysproperty key="federation.name" value="${federation.name}" /> + --> + <sysproperty key="java.class.path" value="${junit.classpath.text}" /> - <sysproperty key="classserver.jar" value="${deploy.lib}/classserver.jar" /> <sysproperty key="colt.jar" value="${deploy.lib}/colt.jar" /> <sysproperty key="ctc_utils.jar" value="${deploy.lib}/ctc_utils.jar" />
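With federation.name no longer exported by the JUnit target, any test code that reads the property has to tolerate its absence. A defensive lookup might look roughly like the following; the property key comes from the commented-out sysproperty above, while the class name and fallback value are purely illustrative:

    // Illustrative sketch only: reading an optional system property with a
    // fallback, so tests keep working when test.xml does not set it.
    public class TestFederationNameUtil {

        public static String getFederationName() {
            final String name = System.getProperty("federation.name");
            if (name == null || name.trim().length() == 0) {
                // Property absent (e.g., under the JUnit runner): fall back
                // to a per-user scratch name rather than failing the test.
                return "testFed_" + System.getProperty("user.name", "unknown");
            }
            return name.trim();
        }
    }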