From: <jer...@us...> - 2013-11-20 22:41:42

Revision: 7576
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7576&view=rev
Author:   jeremy_carroll
Date:     2013-11-20 22:41:35 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
some mods to avoid exposing db so much in optimizer after code review of 773

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -220,17 +220,17 @@
 
     }
 
-//  @Override
-    public boolean isReorderable(ITripleStore db) {
+    @Override
+    public boolean isReorderable() {
 
-        final long estCard = getEstimatedCardinality(null, db);
+        final long estCard = getEstimatedCardinality(null);
 
         return estCard >= 0 && estCard < Long.MAX_VALUE;
 
     }
 
-//  @Override
-    public long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db) {
+    @Override
+    public long getEstimatedCardinality(StaticOptimizer opt) {
 
         final JoinGroupNode group = subgroup();
 
@@ -256,7 +256,10 @@
             zeroMatchAdjustment = 1;
             break;
         case 2:
-            zeroMatchAdjustment = db.getURICount() + db.getBNodeCount(); // this is too big when we are looking in a reduced dataset
+            zeroMatchAdjustment = Long.MAX_VALUE / 2;
+            // The following is more accurate, but more expensive and unnecessary.
+            // db.getURICount() + db.getBNodeCount();
+            System.err.println("adj: "+zeroMatchAdjustment);
             break;
         }
     }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -44,7 +44,7 @@
      * by examining the type - individual instances of a particular type
      * may or may not be reorderable.
      */
-    boolean isReorderable(ITripleStore db);
+    boolean isReorderable();
 
     /**
      * Return the estimated cardinality - either the range count of a
@@ -52,6 +52,6 @@
      * group.
      * @param opt This optimizer can be used to help work out the estimate
      */
-    long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db);
+    long getEstimatedCardinality(StaticOptimizer opt);
 }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -349,12 +349,12 @@
 
     }
 
-    public List<IReorderableNode> getReorderableChildren(ITripleStore db) {
+    public List<IReorderableNode> getReorderableChildren() {
         final List<IReorderableNode> nodes = getChildren(IReorderableNode.class);
         final Iterator<IReorderableNode> it = nodes.iterator();
         while (it.hasNext()) {
             final IReorderableNode node = it.next();
-            if (!node.isReorderable(db)) {
+            if (!node.isReorderable()) {
                 it.remove();
             }
         }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -636,7 +636,7 @@
      * @see com.bigdata.rdf.sparql.ast.IReorderableNode#isReorderable()
      */
     @Override
-    public boolean isReorderable(ITripleStore db) {
+    public boolean isReorderable() {
 
         return !isOptional();
 
@@ -646,7 +646,7 @@
      * @see com.bigdata.rdf.sparql.ast.IReorderableNode#getEstimatedCardinality()
      */
     @Override
-    public long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db) {
+    public long getEstimatedCardinality(StaticOptimizer opt) {
 
         return getProperty(AST2BOpBase.Annotations.ESTIMATED_CARDINALITY, -1l);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -472,9 +472,5 @@
         return set;
 
     }
-
-    public ITripleStore getDB() {
-        return evaluationContext.getAbstractTripleStore();
-    }
 }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -93,22 +93,22 @@
 
     @Override
-    public long getEstimatedCardinality(StaticOptimizer optimizer, ITripleStore db) {
+    public long getEstimatedCardinality(StaticOptimizer optimizer) {
 
         long cardinality = 0;
 
         for (JoinGroupNode child : this) {
 
-            StaticOptimizer opt = new StaticOptimizer(optimizer, child.getReorderableChildren(db));
+            StaticOptimizer opt = new StaticOptimizer(optimizer, child.getReorderableChildren());
 
             cardinality += opt.getCardinality();
 
         }
 
         return cardinality;
 
     }
 
     @Override
-    public boolean isReorderable(ITripleStore db) {
+    public boolean isReorderable() {
 
         for (JoinGroupNode child : this) {
 
             for (IGroupMemberNode grandchild : child) {
 
                 if (! (grandchild instanceof IReorderableNode))
                     return false;
 
-                if (! ((IReorderableNode)grandchild).isReorderable(db))
+                if (! ((IReorderableNode)grandchild).isReorderable())
                     return false;
 
             }
         }

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -466,7 +466,7 @@
             /*
              * Let the optimizer handle the simple optionals too.
              */
-            final List<IReorderableNode> nodes = joinGroup.getReorderableChildren(ctx.getAbstractTripleStore());
+            final List<IReorderableNode> nodes = joinGroup.getReorderableChildren();
 
             if (!nodes.isEmpty()) {

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -512,7 +512,7 @@
 
             if (rangeCount[tailIndex] == -1L) {
 
-                final long rangeCount = (long) nodes.get(tailIndex).getEstimatedCardinality(this, sa.getDB());
+                final long rangeCount = (long) nodes.get(tailIndex).getEstimatedCardinality(this);
 
                 this.rangeCount[tailIndex] = rangeCount;

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java	2013-11-20 14:34:33 UTC (rev 7575)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java	2013-11-20 22:41:35 UTC (rev 7576)
@@ -28,6 +28,7 @@
 
 import com.bigdata.rdf.internal.IV;
 import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode;
+import com.bigdata.rdf.sparql.ast.GroupMemberNodeBase;
 import com.bigdata.rdf.sparql.ast.StatementPatternNode;
 import com.bigdata.rdf.sparql.ast.VarNode;
 import com.bigdata.rdf.store.AbstractTripleStore;
@@ -53,13 +54,15 @@
         public NotNestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym, boolean switchOrdering) {
 
             String pattern = "c" + sym;
 
-            given = select( varNode(z),
+            StatementPatternNode spn1 = statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431);
+            StatementPatternNode spn2 = statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054);
+            given = select( varNode(z),
                     where (
                             joinGroupNode(propertyPathNode(varNode(x),pattern, constantNode(b))),
-                            statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431),
+                            spn1,
                             propertyPathNode(varNode(x),pattern, varNode(z)),
-                            statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054)
+                            spn2
                     ) );
 
             varCount = 0;
@@ -74,16 +77,12 @@
             } else {
                 alpp1 = alpp1(zero_or_one_to_one_or_more);
                 alpp2 = alpp2(zero_or_one_to_one_or_more);
-
             }
+            final GroupMemberNodeBase<?> gmn[] = alpp1.lowerBound() == 0
+                    ? new GroupMemberNodeBase[]{alpp1, spn1, alpp2, spn2}
+                    : new GroupMemberNodeBase[]{alpp1, alpp2, spn2, spn1};
 
-            expected = select( varNode(z),
-                    where (
-                            alpp1,
-                            alpp2,
-                            statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054),
-                            statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431)
-                    ) );
+            expected = select( varNode(z), where ( gmn ) );
 
             varCount = 0;
 
         }
@@ -99,30 +98,33 @@
     private class NestedHelper extends Helper {
 
         public NestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym) {
 
-            String pattern = "c" + sym;
+            String pattern = "d" + sym;
 
-            given = select( varNode(z),
+            StatementPatternNode spn1 = statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431);
+            StatementPatternNode spn2 = statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054);
+            given = select( varNode(z),
                     where (
                             joinGroupNode(propertyPathNode(varNode(x),pattern, constantNode(b))),
-                            statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431),
+                            spn1,
                             joinGroupNode(propertyPathNode(varNode(x),pattern, varNode(z))),
-                            statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054)
+                            spn2
                     ) );
 
             varCount = 0;
 
-            expected = select( varNode(z),
-                    where (
-                            arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more,
-                                    joinGroupNode(
-                                            statementPatternNode(leftVar(), constantNode(c), rightVar(), 26)
-                                    ) ),
-                            arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more,
-                                    joinGroupNode(
-                                            statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135)
-                                    ) ),
-                            statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054),
-                            statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431)
-                    ) );
+            ArbitraryLengthPathNode alpp1 = arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more,
+                    joinGroupNode(
+                            statementPatternNode(leftVar(), constantNode(d), rightVar(), 26)
+                    ) );
+            ArbitraryLengthPathNode alpp2 = arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more,
+                    joinGroupNode(
+                            statementPatternNode(leftVar(), constantNode(d), rightVar(), 3135)
+                    ) );
+
+            final GroupMemberNodeBase<?> gmn[] = alpp1.lowerBound() == 0
+                    ? new GroupMemberNodeBase[]{alpp1, spn1, alpp2, spn2}
+                    : new GroupMemberNodeBase[]{alpp1, alpp2, spn2, spn1};
+
+            expected = select( varNode(z), where ( gmn ) );
 
             varCount = 0;
 
         }
@@ -195,7 +197,7 @@
                 joinGroupNode(
                         arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE,
                                 joinGroupNode(
-                                        statementPatternNode(leftVar(), constantNode(c), rightVar(), 26)
+                                        statementPatternNode(leftVar(), constantNode(d), rightVar(), 26)
                                 ) )
                 ),
@@ -203,7 +205,7 @@
                 joinGroupNode(
                         arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE,
                                 joinGroupNode(
-                                        statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135)
+                                        statementPatternNode(leftVar(), constantNode(d), rightVar(), 3135)
                                 ) )
                ),
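For orientation, the net effect of this commit is that cardinality-based reordering no longer needs an ITripleStore handle: each IReorderableNode carries (or computes) its own estimate, and the static optimizer works from that alone. The following is an illustrative sketch only, not project source; the class and method names (ReorderSketch, cheapestFirst) are invented, while IReorderableNode and StaticOptimizer are the real types touched by the diff above.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import com.bigdata.rdf.sparql.ast.IReorderableNode;
import com.bigdata.rdf.sparql.ast.optimizers.StaticOptimizer;

public class ReorderSketch {

    public static List<IReorderableNode> cheapestFirst(
            final List<IReorderableNode> children, final StaticOptimizer opt) {

        final List<IReorderableNode> reorderable =
                new ArrayList<IReorderableNode>();

        for (IReorderableNode node : children) {
            // No db argument any more; e.g. a StatementPatternNode
            // answers false here when it is OPTIONAL.
            if (node.isReorderable()) {
                reorderable.add(node);
            }
        }

        // Order by the annotation-driven estimate alone. A negative value
        // means "unknown" and would need special handling in real code.
        Collections.sort(reorderable, new Comparator<IReorderableNode>() {
            @Override
            public int compare(final IReorderableNode a,
                    final IReorderableNode b) {
                return Long.compare(a.getEstimatedCardinality(opt),
                        b.getEstimatedCardinality(opt));
            }
        });

        return reorderable;
    }
}

The actual filtering lives in JoinGroupNode.getReorderableChildren() and the ranking in StaticOptimizer, as the diff shows; the sketch only makes the calling convention explicit.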
From: <tho...@us...> - 2013-11-20 14:34:44

Revision: 7575
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7575&view=rev
Author:   thompsonbry
Date:     2013-11-20 14:34:33 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
Added the 1.2.4 release notes into the 1.3.0 development branch.

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_2_4.txt

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_2_4.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_2_4.txt	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/releases/RELEASE_1_2_4.txt	2013-11-20 14:34:33 UTC (rev 7575)
@@ -0,0 +1,299 @@
+This is a minor release of bigdata(R).
+
+Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation.
+
+Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput.
+
+See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7].
+
+Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script.
+
+You can download the WAR from:
+
+http://sourceforge.net/projects/bigdata/
+
+You can checkout this release from:
+
+https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_2_4
+
+New features:
+
+- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information.
+- SPARQL 1.1 Property Paths.
+- Remote Java client for Multi-Tenancy extensions NanoSparqlServer
+- Sesame 2.6.10 dependency
+- Plus numerous other bug fixes and performance enhancements.
+
+Feature summary:
+
+- Single machine data storage to ~50B triples/quads (RWStore);
+- Clustered data storage is essentially unlimited;
+- Simple embedded and/or webapp deployment (NanoSparqlServer);
+- Triples, quads, or triples with provenance (SIDs);
+- Fast RDFS+ inference and truth maintenance;
+- Fast 100% native SPARQL 1.1 evaluation;
+- Integrated "analytic" query package;
+- %100 Java memory manager leverages the JVM native heap (no GC);
+
+Road map [3]:
+
+- High availability for the journal and the cluster.
+- Runtime Query Optimizer for Analytic Query mode; and
+- Simplified deployment, configuration, and administration for clusters.
+
+Change log:
+
+  Note: Versions with (*) MAY require data migration. For details, see [9].
+
+1.2.4:
+
+- http://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer)
+
+1.2.3:
+
+- http://sourceforge.net/apps/trac/bigdata/ticket/168 (Maven Build)
+- http://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory).
+- http://sourceforge.net/apps/trac/bigdata/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll)
+- http://sourceforge.net/apps/trac/bigdata/ticket/312 (CI (mock) quorums deadlock)
+- http://sourceforge.net/apps/trac/bigdata/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/485 (RDFS Plus Profile)
+- http://sourceforge.net/apps/trac/bigdata/ticket/495 (SPARQL 1.1 Property Paths)
+- http://sourceforge.net/apps/trac/bigdata/ticket/519 (Negative parser tests)
+- http://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for SOLUTION SETS)
+- http://sourceforge.net/apps/trac/bigdata/ticket/535 (Optimize JOIN VARS for Sub-Selects)
+- http://sourceforge.net/apps/trac/bigdata/ticket/555 (Support PSOutputStream/InputStream at IRawStore)
+- http://sourceforge.net/apps/trac/bigdata/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser)
+- http://sourceforge.net/apps/trac/bigdata/ticket/570 (MemoryManager Journal does not implement all methods).
+- http://sourceforge.net/apps/trac/bigdata/ticket/575 (NSS Admin API)
+- http://sourceforge.net/apps/trac/bigdata/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select)
+- http://sourceforge.net/apps/trac/bigdata/ticket/578 (Concise Bounded Description (CBD))
+- http://sourceforge.net/apps/trac/bigdata/ticket/579 (CONSTRUCT should use distinct SPO filter)
+- http://sourceforge.net/apps/trac/bigdata/ticket/583 (VoID in ServiceDescription)
+- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/590 (nxparser fails with uppercase language tag)
+- http://sourceforge.net/apps/trac/bigdata/ticket/592 (Optimize RWStore allocator sizes)
+- http://sourceforge.net/apps/trac/bigdata/ticket/593 (Ugrade to Sesame 2.6.10)
+- http://sourceforge.net/apps/trac/bigdata/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default)
+- http://sourceforge.net/apps/trac/bigdata/ticket/596 (Change web.xml parameter names to be consistent with Jini/River)
+- http://sourceforge.net/apps/trac/bigdata/ticket/597 (SPARQL UPDATE LISTENER)
+- http://sourceforge.net/apps/trac/bigdata/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations)
+- http://sourceforge.net/apps/trac/bigdata/ticket/599 (BlobIV for blank node : NotMaterializedException)
+- http://sourceforge.net/apps/trac/bigdata/ticket/600 (BlobIV collision counter hits false limit.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/601 (Log uncaught exceptions)
+- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://sourceforge.net/apps/trac/bigdata/ticket/607 (History service / index)
+- http://sourceforge.net/apps/trac/bigdata/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level)
+- http://sourceforge.net/apps/trac/bigdata/ticket/609 (bigdata-ganglia is required dependency for Journal)
+- http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo)
+- http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper)
+- http://sourceforge.net/apps/trac/bigdata/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs)
+- http://sourceforge.net/apps/trac/bigdata/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry)
+- http://sourceforge.net/apps/trac/bigdata/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join)
+- http://sourceforge.net/apps/trac/bigdata/ticket/616 (Row store read/update not isolated on Journal)
+- http://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?")
+- http://sourceforge.net/apps/trac/bigdata/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB)
+- http://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests)
+- http://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/626 (Expose performance counters for read-only indices)
+- http://sourceforge.net/apps/trac/bigdata/ticket/627 (Environment variable override for NSS properties file)
+- http://sourceforge.net/apps/trac/bigdata/ticket/628 (Create a bigdata-client jar for the NSS REST API)
+- http://sourceforge.net/apps/trac/bigdata/ticket/631 (ClassCastException in SIDs mode query)
+- http://sourceforge.net/apps/trac/bigdata/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings)
+- http://sourceforge.net/apps/trac/bigdata/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position)
+- http://sourceforge.net/apps/trac/bigdata/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms)
+- http://sourceforge.net/apps/trac/bigdata/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty)
+- http://sourceforge.net/apps/trac/bigdata/ticket/642 (Property paths do not work inside of exists/not exists filters)
+- http://sourceforge.net/apps/trac/bigdata/ticket/643 (Add web.xml parameters to lock down public NSS end points)
+- http://sourceforge.net/apps/trac/bigdata/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close())
+- http://sourceforge.net/apps/trac/bigdata/ticket/650 (Can not POST RDF to a graph using REST API)
+- http://sourceforge.net/apps/trac/bigdata/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap())
+- http://sourceforge.net/apps/trac/bigdata/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data)
+- http://sourceforge.net/apps/trac/bigdata/ticket/656 (InFactory bug when IN args consist of a single literal)
+- http://sourceforge.net/apps/trac/bigdata/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns)
+- http://sourceforge.net/apps/trac/bigdata/ticket/667 (Provide NanoSparqlServer initialization hook)
+- http://sourceforge.net/apps/trac/bigdata/ticket/669 (Doubly nested subqueries yield no results with LIMIT)
+- http://sourceforge.net/apps/trac/bigdata/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency)
+- http://sourceforge.net/apps/trac/bigdata/ticket/682 (AtomicRowFilter UnsupportedOperationException)
+
+1.2.2:
+
+- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset())
+- http://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1)
+
+1.2.1:
+
+- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs)
+- http://sourceforge.net/apps/trac/bigdata/ticket/539 (NotMaterializedException with REGEX and Vocab)
+- http://sourceforge.net/apps/trac/bigdata/ticket/540 (SPARQL UPDATE using NSS via index.html)
+- http://sourceforge.net/apps/trac/bigdata/ticket/541 (MemoryManaged backed Journal mode)
+- http://sourceforge.net/apps/trac/bigdata/ticket/546 (Index cache for Journal)
+- http://sourceforge.net/apps/trac/bigdata/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler))
+- http://sourceforge.net/apps/trac/bigdata/ticket/550 (NPE in Leaf.getKey() : root cause was user error)
+- http://sourceforge.net/apps/trac/bigdata/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA)
+- http://sourceforge.net/apps/trac/bigdata/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder)
+- http://sourceforge.net/apps/trac/bigdata/ticket/563 (DISTINCT ORDER BY)
+- http://sourceforge.net/apps/trac/bigdata/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation)
+- http://sourceforge.net/apps/trac/bigdata/ticket/568 (DELETE WHERE fails with Java AssertionError)
+- http://sourceforge.net/apps/trac/bigdata/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception)
+- http://sourceforge.net/apps/trac/bigdata/ticket/571 (DELETE/INSERT WHERE handling of blank nodes)
+- http://sourceforge.net/apps/trac/bigdata/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node)
+
+1.2.0: (*)
+
+- http://sourceforge.net/apps/trac/bigdata/ticket/92 (Monitoring webapp)
+- http://sourceforge.net/apps/trac/bigdata/ticket/267 (Support evaluation of 3rd party operators)
+- http://sourceforge.net/apps/trac/bigdata/ticket/337 (Compact and efficient movement of binding sets between nodes.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak)
+- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers)
+- http://sourceforge.net/apps/trac/bigdata/ticket/438 (KeyBeforePartitionException on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/439 (Class loader problem)
+- http://sourceforge.net/apps/trac/bigdata/ticket/441 (Ganglia integration)
+- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler)
+- http://sourceforge.net/apps/trac/bigdata/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly)
+- http://sourceforge.net/apps/trac/bigdata/ticket/446 (HTTP Repostory broken with bigdata 1.1.0)
+- http://sourceforge.net/apps/trac/bigdata/ticket/448 (SPARQL 1.1 UPDATE)
+- http://sourceforge.net/apps/trac/bigdata/ticket/449 (SPARQL 1.1 Federation extension)
+- http://sourceforge.net/apps/trac/bigdata/ticket/451 (Serialization error in SIDs mode on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/454 (Global Row Store Read on Cluster uses Tx)
+- http://sourceforge.net/apps/trac/bigdata/ticket/456 (IExtension implementations do point lookups on lexicon)
+- http://sourceforge.net/apps/trac/bigdata/ticket/457 ("No such index" on cluster under concurrent query workload)
+- http://sourceforge.net/apps/trac/bigdata/ticket/458 (Java level deadlock in DS)
+- http://sourceforge.net/apps/trac/bigdata/ticket/460 (Uncaught interrupt resolving RDF terms)
+- http://sourceforge.net/apps/trac/bigdata/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension)
+- http://sourceforge.net/apps/trac/bigdata/ticket/464 (Query statistics do not update correctly on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/465 (Too many GRS reads on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/469 (Sail does not flush assertion buffers before query)
+- http://sourceforge.net/apps/trac/bigdata/ticket/472 (acceptTaskService pool size on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/475 (Optimize serialization for query messages on cluster)
+- http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree)
+- http://sourceforge.net/apps/trac/bigdata/ticket/478 (Cluster does not map input solution(s) across shards)
+- http://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal)
+- http://sourceforge.net/apps/trac/bigdata/ticket/481 (PhysicalAddressResolutionException against 1.0.6)
+- http://sourceforge.net/apps/trac/bigdata/ticket/482 (RWStore reset() should be thread-safe for concurrent readers)
+- http://sourceforge.net/apps/trac/bigdata/ticket/484 (Java API for NanoSparqlServer REST API)
+- http://sourceforge.net/apps/trac/bigdata/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache)
+- http://sourceforge.net/apps/trac/bigdata/ticket/492 (Empty chunk in ThickChunkMessage (cluster))
+- http://sourceforge.net/apps/trac/bigdata/ticket/493 (Virtual Graphs)
+- http://sourceforge.net/apps/trac/bigdata/ticket/496 (Sesame 2.6.3)
+- http://sourceforge.net/apps/trac/bigdata/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE)
+- http://sourceforge.net/apps/trac/bigdata/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/500 (SPARQL 1.1 Service Description)
+- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output)
+- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1)
+- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY)
+- http://sourceforge.net/apps/trac/bigdata/ticket/501 (SPARQL 1.1 BINDINGS are ignored)
+- http://sourceforge.net/apps/trac/bigdata/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException)
+- http://sourceforge.net/apps/trac/bigdata/ticket/504 (UNION with Empty Group Pattern)
+- http://sourceforge.net/apps/trac/bigdata/ticket/505 (Exception when using SPARQL sort & statement identifiers)
+- http://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x)
+- http://sourceforge.net/apps/trac/bigdata/ticket/508 (LIMIT causes hash join utility to log errors)
+- http://sourceforge.net/apps/trac/bigdata/ticket/513 (Expose the LexiconConfiguration to Function BOPs)
+- http://sourceforge.net/apps/trac/bigdata/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results)
+- http://sourceforge.net/apps/trac/bigdata/ticket/516 (REGEXBOp should cache the Pattern when it is a constant)
+- http://sourceforge.net/apps/trac/bigdata/ticket/517 (Java 7 Compiler Compatibility)
+- http://sourceforge.net/apps/trac/bigdata/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.)
+- http://sourceforge.net/apps/trac/bigdata/ticket/520 (CONSTRUCT WHERE shortcut)
+- http://sourceforge.net/apps/trac/bigdata/ticket/521 (Incremental materialization of Tuple and Graph query results)
+- http://sourceforge.net/apps/trac/bigdata/ticket/525 (Modify the IChangeLog interface to support multiple agents)
+- http://sourceforge.net/apps/trac/bigdata/ticket/527 (Expose timestamp of LexiconRelation to function bops)
+- http://sourceforge.net/apps/trac/bigdata/ticket/532 (ClassCastException during hash join (can not be cast to TermId))
+- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs)
+- http://sourceforge.net/apps/trac/bigdata/ticket/534 (BSBM BI Q5 error using MERGE JOIN)
+
+1.1.0 (*)
+
+ - http://sourceforge.net/apps/trac/bigdata/ticket/23 (Lexicon joins)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/109 (Store large literals as "blobs")
+ - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/203 (Implement an persistence capable hash table to support analytic query)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/232 (Bottom-up evaluation semantics).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/246 (Derived xsd numeric data types must be inlined as extension types.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/254 (Revisit pruning of intermediate variable bindings during query execution)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/261 (Lift conditions out of subqueries.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/300 (Native ORDER BY)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/334 (Support inlining of unicode data in the statement indices.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/364 (Scalable default graph evaluation)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/368 (Prune variable bindings during query evaluation)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/370 (Direct translation of openrdf AST to bigdata AST)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/373 (Fix StrBOp and other IValueExpressions)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/377 (Optimize OPTIONALs with multiple statement patterns.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/380 (Native SPARQL evaluation on cluster)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/387 (Cluster does not compute closure)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/395 (HTree hash join performance)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/401 (inline xsd:unsigned datatypes)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/408 (xsd:string cast fails for non-numeric data)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/421 (New query hints model.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/431 (Use of read-only tx per query defeats cache on cluster)
+
+1.0.3
+
+ - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK))
+ - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI)
+
+1.0.2
+
+ - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0)
+
+1.0.1 (*)
+
+ - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value).
+ - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.)
+ - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.)
+
+For more information about bigdata(R), please see the following links:
+
+[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page
+[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted
+[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap
+[4] http://www.bigdata.com/bigdata/docs/api/
+[5] http://sourceforge.net/projects/bigdata/
+[6] http://www.bigdata.com/blog
+[7] http://www.systap.com/bigdata.htm
+[8] http://sourceforge.net/projects/bigdata/files/bigdata/
+[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration
+
+About bigdata:
+
+Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance.
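The notes above recommend the embedded, single-machine Journal mode when scale permits. As a rough sketch of what that deployment looked like in the 1.2.x line: the journal-file property key and the exact Sail API below are recalled from memory and may differ in detail from this release, so treat this as an outline and check the GettingStarted wiki [2] for the authoritative configuration.

import java.util.Properties;

import org.openrdf.repository.RepositoryConnection;

import com.bigdata.rdf.sail.BigdataSail;
import com.bigdata.rdf.sail.BigdataSailRepository;

public class EmbeddedJournalExample {

    public static void main(final String[] args) throws Exception {

        final Properties props = new Properties();
        // Back the store with a single Journal file (single-machine mode).
        // Property key recalled from memory; verify against the wiki.
        props.setProperty("com.bigdata.journal.AbstractJournal.file",
                "/tmp/bigdata.jnl");

        final BigdataSail sail = new BigdataSail(props);
        final BigdataSailRepository repo = new BigdataSailRepository(sail);
        repo.initialize();

        final RepositoryConnection cxn = repo.getConnection();
        try {
            // Load RDF and evaluate SPARQL here via the Sesame API.
        } finally {
            cxn.close();
            repo.shutDown();
        }
    }
}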
From: <tho...@us...> - 2013-11-20 14:29:40

Revision: 7574
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7574&view=rev
Author:   thompsonbry
Date:     2013-11-20 14:29:34 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
removed unused class.

Removed Paths:
-------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OldBackupPathNode.java

Deleted: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OldBackupPathNode.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OldBackupPathNode.java	2013-11-20 14:27:18 UTC (rev 7573)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OldBackupPathNode.java	2013-11-20 14:29:34 UTC (rev 7574)
@@ -1,390 +0,0 @@
-package com.bigdata.rdf.sparql.ast;
-
-import java.util.Map;
-
-import com.bigdata.bop.BOp;
-import com.bigdata.bop.NV;
-
-/**
- * AST Node used to represent a property path.
- *
- * See http://www.w3.org/TR/sparql11-query/#rTriplesSameSubjectPath for details.
- *
- * This class corresponds to "VerbPath".
- *
- * A VerbPath (PathNode) has one Path.
- * VerbPath ::= Path
- *
- * A Path has one PathAlternative.
- * Path ::= PathAlt
- *
- * A PathAlternative has one or more PathSequences.
- * PathAlternative ::= PathSequence ( '|' PathSequence )*
- *
- * A PathSequence has one or more PathEltOrInverses.
- * PathSequence ::= PathEltOrInverse ( '/' PathEltOrInverse )*
- *
- * A PathEltOrInverse has one PathElt and a boolean flag for inverse ('^').
- * PathEltOrInverse ::= PathElt | '^' PathElt
- *
- * A PathElt has a PathPrimary and an optional PathMod.
- * PathElt ::= PathPrimary PathMod?
- *
- * A PathPrimary has either an iri, a PathNegatedPropertySet, or a nested Path.
- * PathPrimary ::= iri | '!' PathNegatedPropertySet | '(' Path ')'
- *
- * A PathMod is one from the enumeration '?', '*', or '+'. '?' means zero or
- * one (simple optional), '+' means one or more (fixed point), and '*' means
- * zero or more (optional fixed point).
- * PathMod ::= '?' | '*' | '+'
- *
- * A PathNegatedPropertySet is zero or more PathOneInPropertySets.
- * PathNegatedPropertySet ::= PathOneInPropertySet |
- *        '(' (PathOneInPropertySet ( '|' PathOneInPropertySet )* )? ')'
- *
- * A PathOneInPropertySet is an iri and a boolean flag for inverse ('^').
- * PathOneInPropertySet ::= iri | '^' iri
- *
- * @author mikepersonick
- */
-public class OldBackupPathNode extends ASTBase {
-
-    /**
-     *
-     */
-    private static final long serialVersionUID = -4396141823074067307L;
-
-    /**
-     * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-     */
-    public OldBackupPathNode(OldBackupPathNode op) {
-        super(op);
-    }
-
-    /**
-     * Required shallow copy constructor.
-     */
-    public OldBackupPathNode(final BOp[] args, final Map<String, Object> anns) {
-        super(args, anns);
-    }
-
-    public OldBackupPathNode(final Path arg) {
-        this(new BOp[] { arg }, BOp.NOANNS);
-    }
-
-    public Path getPath() {
-        return (Path) get(0);
-    }
-
-    public static class Path extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public Path(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public Path(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public Path(final PathAlternative arg) {
-            this(new BOp[] { arg }, BOp.NOANNS);
-        }
-
-        public PathAlternative getPathAlternative() {
-            return (PathAlternative) get(0);
-        }
-
-    }
-
-    public static class PathAlternative extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathAlternative(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathAlternative(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathAlternative(final PathSequence... args) {
-            this(args, BOp.NOANNS);
-
-            if (args == null || args.length == 0)
-                throw new IllegalArgumentException("one or more args required");
-        }
-
-    }
-
-    public static class PathSequence extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathSequence(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathSequence(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathSequence(final PathEltOrInverse... args) {
-            this(args, BOp.NOANNS);
-
-            if (args == null || args.length == 0)
-                throw new IllegalArgumentException("one or more args required");
-        }
-
-    }
-
-    public static class PathEltOrInverse extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        interface Annotations extends ASTBase.Annotations {
-
-            String INVERSE = "inverse";
-
-        }
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathEltOrInverse(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathEltOrInverse(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathEltOrInverse(final PathElt arg) {
-            this(arg, false);
-        }
-
-        public PathEltOrInverse(final PathElt arg, final boolean inverse) {
-            this(new BOp[] { arg }, NV.asMap(new NV(Annotations.INVERSE, inverse)));
-        }
-
-        public PathElt getPathElt() {
-            return (PathElt) get(0);
-        }
-
-        public boolean inverse() {
-            return (Boolean) super.getRequiredProperty(Annotations.INVERSE);
-        }
-
-    }
-
-    public static class PathElt extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        interface Annotations extends ASTBase.Annotations {
-
-            String MOD = "mod";
-
-        }
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathElt(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathElt(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathElt(final PathPrimary arg) {
-            this(new BOp[] { arg }, BOp.NOANNS);
-        }
-
-        public PathElt(final PathPrimary arg, final PathMod mod) {
-            this(new BOp[] { arg }, NV.asMap(new NV(Annotations.MOD, mod)));
-        }
-
-        public PathPrimary getPrimary() {
-            return (PathPrimary) get(0);
-        }
-
-        public PathMod getMod() {
-            return (PathMod) super.getProperty(Annotations.MOD);
-        }
-
-    }
-
-    public static class PathPrimary extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathPrimary(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathPrimary(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathPrimary(final ConstantNode arg) {
-            this(new BOp[] { arg }, BOp.NOANNS);
-        }
-
-        public PathPrimary(final Path arg) {
-            this(new BOp[] { arg }, BOp.NOANNS);
-        }
-
-        public PathPrimary(final PathNegatedPropertySet arg) {
-            this(new BOp[] { arg }, BOp.NOANNS);
-        }
-
-        public Object get() {
-            return get(0);
-        }
-
-    }
-
-    public static enum PathMod {
-
-        ZERO_OR_ONE("?"),
-
-        ZERO_OR_MORE("*"),
-
-        ONE_OR_MORE("+");
-
-        final String mod;
-        PathMod(final String mod) {
-            this.mod = mod;
-        }
-
-        public String toString() {
-            return mod;
-        }
-
-    }
-
-    public static class PathNegatedPropertySet extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathNegatedPropertySet(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathNegatedPropertySet(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathNegatedPropertySet(final PathOneInPropertySet... args) {
-            this(args, BOp.NOANNS);
-        }
-
-    }
-
-    public static class PathOneInPropertySet extends ASTBase {
-
-        /**
-         *
-         */
-        private static final long serialVersionUID = 1L;
-
-        interface Annotations extends ASTBase.Annotations {
-
-            String INVERSE = "inverse";
-
-        }
-
-        /**
-         * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
-         */
-        public PathOneInPropertySet(OldBackupPathNode op) {
-            super(op);
-        }
-
-        /**
-         * Required shallow copy constructor.
-         */
-        public PathOneInPropertySet(final BOp[] args, final Map<String, Object> anns) {
-            super(args, anns);
-        }
-
-        public PathOneInPropertySet(final ConstantNode arg) {
-            this(arg, false);
-        }
-
-        public PathOneInPropertySet(final ConstantNode arg, final boolean inverse) {
-            this(new BOp[] { arg }, NV.asMap(new NV(Annotations.INVERSE, inverse)));
-        }
-
-        public ConstantNode getArg() {
-            return (ConstantNode) get(0);
-        }
-
-        public boolean inverse() {
-            return (Boolean) super.getRequiredProperty(Annotations.INVERSE);
-        }
-
-    }
-
-}
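Although the class above is deleted as unused, its javadoc is a compact map of the SPARQL 1.1 path grammar, and the nested types mirror it one level per production. As a reading aid only (the class and helper below are hypothetical, and the two ConstantNode arguments are assumed to be in hand), here is how the removed constructors would compose the AST for the path ^p / q+ :

import com.bigdata.rdf.sparql.ast.ConstantNode;
import com.bigdata.rdf.sparql.ast.OldBackupPathNode.*;

final class PathAstSketch {

    static Path inverseThenOneOrMore(final ConstantNode p, final ConstantNode q) {

        // ^p : an inverse step with no cardinality modifier.
        final PathEltOrInverse step1 = new PathEltOrInverse(
                new PathElt(new PathPrimary(p)), true /* inverse */);

        // q+ : one-or-more repetition of a forward step.
        final PathEltOrInverse step2 = new PathEltOrInverse(
                new PathElt(new PathPrimary(q), PathMod.ONE_OR_MORE));

        // A single-sequence alternative: Path -> PathAlternative ->
        // PathSequence -> (^p / q+).
        return new Path(new PathAlternative(new PathSequence(step1, step2)));
    }
}

Every constructor used here appears verbatim in the deleted source, so the sketch doubles as a summary of how the grammar productions nested.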
From: <tho...@us...> - 2013-11-20 14:27:24

Revision: 7573
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7573&view=rev
Author:   thompsonbry
Date:     2013-11-20 14:27:18 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
Formatting, final, arg check should throw exception rather than assert.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java	2013-11-20 14:23:34 UTC (rev 7572)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java	2013-11-20 14:27:18 UTC (rev 7573)
@@ -167,19 +167,29 @@
      * Wrap name/value pairs as a map.
      *
      * @param nameValuePairs
-     *            Pairs each being a string followed by an object, being the name value pair in the resulting map.
-     *
+     *            Pairs each being a string followed by an object, being the
+     *            name value pair in the resulting map.
+     *
      * @return The map.
      */
-    static public Map<String,Object> asMap(Object ... nameValuePairs) {
-        assert nameValuePairs.length % 2 == 0;
-        final Map<String,Object> rslt = new LinkedHashMap<String,Object>(nameValuePairs.length/2);
-        for (int i=0;i<nameValuePairs.length;i+=2) {
-            rslt.put((String)nameValuePairs[i], nameValuePairs[i+1]);
-        }
-        return rslt;
-    }
+    static public Map<String, Object> asMap(final Object... nameValuePairs) {
+
+        if (nameValuePairs.length % 2 != 0)
+            throw new IllegalArgumentException();
+
+        final Map<String, Object> rslt = new LinkedHashMap<String, Object>(
+                nameValuePairs.length / 2);
+
+        for (int i = 0; i < nameValuePairs.length; i += 2) {
+
+            rslt.put((String) nameValuePairs[i], nameValuePairs[i + 1]);
+
+        }
+
+        return rslt;
+
+    }
+
     /**
      * Wrap an array name/value pairs as a {@link Map}.
     *
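The behavioral point of this change is that a mismatched argument list now fails fast even when JVM assertions are disabled, which is the default outside of -ea test runs. A small illustrative usage (the annotation keys "myAnnotation" and "anotherAnnotation" are made up; real callers pass keys from the relevant Annotations interfaces):

import java.util.Map;

import com.bigdata.bop.NV;

public class NVAsMapExample {

    public static void main(final String[] args) {

        // Even-length varargs: alternating String keys and Object values.
        // Insertion order is preserved (the map is a LinkedHashMap).
        final Map<String, Object> anns = NV.asMap(
                "myAnnotation", 42,
                "anotherAnnotation", "abc");

        System.out.println(anns); // {myAnnotation=42, anotherAnnotation=abc}

        // An odd number of arguments used to trip an assert only under -ea;
        // after this commit it is rejected unconditionally.
        try {
            NV.asMap("danglingKey");
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected odd-length argument list");
        }
    }
}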
From: <tho...@us...> - 2013-11-20 14:23:40

Revision: 7572
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7572&view=rev
Author:   thompsonbry
Date:     2013-11-20 14:23:34 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
in fact, one of these is the shallow copy constructor.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java	2013-11-20 14:22:34 UTC (rev 7571)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java	2013-11-20 14:23:34 UTC (rev 7572)
@@ -47,7 +47,7 @@
     }
 
     /**
-     * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
+     * Shallow copy constructor (required).
      */
     public OR(final BOp[] args, final Map<String, Object> anns) {
         super(args, anns);
From: <tho...@us...> - 2013-11-20 14:22:42

Revision: 7571
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7571&view=rev
Author:   thompsonbry
Date:     2013-11-20 14:22:34 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
in fact, one of these is the shallow copy constructor.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java	2013-11-20 13:38:47 UTC (rev 7570)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java	2013-11-20 14:22:34 UTC (rev 7571)
@@ -47,7 +47,7 @@
     }
 
    /**
-     * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
+     * Shallow copy constructor (required).
     */
    public AND(final BOp[] args, final Map<String, Object> anns) {
        super(args, anns);
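Revisions 7571 and 7572 correct the same copy/paste javadoc in AND and OR: the (BOp[], Map) constructor is the shallow copy constructor that the BOp machinery requires by convention, distinct from the deep copy constructor that takes an instance of the same class. A hypothetical operator (MyOp is invented for illustration; the two-constructor pattern itself is what the quoted classes follow) looks like this:

import java.util.Map;

import com.bigdata.bop.BOp;
import com.bigdata.bop.BOpBase;

public class MyOp extends BOpBase {

    private static final long serialVersionUID = 1L;

    /** Deep copy constructor: clones an existing instance. */
    public MyOp(final MyOp op) {
        super(op);
    }

    /** Shallow copy constructor (required): reuses the given operator
     *  arguments and annotations, which reflection-based copy utilities
     *  rely on when rewriting operator trees. */
    public MyOp(final BOp[] args, final Map<String, Object> anns) {
        super(args, anns);
    }
}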
From: <tho...@us...> - 2013-11-20 13:38:55

Revision: 7570
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7570&view=rev
Author:   thompsonbry
Date:     2013-11-20 13:38:47 +0000 (Wed, 20 Nov 2013)

Log Message:
-----------
Changing the maven POM to deploy the 1.2.4 artifact.

Modified Paths:
--------------
    tags/BIGDATA_RELEASE_1_2_4/pom.xml

Modified: tags/BIGDATA_RELEASE_1_2_4/pom.xml
===================================================================
--- tags/BIGDATA_RELEASE_1_2_4/pom.xml	2013-11-20 13:28:28 UTC (rev 7569)
+++ tags/BIGDATA_RELEASE_1_2_4/pom.xml	2013-11-20 13:38:47 UTC (rev 7570)
@@ -48,7 +48,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.bigdata</groupId>
   <artifactId>bigdata</artifactId>
-  <version>1.2.2-SNAPSHOT</version>
+  <version>1.2.4</version>
   <packaging>pom</packaging>
   <name>bigdata(R)</name>
   <description>Bigdata(R) Maven Build</description>
@@ -154,12 +154,12 @@
     <repository>
       <id>bigdata.releases</id>
      <name>bigdata(R) releases</name>
-      <url>scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/releases</url>
+      <url>scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/releases</url>
    </repository>
    <snapshotRepository>
      <id>bigdata.snapshots</id>
      <name>bigdata(R) snapshots</name>
-      <url>scpexe://shell.speakeasy.net/home/t/thompsonbry/systap.com/maven/snapshots</url>
+      <url>scpexe://www.systap.com/srv/www/htdocs/systap.com/maven/snapshots</url>
      <uniqueVersion>true</uniqueVersion>
    </snapshotRepository>
  </distributionManagement>
From: <tho...@us...> - 2013-11-20 13:28:34
|
Revision: 7569 http://bigdata.svn.sourceforge.net/bigdata/?rev=7569&view=rev Author: thompsonbry Date: 2013-11-20 13:28:28 +0000 (Wed, 20 Nov 2013) Log Message: ----------- making snapshot:=true in the 1.3.0 development branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-20 13:27:51 UTC (rev 7568) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2013-11-20 13:28:28 UTC (rev 7569) @@ -89,7 +89,7 @@ # Set true to do a snapshot build. This changes the value of ${version} to # include the date. -snapshot=false +snapshot=true # Javadoc build may be disabled using this property. The javadoc target will # not be executed unless this property is defined (its value does not matter). |
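For readers unfamiliar with the build: per the comment in build.properties, snapshot=true changes the value of ${version} to include the date. A rough, hypothetical Java sketch of that style of version stamping (build.ver and snapshot are the property names from the file above; the illustrative value and the exact date format used by the real ant build are assumptions):

import java.text.SimpleDateFormat;
import java.util.Date;

public class VersionStamp {
    public static void main(String[] args) {
        final String buildVer = "1.3.0";   // build.ver from build.properties (illustrative value)
        final boolean snapshot = true;     // the flag flipped in this commit
        // When snapshot is true, suffix the version with the build date.
        final String version = snapshot
                ? buildVer + "-" + new SimpleDateFormat("yyyyMMdd").format(new Date())
                : buildVer;
        System.out.println(version);       // e.g. 1.3.0-20131120
    }
}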
From: <tho...@us...> - 2013-11-20 13:28:00
|
Revision: 7568 http://bigdata.svn.sourceforge.net/bigdata/?rev=7568&view=rev Author: thompsonbry Date: 2013-11-20 13:27:51 +0000 (Wed, 20 Nov 2013) Log Message: ----------- Adding the release notes and bumping the bigdata version number for the tagged 1.2.4 release. Modified Paths: -------------- tags/BIGDATA_RELEASE_1_2_4/build.properties Added Paths: ----------- tags/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_4.txt Added: tags/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_4.txt =================================================================== --- tags/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_4.txt (rev 0) +++ tags/BIGDATA_RELEASE_1_2_4/bigdata/src/releases/RELEASE_1_2_4.txt 2013-11-20 13:27:51 UTC (rev 7568) @@ -0,0 +1,299 @@ +This is a minor release of bigdata(R). + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF, capable of loading 1B triples in under one hour on a 15-node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under Eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can check out this release from: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_2_4 + +New features: + +- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information. +- SPARQL 1.1 Property Paths. +- Remote Java client for the NanoSparqlServer Multi-Tenancy extensions +- Sesame 2.6.10 dependency +- Plus numerous other bug fixes and performance enhancements. + +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Fast 100% native SPARQL 1.1 evaluation; +- Integrated "analytic" query package; +- 100% Java memory manager leverages the JVM native heap (no GC); + +Road map [3]: + +- High availability for the journal and the cluster.
+- Runtime Query Optimizer for Analytic Query mode; and +- Simplified deployment, configuration, and administration for clusters. + +Change log: + + Note: Versions with (*) MAY require data migration. For details, see [9]. + +1.2.4: + +- http://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) + +1.2.3: + +- http://sourceforge.net/apps/trac/bigdata/ticket/168 (Maven Build) +- http://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory). +- http://sourceforge.net/apps/trac/bigdata/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll) +- http://sourceforge.net/apps/trac/bigdata/ticket/312 (CI (mock) quorums deadlock) +- http://sourceforge.net/apps/trac/bigdata/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.) +- http://sourceforge.net/apps/trac/bigdata/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.) +- http://sourceforge.net/apps/trac/bigdata/ticket/485 (RDFS Plus Profile) +- http://sourceforge.net/apps/trac/bigdata/ticket/495 (SPARQL 1.1 Property Paths) +- http://sourceforge.net/apps/trac/bigdata/ticket/519 (Negative parser tests) +- http://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for SOLUTION SETS) +- http://sourceforge.net/apps/trac/bigdata/ticket/535 (Optimize JOIN VARS for Sub-Selects) +- http://sourceforge.net/apps/trac/bigdata/ticket/555 (Support PSOutputStream/InputStream at IRawStore) +- http://sourceforge.net/apps/trac/bigdata/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser) +- http://sourceforge.net/apps/trac/bigdata/ticket/570 (MemoryManager Journal does not implement all methods). +- http://sourceforge.net/apps/trac/bigdata/ticket/575 (NSS Admin API) +- http://sourceforge.net/apps/trac/bigdata/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select) +- http://sourceforge.net/apps/trac/bigdata/ticket/578 (Concise Bounded Description (CBD)) +- http://sourceforge.net/apps/trac/bigdata/ticket/579 (CONSTRUCT should use distinct SPO filter) +- http://sourceforge.net/apps/trac/bigdata/ticket/583 (VoID in ServiceDescription) +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) +- http://sourceforge.net/apps/trac/bigdata/ticket/590 (nxparser fails with uppercase language tag) +- http://sourceforge.net/apps/trac/bigdata/ticket/592 (Optimize RWStore allocator sizes) +- http://sourceforge.net/apps/trac/bigdata/ticket/593 (Ugrade to Sesame 2.6.10) +- http://sourceforge.net/apps/trac/bigdata/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default) +- http://sourceforge.net/apps/trac/bigdata/ticket/596 (Change web.xml parameter names to be consistent with Jini/River) +- http://sourceforge.net/apps/trac/bigdata/ticket/597 (SPARQL UPDATE LISTENER) +- http://sourceforge.net/apps/trac/bigdata/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations) +- http://sourceforge.net/apps/trac/bigdata/ticket/599 (BlobIV for blank node : NotMaterializedException) +- http://sourceforge.net/apps/trac/bigdata/ticket/600 (BlobIV collision counter hits false limit.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/601 (Log uncaught exceptions) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/607 (History service / index) +- http://sourceforge.net/apps/trac/bigdata/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level) +- http://sourceforge.net/apps/trac/bigdata/ticket/609 (bigdata-ganglia is required dependency for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo) +- http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper) +- http://sourceforge.net/apps/trac/bigdata/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs) +- http://sourceforge.net/apps/trac/bigdata/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry) +- http://sourceforge.net/apps/trac/bigdata/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join) +- http://sourceforge.net/apps/trac/bigdata/ticket/616 (Row store read/update not isolated on Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?") +- http://sourceforge.net/apps/trac/bigdata/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB) +- http://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests) +- http://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.) +- http://sourceforge.net/apps/trac/bigdata/ticket/626 (Expose performance counters for read-only indices) +- http://sourceforge.net/apps/trac/bigdata/ticket/627 (Environment variable override for NSS properties file) +- http://sourceforge.net/apps/trac/bigdata/ticket/628 (Create a bigdata-client jar for the NSS REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/631 (ClassCastException in SIDs mode query) +- http://sourceforge.net/apps/trac/bigdata/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings) +- http://sourceforge.net/apps/trac/bigdata/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position) +- http://sourceforge.net/apps/trac/bigdata/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms) +- http://sourceforge.net/apps/trac/bigdata/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty) +- http://sourceforge.net/apps/trac/bigdata/ticket/642 (Property paths do not work inside of exists/not exists filters) +- http://sourceforge.net/apps/trac/bigdata/ticket/643 (Add web.xml parameters to lock down public NSS end points) +- http://sourceforge.net/apps/trac/bigdata/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close()) +- http://sourceforge.net/apps/trac/bigdata/ticket/650 (Can not POST RDF to a graph using REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap()) +- http://sourceforge.net/apps/trac/bigdata/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data) +- http://sourceforge.net/apps/trac/bigdata/ticket/656 (InFactory bug when IN args consist of a single literal) +- http://sourceforge.net/apps/trac/bigdata/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/667 (Provide NanoSparqlServer initialization hook) +- http://sourceforge.net/apps/trac/bigdata/ticket/669 (Doubly nested subqueries yield no results with LIMIT) +- http://sourceforge.net/apps/trac/bigdata/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency) +- http://sourceforge.net/apps/trac/bigdata/ticket/682 (AtomicRowFilter UnsupportedOperationException) + +1.2.2: + +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1) + +1.2.1: + +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/539 (NotMaterializedException with REGEX and Vocab) +- http://sourceforge.net/apps/trac/bigdata/ticket/540 (SPARQL UPDATE using NSS via index.html) +- http://sourceforge.net/apps/trac/bigdata/ticket/541 (MemoryManaged backed Journal mode) +- http://sourceforge.net/apps/trac/bigdata/ticket/546 (Index cache for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler)) +- http://sourceforge.net/apps/trac/bigdata/ticket/550 (NPE in Leaf.getKey() : root cause was user error) +- http://sourceforge.net/apps/trac/bigdata/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA) +- http://sourceforge.net/apps/trac/bigdata/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder) +- http://sourceforge.net/apps/trac/bigdata/ticket/563 (DISTINCT ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation) +- http://sourceforge.net/apps/trac/bigdata/ticket/568 (DELETE WHERE fails with Java AssertionError) +- http://sourceforge.net/apps/trac/bigdata/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception) +- http://sourceforge.net/apps/trac/bigdata/ticket/571 (DELETE/INSERT WHERE handling of blank nodes) +- http://sourceforge.net/apps/trac/bigdata/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node) + +1.2.0: (*) + +- http://sourceforge.net/apps/trac/bigdata/ticket/92 (Monitoring webapp) +- http://sourceforge.net/apps/trac/bigdata/ticket/267 (Support evaluation of 3rd party operators) +- http://sourceforge.net/apps/trac/bigdata/ticket/337 (Compact and efficient movement of binding sets between nodes.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) +- http://sourceforge.net/apps/trac/bigdata/ticket/438 (KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/439 (Class loader problem) +- http://sourceforge.net/apps/trac/bigdata/ticket/441 (Ganglia integration) +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/446 (HTTP Repostory broken with bigdata 1.1.0) +- http://sourceforge.net/apps/trac/bigdata/ticket/448 (SPARQL 1.1 UPDATE) +- http://sourceforge.net/apps/trac/bigdata/ticket/449 (SPARQL 1.1 Federation extension) +- http://sourceforge.net/apps/trac/bigdata/ticket/451 (Serialization error in SIDs mode on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/454 (Global Row Store Read on Cluster uses Tx) +- http://sourceforge.net/apps/trac/bigdata/ticket/456 (IExtension implementations do point lookups on lexicon) +- http://sourceforge.net/apps/trac/bigdata/ticket/457 ("No such index" on cluster under concurrent query workload) +- http://sourceforge.net/apps/trac/bigdata/ticket/458 (Java level deadlock in DS) +- http://sourceforge.net/apps/trac/bigdata/ticket/460 (Uncaught interrupt resolving RDF terms) +- http://sourceforge.net/apps/trac/bigdata/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension) +- http://sourceforge.net/apps/trac/bigdata/ticket/464 (Query statistics do not update correctly on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/465 (Too many GRS reads on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/469 (Sail does not flush assertion buffers before query) +- http://sourceforge.net/apps/trac/bigdata/ticket/472 (acceptTaskService pool size on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/475 (Optimize serialization for query messages on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree) +- http://sourceforge.net/apps/trac/bigdata/ticket/478 (Cluster does not map input solution(s) across shards) +- http://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/481 (PhysicalAddressResolutionException against 1.0.6) +- http://sourceforge.net/apps/trac/bigdata/ticket/482 (RWStore reset() should be thread-safe for concurrent readers) +- http://sourceforge.net/apps/trac/bigdata/ticket/484 (Java API for NanoSparqlServer REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache) +- http://sourceforge.net/apps/trac/bigdata/ticket/492 (Empty chunk in ThickChunkMessage (cluster)) +- http://sourceforge.net/apps/trac/bigdata/ticket/493 (Virtual Graphs) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/496 (Sesame 2.6.3) +- http://sourceforge.net/apps/trac/bigdata/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE) +- http://sourceforge.net/apps/trac/bigdata/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.) +- http://sourceforge.net/apps/trac/bigdata/ticket/500 (SPARQL 1.1 Service Description) +- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output) +- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1) +- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/501 (SPARQL 1.1 BINDINGS are ignored) +- http://sourceforge.net/apps/trac/bigdata/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException) +- http://sourceforge.net/apps/trac/bigdata/ticket/504 (UNION with Empty Group Pattern) +- http://sourceforge.net/apps/trac/bigdata/ticket/505 (Exception when using SPARQL sort & statement identifiers) +- http://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x) +- http://sourceforge.net/apps/trac/bigdata/ticket/508 (LIMIT causes hash join utility to log errors) +- http://sourceforge.net/apps/trac/bigdata/ticket/513 (Expose the LexiconConfiguration to Function BOPs) +- http://sourceforge.net/apps/trac/bigdata/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results) +- http://sourceforge.net/apps/trac/bigdata/ticket/516 (REGEXBOp should cache the Pattern when it is a constant) +- http://sourceforge.net/apps/trac/bigdata/ticket/517 (Java 7 Compiler Compatibility) +- http://sourceforge.net/apps/trac/bigdata/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.) +- http://sourceforge.net/apps/trac/bigdata/ticket/520 (CONSTRUCT WHERE shortcut) +- http://sourceforge.net/apps/trac/bigdata/ticket/521 (Incremental materialization of Tuple and Graph query results) +- http://sourceforge.net/apps/trac/bigdata/ticket/525 (Modify the IChangeLog interface to support multiple agents) +- http://sourceforge.net/apps/trac/bigdata/ticket/527 (Expose timestamp of LexiconRelation to function bops) +- http://sourceforge.net/apps/trac/bigdata/ticket/532 (ClassCastException during hash join (can not be cast to TermId)) +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/534 (BSBM BI Q5 error using MERGE JOIN) + +1.1.0 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/23 (Lexicon joins) + - http://sourceforge.net/apps/trac/bigdata/ticket/109 (Store large literals as "blobs") + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/203 (Implement an persistence capable hash table to support analytic query) + - http://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without) + - http://sourceforge.net/apps/trac/bigdata/ticket/232 (Bottom-up evaluation semantics). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/246 (Derived xsd numeric data types must be inlined as extension types.) + - http://sourceforge.net/apps/trac/bigdata/ticket/254 (Revisit pruning of intermediate variable bindings during query execution) + - http://sourceforge.net/apps/trac/bigdata/ticket/261 (Lift conditions out of subqueries.) + - http://sourceforge.net/apps/trac/bigdata/ticket/300 (Native ORDER BY) + - http://sourceforge.net/apps/trac/bigdata/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes) + - http://sourceforge.net/apps/trac/bigdata/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar) + - http://sourceforge.net/apps/trac/bigdata/ticket/334 (Support inlining of unicode data in the statement indices.) + - http://sourceforge.net/apps/trac/bigdata/ticket/364 (Scalable default graph evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/368 (Prune variable bindings during query evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/370 (Direct translation of openrdf AST to bigdata AST) + - http://sourceforge.net/apps/trac/bigdata/ticket/373 (Fix StrBOp and other IValueExpressions) + - http://sourceforge.net/apps/trac/bigdata/ticket/377 (Optimize OPTIONALs with multiple statement patterns.) + - http://sourceforge.net/apps/trac/bigdata/ticket/380 (Native SPARQL evaluation on cluster) + - http://sourceforge.net/apps/trac/bigdata/ticket/387 (Cluster does not compute closure) + - http://sourceforge.net/apps/trac/bigdata/ticket/395 (HTree hash join performance) + - http://sourceforge.net/apps/trac/bigdata/ticket/401 (inline xsd:unsigned datatypes) + - http://sourceforge.net/apps/trac/bigdata/ticket/408 (xsd:string cast fails for non-numeric data) + - http://sourceforge.net/apps/trac/bigdata/ticket/421 (New query hints model.) + - http://sourceforge.net/apps/trac/bigdata/ticket/431 (Use of read-only tx per query defeats cache on cluster) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata(R), please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance. Modified: tags/BIGDATA_RELEASE_1_2_4/build.properties =================================================================== --- tags/BIGDATA_RELEASE_1_2_4/build.properties 2013-11-20 13:13:59 UTC (rev 7567) +++ tags/BIGDATA_RELEASE_1_2_4/build.properties 2013-11-20 13:27:51 UTC (rev 7568) @@ -82,7 +82,7 @@ release.dir=ant-release # The build version (note: 0.82b -> 0.82.0); 0.83.2 is followed by 1.0.0 -build.ver=1.2.3 +build.ver=1.2.4 build.ver.osgi=1.0 # Set true to do a snapshot build. This changes the value of ${version} to |
From: <tho...@us...> - 2013-11-20 13:14:08
|
Revision: 7567 http://bigdata.svn.sourceforge.net/bigdata/?rev=7567&view=rev Author: thompsonbry Date: 2013-11-20 13:13:59 +0000 (Wed, 20 Nov 2013) Log Message: ----------- Tagging the 1.2.4 release for a customer. This release contains a single bug fix for ticket #777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) over the 1.2.3 release. Added Paths: ----------- tags/BIGDATA_RELEASE_1_2_4/ |
From: <jer...@us...> - 2013-11-20 00:53:31
|
Revision: 7566 http://bigdata.svn.sourceforge.net/bigdata/?rev=7566&view=rev Author: jeremy_carroll Date: 2013-11-20 00:53:25 +0000 (Wed, 20 Nov 2013) Log Message: ----------- Corrected blunder: debug code was committed in previous revision. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-19 23:54:27 UTC (rev 7565) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-20 00:53:25 UTC (rev 7566) @@ -57,7 +57,7 @@ /** * This field is public non-final so that we can change its value during testing. */ - public static boolean DEFAULT_NATIVE_HASH_JOINS = true; // QueryHints.DEFAULT_NATIVE_HASH_JOINS; + public static boolean DEFAULT_NATIVE_HASH_JOINS = QueryHints.DEFAULT_NATIVE_HASH_JOINS; /** * The {@link ASTContainer} |
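The blunder being corrected here is a predictable failure mode of the pattern introduced by r7565 (below): a public, non-final static default that tests flip at runtime is easy to leave hard-coded in a commit. A stripped-down, hypothetical sketch of the pattern; only the field name DEFAULT_NATIVE_HASH_JOINS is taken from the diff above:

/** Hypothetical sketch of a test-overridable default (names illustrative). */
public class EvalContextSketch {

    /**
     * Public and non-final so a test harness can flip it around a suite.
     * The checked-in initializer must stay pointed at the real default,
     * never at a true/false left over from local debugging.
     */
    public static boolean DEFAULT_NATIVE_HASH_JOINS = false;

    /** Captured at construction, so flipping the static only affects contexts created afterwards. */
    public final boolean nativeHashJoins = DEFAULT_NATIVE_HASH_JOINS;
}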
From: <jer...@us...> - 2013-11-19 23:54:35
|
Revision: 7565 http://bigdata.svn.sourceforge.net/bigdata/?rev=7565&view=rev Author: jeremy_carroll Date: 2013-11-19 23:54:27 +0000 (Tue, 19 Nov 2013) Log Message: ----------- Running AST eval tests also in HTree mode ... (omitting trac748 tests that fail for reasons articulated in trac 776) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-19 22:25:59 UTC (rev 7564) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2013-11-19 23:54:27 UTC (rev 7565) @@ -53,6 +53,11 @@ * FIXME Rolling back r7319 which broke UNION processing. */ public class AST2BOpContext implements IdFactory, IEvaluationContext { + + /** + * This field is public non-final so that we can change its value during testing. + */ + public static boolean DEFAULT_NATIVE_HASH_JOINS = true; // QueryHints.DEFAULT_NATIVE_HASH_JOINS; /** * The {@link ASTContainer} @@ -148,7 +153,7 @@ * * @see QueryHints#NATIVE_HASH_JOINS */ - public boolean nativeHashJoins = QueryHints.DEFAULT_NATIVE_HASH_JOINS; + public boolean nativeHashJoins = DEFAULT_NATIVE_HASH_JOINS; /** * When <code>true</code>, a merge-join pattern will be recognized if it Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-11-19 22:25:59 UTC (rev 7564) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java 2013-11-19 23:54:27 UTC (rev 7565) @@ -26,6 +26,7 @@ import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.rdf.sparql.ast.eval.reif.TestReificationDoneRightEval; +import junit.extensions.TestSetup; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; @@ -55,7 +56,7 @@ * Returns a test that will run each of the implementation specific test * suites in turn. */ - public static Test suite() + private static TestSuite coreSuite() { final TestSuite suite = new TestSuite("AST Evaluation"); @@ -181,4 +182,31 @@ } + + /** + * Returns a test that will run each of the implementation specific test + * suites in turn.
+ */ + public static Test suite() + { + + final TestSuite suite = new TestSuite("AST Evaluation (all)"); + final TestSuite tHash = coreSuite(); + tHash.setName("AST Evaluation (tHash)"); + suite.addTest(new TestSetup(tHash) { + + protected void setUp() throws Exception { + AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS = true; + } + protected void tearDown() throws Exception { + AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS = false; + } + + }); + final TestSuite jvmHash = coreSuite(); + jvmHash.setName("AST Evaluation (jvmHash)"); + suite.addTest(jvmHash); + return suite; + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2013-11-19 22:25:59 UTC (rev 7564) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestTickets.java 2013-11-19 23:54:27 UTC (rev 7565) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sparql.ast.eval; +import com.bigdata.BigdataStatics; + /** * Test suite for tickets at <a href="http://sourceforge.net/apps/trac/bigdata"> * trac </a>. @@ -178,6 +180,11 @@ } public void test_ticket_748() throws Exception { + // Concerning omitting the test with hash joins, see Trac776 and + // com.bigdata.rdf.internal.encoder.AbstractBindingSetEncoderTestCase.test_solutionWithOneMockIV() + + if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) + return; new TestHelper("ticket748-subselect",// testURI, "ticket748-subselect.rq",// queryFileURL @@ -190,6 +197,9 @@ } public void test_ticket_748a() throws Exception { + if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) + return; + new TestHelper("ticket748A-subselect",// testURI, "ticket748A-subselect.rq",// queryFileURL "ticket748-subselect.ttl",// dataFileURL @@ -200,6 +210,9 @@ } public void test_ticket_two_subselects_748() throws Exception { + if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) + return; + new TestHelper("ticket748-two-subselects",// testURI, "ticket748-two-subselects.rq",// queryFileURL "ticket748-two-subselects.ttl",// dataFileURL @@ -211,6 +224,9 @@ } public void test_ticket_two_subselects_748a() throws Exception { + if(AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS && !BigdataStatics.runKnownBadTests) + return; + new TestHelper("ticket748A-two-subselects",// testURI, "ticket748A-two-subselects.rq",// queryFileURL "ticket748-two-subselects.ttl",// dataFileURL Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java 2013-11-19 22:25:59 UTC (rev 7564) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestServiceRegistry.java 2013-11-19 23:54:27 UTC (rev 7565) @@ -300,6 +300,7 @@ // De-register alias ServiceRegistry.getInstance().remove(serviceURI1); + ServiceRegistry.getInstance().remove(serviceURI2); } |
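The TestAll change above is a reusable pattern: junit.extensions.TestSetup is a JUnit 3 decorator whose setUp/tearDown bracket an entire wrapped suite, which is what lets one copy of the core suite run with native (HTree) hash joins forced on while a second copy runs with the JVM-based default. A condensed, hypothetical sketch of the idea (the suite members and the flag holder are reduced to placeholders):

import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestSuite;

public class TwoModeSuiteSketch {

    /** Placeholder for AST2BOpContext.DEFAULT_NATIVE_HASH_JOINS. */
    static boolean nativeHashJoins = false;

    public static Test suite() {
        final TestSuite all = new TestSuite("AST Evaluation (all)");

        // Pass 1: the decorator flips the flag before the wrapped suite runs
        // and restores it afterwards, so every member executes in HTree mode.
        final TestSuite tHash = new TestSuite("AST Evaluation (tHash)"); // members elided
        all.addTest(new TestSetup(tHash) {
            protected void setUp() { nativeHashJoins = true; }
            protected void tearDown() { nativeHashJoins = false; }
        });

        // Pass 2: the same tests again under the default (JVM hash join) mode.
        final TestSuite jvmHash = new TestSuite("AST Evaluation (jvmHash)"); // members elided
        all.addTest(jvmHash);

        return all;
    }
}

Because the flag is only read when a query evaluation context is constructed, the decorator approach works without touching the individual test classes.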
From: <tho...@us...> - 2013-11-19 22:26:07
|
Revision: 7564 http://bigdata.svn.sourceforge.net/bigdata/?rev=7564&view=rev Author: thompsonbry Date: 2013-11-19 22:25:59 +0000 (Tue, 19 Nov 2013) Log Message: ----------- Bug fix patch for a customer's final QA leading up to their release. This commit incorporates r7216, which is a bug fix for a ConcurrentModificationException in the ASTComplexOptionalOptimizer. I have applied that one bug fix to the 1.2.3 maintenance branch: branches/BIGDATA_RELEASE_1_2_3/. That branch is at r7185. Due to a goof in the release process, tags/BIGDATA_RELEASE_1_2_3/ is r7188. However, both r7185 and r7188 are for exactly the same SVN commit point, which is r7184. See https://sourceforge.net/apps/trac/bigdata/ticket/777 (ConcurrentModificationException in ASTComplexOptionalOptimizer) Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7216&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7185&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7188&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7185&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7188&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7184&view=rev Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_3/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java Modified: branches/BIGDATA_RELEASE_1_2_3/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_3/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java 2013-11-19 14:10:04 UTC (rev 7563) +++ branches/BIGDATA_RELEASE_1_2_3/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java 2013-11-19 22:25:59 UTC (rev 7564) @@ -165,8 +165,16 @@ if (namedSubqueries != null) { + final List<NamedSubqueryRoot> roots = new LinkedList<NamedSubqueryRoot>(); + for (NamedSubqueryRoot namedSubquery : namedSubqueries) { + roots.add(namedSubquery); + + } + + for (NamedSubqueryRoot namedSubquery : roots) { + convertComplexOptionalGroups(context, sa, namedSubquery, namedSubquery.getWhereClause(), exogenousVars); |
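The r7564 fix is the textbook defense against java.util.ConcurrentModificationException: namedSubqueries was evidently being structurally modified somewhere inside the loop while the for-each's implicit Iterator over it was still open, so the loop now drains the collection into a private LinkedList first and iterates that snapshot instead. A self-contained illustration of the failure and the fix (the String elements stand in for NamedSubqueryRoot instances):

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class DefensiveCopyDemo {

    public static void main(String[] args) {
        final List<String> namedSubqueries = new ArrayList<String>();
        namedSubqueries.add("subquery-1");
        namedSubqueries.add("subquery-2");

        // BROKEN: adding to the list while the for-each iterator is open
        // fails fast with ConcurrentModificationException:
        //
        //   for (String s : namedSubqueries) {
        //       namedSubqueries.add(s + "-rewritten");
        //   }

        // FIXED (the shape used in r7564): snapshot the collection, iterate
        // the snapshot, and mutate the original freely.
        final List<String> roots = new LinkedList<String>(namedSubqueries);
        for (String s : roots) {
            namedSubqueries.add(s + "-rewritten");
        }

        // [subquery-1, subquery-2, subquery-1-rewritten, subquery-2-rewritten]
        System.out.println(namedSubqueries);
    }
}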
From: <tho...@us...> - 2013-11-19 14:10:13
|
Revision: 7563 http://bigdata.svn.sourceforge.net/bigdata/?rev=7563&view=rev Author: thompsonbry Date: 2013-11-19 14:10:04 +0000 (Tue, 19 Nov 2013) Log Message: ----------- I have added unit tests for a physically empty HALog and a corrupt HALog parallel to the existing unit test for a logically empty HALog. All three conditions are correctly handled when the HALog with the problem is the successor of the last commit point on the journal. This closes out the bug identified above. The other aspects of this ticket remain open. See #775 (HAJournal start()) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServerWithHALogs.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3WORMJournalServer.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-11-18 22:45:25 UTC (rev 7562) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-11-19 14:10:04 UTC (rev 7563) @@ -70,7 +70,7 @@ */ public class HALogNexus implements IHALogWriter { - private static final Logger log = Logger.getLogger(SnapshotManager.class); + private static final Logger log = Logger.getLogger(HALogNexus.class); /** * Logger for HA events. @@ -249,37 +249,125 @@ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" > * HAJournalServer can not restart due to logically empty log files * </a> + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" > + * HAJournal start() </a> */ { /* - * Used to detect a logically empty HALog (iff it is the last one in - * commit order). + * Data structure used to detect a bad HALog and identify whether or + * not it is the last one in commit order. */ final HALogScanState tmp = new HALogScanState(); // Scan the HALog directory, populating the in-memory index. populateIndexRecursive(haLogDir, IHALogReader.HALOG_FILTER, tmp); - if (tmp.emptyHALogFile != null) { + final long commitCounterOnJournal = journal.getRootBlockView().getCommitCounter(); - /* - * The last HALog file is logically empty. It WAS NOT added to - * the in-memory index. We try to remove it now. + if (tmp.firstBadHALogFile != null) { + + /** + * If only the last HALog file is bad (physically empty, + * logically empty, bad MAGIC, etc), then it WAS NOT added to + * the in-memory index. + * + * We try to remove it now and then start up normally. While we + * are short one HALog file, we can obtain it during + * resynchronization from the other nodes in the cluster. + * * Note: It is not critical that we succeed in removing this * HALog file so long as it does not interfere with the correct * startup of the HAJournalServer. */ - final File f = tmp.emptyHALogFile; + final File f = tmp.firstBadHALogFile; - if (!f.delete()) { + /* + * Parse out the closing commit counter for that HALog. This is + * the commit counter that would be assigned to the root block + * if this transaction had been applied to the Journal.
+ */ + final long closingCommitCounter = CommitCounterUtility + .parseCommitCounterFile(f.getName(), + IHALogReader.HA_LOG_EXT); - log.warn("Could not remove empty HALog: " + f); + if (commitCounterOnJournal + 1 == closingCommitCounter) { + + /* + * This HALog file was for the next commit point to be + * recorded on the Journal. We can safely delete it and + * continue the normal startup. + */ + if (haLog.isInfoEnabled()) + haLog.info("Removing bad/empty HALog file: commitCounterOnJournal=" + + commitCounterOnJournal); + + if (!f.delete()) { + + log.warn("Could not remove empty HALog: " + f); + + } + + } else { + + /* + * This HALog file is bad. The service can not start until + * it has been replaced. + * + * FIXME Automate the replacement of the bad/missing HALog + * file from the quorum leader. + */ + throw new HALogException(tmp.firstBadHALogFile, + tmp.firstCause); + } } + + // Get the most recent HALog record from the index. + final IHALogRecord r = haLogIndex.getNewestEntry(); + + if (r != null) { + + /** + * Note: The logic above makes sure that we have each HALog in + * sequence from some unspecified starting point, but it does + * not verify that the last HALog file corresponds to the last + * durable commit point on the Journal, does not verify the + * number of local HALog files against some target (e.g., as + * specified by the restore policy), and does not verify that + * there are no HALog files for commit points beyond the last + * commit point on the journal (which could happen if someone + * did a point in time restore of the journal from a snapshot + * and failed to remove the HALog files after that point in + * time). + * + * TODO This should be refactored when we address #775. + */ + + if (r.getCommitCounter() < commitCounterOnJournal) { + /* + * Reject start if we are missing the HALog for the most + * recent commit point on the journal. + */ + throw new RuntimeException( + "Missing HALog(s) for committed state on journal: journal@=" + + commitCounterOnJournal + ", lastHALog@" + + r.getCommitCounter()); + } + + /* + * Note: If there are HALog files for commit points beyond the + * most recent commit point on the journal, then those HALog + * files will be applied to roll forward the journal. This is + * done by HAJournalServer in its RESTORE state. Thus is + * necessary to remove any HALog files beyond the desired commit + * point before restarting the service when rolling back to a + * specific point in time. + */ + + } } @@ -309,13 +397,19 @@ */ private static class HALogScanState { /** - * Flag is set the first time an empty HALog file is identified. + * Flag is set the first time bad HALog file is identified. * <p> - * Note: We scan the HALog files in commit counter order. If the last - * file is (logically) empty, then we will silently remove it. However, - * if any other HALog file is logically empty, then this is an error. + * Note: We scan the HALog files in commit counter order. If only the + * last file in the scan is bad, then we will silently remove it - the + * HALog will be replaced when this service attempts to . + * However, if there is more than one bad HALog file, then this is an + * error. */ - File emptyHALogFile = null; + File firstBadHALogFile = null; + /** + * The exception when we first encountered a bad HALog file. + */ + Throwable firstCause = null; } /** @@ -328,11 +422,11 @@ * side-effect using the {@link HALogScanState} and will NOT be added to the * index. 
The caller SHOULD then remove the logically empty HALog file * - * TODO If an HALog is discovered to have bad checksums or otherwise corrupt - * root blocks and there is a met quorum, then we should re-replicate that - * HALog from the quourm leader. + * FIXME If an HALog is discovered to have bad checksums or otherwise + * corrupt root blocks and there is a met quorum, then we should + * re-replicate that HALog from the quourm leader. * - * TODO For HALog files other than the last HALog file (in commit counter + * FIXME For HALog files other than the last HALog file (in commit counter * sequence) if there are any missing HALog files in the sequence, if any if * the files in the sequence other than the last HALog file is logically * empty, or if any of those HALog files has a bad root bloxks) then we @@ -342,6 +436,27 @@ * we allow the service to start, then it will have limited rollback * capability. All of this could be checked in an index scan once we have * identified all of the HALog files in the file system. + * + * TODO This could be rewritten to generate the filenames by running the + * commit counter from the first discovered HALog file's commit counter up + * through the current commit point on the journal. Alternatively, we could + * just start with the current commit point on the journal and the substract + * one and move backward until we find the first HALog file that is not + * locally available. We could then cross check this with the + * {@link IRestorePolicy} and decide whether we needed to back fill either + * HALog files or snapshots on this service in order to satisify the + * {@link IRestorePolicy}. This has the advantage that we start with the + * most recent HALog file first, so we can immediately diagnose any problems + * with the last commit point on restart. It removes the recursive logic and + * makes it easier to write code that decides whether or not a given HALog + * file being bad poses a problem and what kind of a problem and how to + * resolve that problem. There will be more GC associated with the + * generation of the file names from the commit counters, but we could get + * rid of that GC overhead entirely by supplying a reusable + * {@link StringBuilder}. + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" > + * HAJournal start() </a> */ private void populateIndexRecursive(final File f, final FileFilter fileFilter, final HALogScanState state) @@ -370,7 +485,7 @@ } else { - if (state.emptyHALogFile != null) { + if (state.firstBadHALogFile != null) { /* * We already have an empty HALog file. If there are any more @@ -380,8 +495,9 @@ * order). */ - throw new LogicallyEmptyHALogException(state.emptyHALogFile); - + throw new HALogException(state.firstBadHALogFile, + state.firstCause); + } try { @@ -389,16 +505,22 @@ // Attempt to add to the index. addHALog(f); - } catch (LogicallyEmptyHALogException ex) { + } catch (Throwable t) { + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + // propagate interrupt. + throw new RuntimeException(t); + } + // Should be null since we checked this above. - assert state.emptyHALogFile == null; + assert state.firstBadHALogFile == null; /* * The first empty HALog file. There is at most one allowed and * it must be the last HALog file in commit counter order. 
*/ - state.emptyHALogFile = f; + state.firstBadHALogFile = f; + state.firstCause = t; } @@ -430,6 +552,13 @@ final byte[] b0 = new byte[RootBlockView.SIZEOF_ROOT_BLOCK]; final byte[] b1 = new byte[RootBlockView.SIZEOF_ROOT_BLOCK]; + + if (file.length() == 0L) { + /* + * The file is physically empty (zero length). + */ + throw new EmptyHALogException(file); + } final DataInputStream is = new DataInputStream( new FileInputStream(file)); @@ -455,8 +584,8 @@ } catch(IOException ex) { // Wrap exception with the file name. - throw new IOException(ex.getMessage() + ", file=" + file, ex); - + throw new HALogException(file, ex); + } finally { is.close(); @@ -489,18 +618,16 @@ * @throws ChecksumError * if there is a checksum problem with the root blocks. * - * TODO If the root blocks are the same then this is an empty - * HALog. Right now that is an error. [We might want to simply - * remove any such HALog file.] - * <p> - * Likewise, it is an error if any HALog has bad root blocks - * (checksum or other errors). - * * TODO A similar problem exists if any of the HALog files GTE * the earliest snapshot are missing, have bad root blocks, etc. * We will not be able to restore the commit point associated * with that HALog file unless it also happens to correspond to - * a snapshot. + * a snapshot. Such bad/missing HALog files should be + * re-replicated from the quorum leader. This process should be + * automated. + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" > + * HAJournal start() </a> */ private void addHALog(final File file) throws IOException, LogicallyEmptyHALogException { @@ -554,28 +681,66 @@ } /** + * Base class for exceptions when we are unable to read an HALog file. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private static class HALogException extends IOException { + + private static final long serialVersionUID = 1L; + + public HALogException(final File file) { + + super(file.getAbsolutePath()); + + } + + public HALogException(final File file,final Throwable cause) { + + super(file.getAbsolutePath(), cause); + + } + + } + + /** * Exception raise when an HALog file is logically empty (the opening and * closing root blocks are identicial). * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> */ - private static class LogicallyEmptyHALogException extends IOException { + private static class LogicallyEmptyHALogException extends HALogException { - /** - * - */ private static final long serialVersionUID = 1L; public LogicallyEmptyHALogException(final File file) { - super(file.getAbsolutePath()); + super(file); } } /** + * Exception raise when an HALog file is physically empty (zero length). + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class EmptyHALogException extends HALogException { + + private static final long serialVersionUID = 1L; + + public EmptyHALogException(final File file) { + + super(file); + + } + + } + + /** * Remove an snapshot from the file system and the {@link #haLogIndex}. 
* * @param file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2013-11-18 22:45:25 UTC (rev 7562) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2013-11-19 14:10:04 UTC (rev 7563) @@ -32,8 +32,6 @@ import junit.framework.TestSuite; import com.bigdata.journal.Journal; -import com.bigdata.journal.WORMStrategy; -import com.bigdata.rwstore.RWStore; /** * Test suite for highly available configurations of the standalone @@ -60,21 +58,6 @@ /** * Returns a test that will run each of the implementation specific test * suites in turn. - * - * FIXME (*) Test {@link WORMStrategy} and {@link RWStore} (through an override?) - * - * FIXME The NSS should transparently proxy mutation requests to the quorum - * leader (and to a global leader if offsite is supported, or maybe that is - * handled at a layer above). The tests need to be modified (A) to NOT only - * write on the leader; and (B) to verify that we can send a write request - * to ANY service that is joined with the met quorum. (And verify for POST, - * DELETE, and PUT since those are all different method.) - * <p> - * Note: We could have services that are not joined with the met quorum - * simply forward read requests to services that ARE joined with the met - * quorum. That way they can begin "accepting" reads and writes immediately. - * This could also be done one level down, using failover reads to reach a - * service joined with the met quorum. */ public static Test suite() { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServerWithHALogs.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServerWithHALogs.java 2013-11-18 22:45:25 UTC (rev 7562) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServerWithHALogs.java 2013-11-19 14:10:04 UTC (rev 7563) @@ -27,6 +27,9 @@ package com.bigdata.journal.jini.ha; import java.io.File; +import java.io.FileOutputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; import net.jini.config.Configuration; @@ -37,7 +40,9 @@ import com.bigdata.ha.halog.IHALogReader; import com.bigdata.ha.msg.IHA2PhasePrepareMessage; import com.bigdata.journal.CommitCounterUtility; +import com.bigdata.journal.FileMetadata; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; +import com.bigdata.util.InnerCause; /** * Test suite when we are using the {@link DefaultSnapshotPolicy} and @@ -104,7 +109,7 @@ * HAJournalServer can not restart due to logically empty log files * </a> */ - public void test_startABC_emptyLogFileDeletedOnRestartC() throws Exception { + public void test_startABC_logicallyEmptyLogFileDeletedOnRestartC() throws Exception { final ABC abc = new ABC(true/* sequential */); @@ -275,6 +280,620 @@ } /** + * This is a unit test for the ability to silently remove a physically empty + * HALog file. Three services are started in sequence (A,B,C). A series of + * small commits are applied to the quorum. (C) is then shutdown. A + * logically empty HALog file should exist on each service for the next + * commit point. We now overwrite that file with a physically empty HALog + * file (zero length). 
We then do one more update. C is then restarted. We
+ * verify that C restarts and that the physically empty HALog file has been
+ * replaced by an HALog file that has the same digest as the HALog file for
+ * that commit point on (A,B).
+ * <p>
+ * Note: We can not reliably observe that the physically empty HALog file was
+ * removed during startup. However, this is not critical. What is critical
+ * is that the physically empty HALog file (a) does not prevent (C) from
+ * starting; (b) is replaced by the correct HALog data from the quorum
+ * leader; and (c) that (C) resynchronizes with the met quorum and joins
+ * causing a fully met quorum.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" >
+ * HAJournalServer can not restart due to logically empty log files
+ * </a>
+ * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" >
+ * HAJournal start() </a>
+ */
+ public void test_startABC_physicallyEmptyLogFileDeletedOnRestartC() throws Exception {
+
+ final ABC abc = new ABC(true/* sequential */);
+
+ final HAGlue serverA = abc.serverA, serverB = abc.serverB;
+ HAGlue serverC = abc.serverC;
+
+ // Verify quorum is FULLY met.
+ awaitFullyMetQuorum();
+
+ // await the KB create commit point to become visible on each service.
+ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ 1/* lastCommitCounter */, new HAGlue[] { serverA, serverB,
+ serverC });
+
+ /*
+ * Do a series of small commits.
+ */
+
+ final int NSMALL = 5;
+
+ for (int i = 1/* createKB */; i <= NSMALL; i++) {
+
+ simpleTransaction();
+
+ }
+
+ final long commitCounter1 = 1 + NSMALL; // AKA (6)
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */, commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 + 1);
+
+ /*
+ * Shutdown C.
+ *
+ * Note: This might cause the empty HALog file on (C) to be deleted.
+ * That is Ok, since we will copy the desired empty HALog from (A) to
+ * (C), thus enforcing the desired test condition.
+ */
+ shutdownC();
+
+ /*
+ * Verify that there is an empty HALog file on (A) for the next
+ * commit point.
+ */
+
+ // The next commit point.
+ final long commitCounter2 = commitCounter1 + 1; // AKA (7)
+
+ // The HALog for that next commit point.
+ final File fileA = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirA(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Verify HALog file for next commit point on A is logically empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // The name of that HALog file on (C). 
+ final File fileC = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+// // Copy that empty HALog file to (C).
+// copyFile(fileA, fileC, false/* append */);
+
+ // delete the logically empty file (if it exists).
+ if (fileC.exists() && !fileC.delete())
+ fail("Could not delete: fileC=" + fileC);
+
+ // create the physically empty file.
+ if (!fileC.createNewFile())
+ fail("Could not create: fileC=" + fileC);
+
+ /*
+ * Do another transaction. This will cause the HALog file for that
+ * commit point to be non-empty on A.
+ */
+ simpleTransaction();
+
+ /*
+ * Await the commit points to become visible.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitCommitCounter(commitCounter2, new HAGlue[] { serverA, serverB });
+
+ // Verify the expected #of HALogs on each service.
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter2);
+
+ // Verify HALog file for next commit point on A is NOT empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertFalse(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // Verify HALog file for next commit point on C is physically empty.
+ {
+ assertTrue(fileC.exists());
+ assertEquals(0L, fileC.length());
+ }
+
+ /*
+ * Restart (C). It should start without complaint. The physically empty
+ * HALog file should be replaced by the corresponding file from (A) by
+ * the time the quorum fully meets. At this point all services will have
+ * the same digests for all HALog files.
+ */
+
+ // Restart C.
+ serverC = startC();
+
+ // Wait until the quorum is fully met.
+ awaitFullyMetQuorum();
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter2,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ commitCounter2 /* lastCommitCounter */, new HAGlue[] { serverA,
+ serverB, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: Each service will have an empty HALog for the next commit
+ * point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter2+1);
+ awaitLogCount(getHALogDirB(), commitCounter2+1);
+ awaitLogCount(getHALogDirC(), commitCounter2+1);
+
+ }
+
+ /**
+ * This is a variant test for the ability to silently remove a corrupt HALog
+ * file on restart when it is the HALog file for the first write set not yet
+ * committed on the journal. Three services are started in sequence (A,B,C).
+ * A series of small commits are applied to the quorum. (C) is then
+ * shut down. A logically empty HALog file should exist on each service for
+ * the next commit point. However, since this might have been removed on C
+ * when it was shut down, we copy the logically empty HALog file from (A) to
+ * (C). We then overwrite the root blocks of that logically empty HALog file
+ * with junk. We then do one more update. C is then restarted. We verify
+ * that C restarts and that the corrupt HALog file has been replaced
+ * by an HALog file that has the same digest as the HALog file for that
+ * commit point on (A,B). 
+ * <p>
+ * Note: We can not reliably observe that the corrupt HALog file was
+ * removed during startup. However, this is not critical. What is critical
+ * is that the corrupt HALog file (a) does not prevent (C) from
+ * starting; (b) is replaced by the correct HALog data from the quorum
+ * leader; and (c) that (C) resynchronizes with the met quorum and joins
+ * causing a fully met quorum.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" >
+ * HAJournalServer can not restart due to logically empty log files
+ * </a>
+ */
+ public void test_startABC_corruptLogFileDeletedOnRestartC() throws Exception {
+
+ final ABC abc = new ABC(true/* sequential */);
+
+ final HAGlue serverA = abc.serverA, serverB = abc.serverB;
+ HAGlue serverC = abc.serverC;
+
+ // Verify quorum is FULLY met.
+ awaitFullyMetQuorum();
+
+ // await the KB create commit point to become visible on each service.
+ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ 1/* lastCommitCounter */, new HAGlue[] { serverA, serverB,
+ serverC });
+
+ /*
+ * Do a series of small commits.
+ */
+
+ final int NSMALL = 5;
+
+ for (int i = 1/* createKB */; i <= NSMALL; i++) {
+
+ simpleTransaction();
+
+ }
+
+ final long commitCounter1 = 1 + NSMALL; // AKA (6)
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */, commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 + 1);
+
+ /*
+ * Shutdown C.
+ *
+ * Note: This might cause the empty HALog file on (C) to be deleted.
+ * That is Ok, since we will copy the desired empty HALog from (A) to
+ * (C), thus enforcing the desired test condition.
+ */
+ shutdownC();
+
+ /*
+ * Verify that there is an empty HALog file on (A) for the next
+ * commit point.
+ */
+
+ // The next commit point.
+ final long commitCounter2 = commitCounter1 + 1; // AKA (7)
+
+ // The HALog for that next commit point.
+ final File fileA = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirA(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Verify HALog file for next commit point on A is logically empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // The name of that HALog file on (C).
+ final File fileC = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Copy that empty HALog file to (C).
+ copyFile(fileA, fileC, false/* append */);
+ /*
+ * Overwrite the root blocks of the HALog on (C). 
+ */
+ {
+ final OutputStream os = new FileOutputStream(fileC);
+ try {
+ final ByteBuffer buf = getRandomData(FileMetadata.headerSize0);
+ final byte[] b = getBytes(buf);
+ os.write(b);
+ os.flush();
+ } finally {
+ os.close();
+ }
+ }
+
+ /*
+ * Do another transaction. This will cause the HALog file for that
+ * commit point to be non-empty on A.
+ */
+ simpleTransaction();
+
+ /*
+ * Await the commit points to become visible.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitCommitCounter(commitCounter2, new HAGlue[] { serverA, serverB });
+
+ // Verify the expected #of HALogs on each service.
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter2);
+
+ // Verify HALog file for next commit point on A is NOT empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertFalse(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // Verify HALog file for next commit point on C is corrupt.
+ {
+ boolean ok = false;
+ try {
+ new HALogReader(fileC);
+ ok = true;
+ } catch(Throwable t) {
+ // Note: Could be IOException, ChecksumError, or
+ // RuntimeException.
+ }
+ if (ok)
+ fail("HALog is not corrupt: " + fileC);
+ }
+
+ /*
+ * Restart (C). It should start without complaint. The corrupt
+ * HALog file should be replaced by the corresponding file from (A) by
+ * the time the quorum fully meets. At this point all services will have
+ * the same digests for all HALog files.
+ */
+
+ // Restart C.
+ serverC = startC();
+
+ // Wait until the quorum is fully met.
+ awaitFullyMetQuorum();
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter2,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ commitCounter2 /* lastCommitCounter */, new HAGlue[] { serverA,
+ serverB, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: Each service will have an empty HALog for the next commit
+ * point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter2+1);
+ awaitLogCount(getHALogDirB(), commitCounter2+1);
+ awaitLogCount(getHALogDirC(), commitCounter2+1);
+
+ }
+
+ /**
+ * This is a unit test for the ability to correctly NOT remove a logically
+ * empty HALog file when that HALog file is for the last commit point on the
+ * Journal. Three services are started in sequence (A,B,C). A series of
+ * small commits are applied to the quorum. (C) is then shut down. A
+ * logically empty HALog file should exist on each service for the next
+ * commit point. We remove the HALog for the next commit point from (C) if
+ * it exists. We then remove the HALog for the last durable commit point on
+ * (C). We then do one more update. C is then restarted. We verify that C
+ * DOES NOT restart and that the missing HALog file for the last durable
+ * commit point on C is not re-created.
+ *
+ * TODO This is the starting place for adding the capability to automatically
+ * replicate bad or missing historical HALog files from the quorum leader. 
+ * The test exists now to ensure that the logic to remove a bad HALog on
+ * startup will refuse to remove an HALog corresponding to the most recent
+ * commit point on the Journal.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/679" >
+ * HAJournalServer can not restart due to logically empty log files
+ * </a>
+ * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/775" >
+ * HAJournal start() </a>
+ */
+ public void test_startABC_missingHALogFileForLastCommitBlocksRestartC() throws Exception {
+
+ final ABC abc = new ABC(true/* sequential */);
+
+ final HAGlue serverA = abc.serverA, serverB = abc.serverB;
+ HAGlue serverC = abc.serverC;
+
+ // Verify quorum is FULLY met.
+ awaitFullyMetQuorum();
+
+ // await the KB create commit point to become visible on each service.
+ awaitCommitCounter(1L, new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */,
+ 1/* lastCommitCounter */, new HAGlue[] { serverA, serverB,
+ serverC });
+
+ /*
+ * Do a series of small commits.
+ */
+
+ final int NSMALL = 5;
+
+ for (int i = 1/* createKB */; i <= NSMALL; i++) {
+
+ simpleTransaction();
+
+ }
+
+ final long commitCounter1 = 1 + NSMALL; // AKA (6)
+
+ // await the commit points to become visible.
+ awaitCommitCounter(commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL journals.
+ assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC });
+
+ // Verify binary equality of ALL HALog files.
+ assertHALogDigestsEquals(1L/* firstCommitCounter */, commitCounter1,
+ new HAGlue[] { serverA, serverB, serverC });
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter1 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 + 1);
+
+ /*
+ * Shutdown C.
+ *
+ * Note: This might cause the empty HALog file on (C) to be deleted.
+ * That is Ok, since we will copy the desired empty HALog from (A) to
+ * (C), thus enforcing the desired test condition.
+ */
+ shutdownC();
+
+ /*
+ * Verify that there is an empty HALog file on (A) for the next
+ * commit point.
+ */
+
+ // The next commit point.
+ final long commitCounter2 = commitCounter1 + 1; // AKA (7)
+
+ // The HALog for that next commit point.
+ final File fileA = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirA(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+ // Verify HALog file for next commit point on A is logically empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertTrue(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // The name of that HALog file on (C).
+ final File fileC = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter2, IHALogReader.HA_LOG_EXT);
+
+// // Copy that empty HALog file to (C).
+// copyFile(fileA, fileC, false/* append */);
+ if (fileC.exists())
+ if (!fileC.delete())
+ fail("Could not remove HALog for open write set: " + fileC);
+
+ // The HALog file on (C) for the last durable commit point on (C). 
+ final File fileCLastCommit = CommitCounterUtility.getCommitCounterFile(
+ getHALogDirC(), commitCounter1, IHALogReader.HA_LOG_EXT);
+
+ if (!fileCLastCommit.exists())
+ fail("HALog for last commit not found: " + fileCLastCommit);
+
+ if (!fileCLastCommit.delete())
+ fail("Could not remove HALog for last commit: " + fileCLastCommit);
+
+ /*
+ * Do another transaction. This will cause the HALog file for that
+ * commit point to be non-empty on A.
+ */
+ simpleTransaction();
+
+ /*
+ * Await the commit points to become visible.
+ *
+ * Note: This is (lastCommitCounter+1) since an empty HALog was created
+ * for the next commit point.
+ */
+ awaitCommitCounter(commitCounter2, new HAGlue[] { serverA, serverB });
+
+ // Verify the expected #of HALogs on each service.
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 - 1);
+
+ // Verify HALog file for next commit point on A is NOT empty.
+ {
+ assertTrue(fileA.exists());
+ final IHALogReader r = new HALogReader(fileA);
+ assertFalse(r.isEmpty());
+ assertFalse(r.isLive());
+ r.close();
+ assertTrue(fileA.exists());
+ }
+
+ // Verify HALog files for last and next commit point on C are missing.
+ {
+ assertFalse(fileC.exists());
+ assertFalse(fileCLastCommit.exists());
+ }
+
+ /*
+ * Restart (C).
+ *
+ * Note: This restart should fail. The number of HALog files on (C)
+ * should be unchanged.
+ */
+
+ // Restart C.
+ {
+ boolean ok = false;
+ try {
+ serverC = startC();
+ ok = true;
+ } catch (Throwable t) {
+ if (InnerCause.isInnerCause(t, InterruptedException.class))
+ // test interrupted? propagate interrupt.
+ throw new RuntimeException(t);
+ // log message. refused start is expected.
+ log.warn("C refused to start: " + t, t);
+ }
+ if (ok)
+ fail("C should not have restarted.");
+ }
+
+ /*
+ * Verify the expected #of HALogs on each service.
+ *
+ * Note: (A) and (B) will have an empty HALog for the next commit
+ * point. The count on (C) is unchanged since it did not restart.
+ */
+ awaitLogCount(getHALogDirA(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirB(), commitCounter2 + 1);
+ awaitLogCount(getHALogDirC(), commitCounter1 - 1);
+
+ }
+
+ /**
* Unit test for a situation in which A, B and C start. A quorum meets and the
* third service resyncs with the met quorum. The quorum then fully meets.
* Once the fully met quorum is stable, C is then restarted. This test

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3WORMJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3WORMJournalServer.java	2013-11-18 22:45:25 UTC (rev 7562)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3WORMJournalServer.java	2013-11-19 14:10:04 UTC (rev 7563)
@@ -1,3 +1,26 @@
+/**
+
+Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved.
+
+Contact:
+ SYSTAP, LLC
+ 4501 Tower Road
+ Greensboro, NC 27410
+ lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
package com.bigdata.journal.jini.ha;

import com.bigdata.journal.BufferMode;
@@ -2,13 +25,19 @@

+/**
+ * FIXME HAWORM: This test suite is not implemented. It needs to override the
+ * {@link BufferMode}.
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
public class TestHA3WORMJournalServer extends TestHA3JournalServer {
-
-
- public TestHA3WORMJournalServer() {}
-
- public TestHA3WORMJournalServer(String nme) {
- super(nme);
- }
-
+
+ public TestHA3WORMJournalServer() {
+ }
+
+ public TestHA3WORMJournalServer(String nme) {
+ super(nme);
+ }
+
protected BufferMode getDiskMode() {
- return BufferMode.DiskWORM;
+ return BufferMode.DiskWORM;
}
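Editorial note on the changeset above: the three new tests all exercise the same restart-time classification of an HALog file, which the exception hierarchy in the first hunk makes explicit. A zero-length file is physically empty; a file whose opening and closing root blocks are identical is logically empty; a file whose root blocks cannot be read is corrupt. The following is a minimal standalone sketch of that decision logic, assuming a simplified layout in which the two fixed-size root blocks sit at the head of the file. ROOT_BLOCK_SIZE and the plain IOException messages are illustrative stand-ins rather than Bigdata APIs, and the real reader also validates magic values and checksums, which this sketch omits.

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;

class HALogStartupCheckSketch {

    // Assumption: stand-in for RootBlockView.SIZEOF_ROOT_BLOCK.
    static final int ROOT_BLOCK_SIZE = 340;

    static void classify(final File file) throws IOException {

        // Physically empty (zero length): rejected before any parsing.
        if (file.length() == 0L)
            throw new IOException("physically empty HALog: " + file);

        final byte[] b0 = new byte[ROOT_BLOCK_SIZE];
        final byte[] b1 = new byte[ROOT_BLOCK_SIZE];

        final DataInputStream is = new DataInputStream(new FileInputStream(file));
        try {
            is.readFully(b0); // opening root block
            is.readFully(b1); // closing root block
        } catch (IOException ex) {
            // Truncated or unreadable root blocks: treat as corrupt.
            throw new IOException("corrupt HALog: " + file, ex);
        } finally {
            is.close();
        }

        // Identical opening and closing root blocks: logically empty write set.
        if (Arrays.equals(b0, b1))
            throw new IOException("logically empty HALog: " + file);

        // Otherwise the HALog holds one complete write set for a commit point.
    }
}

On restart, a file classified as physically empty or corrupt can be silently removed and later re-replicated from the quorum leader, except when it corresponds to the most recent commit point, in which case startup is refused, as the last test above verifies.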
From: <jer...@us...> - 2013-11-18 22:45:39
Revision: 7562
http://bigdata.svn.sourceforge.net/bigdata/?rev=7562&view=rev
Author: jeremy_carroll
Date: 2013-11-18 22:45:25 +0000 (Mon, 18 Nov 2013)

Log Message:
-----------
Corrected ASTUnionFilters lifting to use deep copy rather than the deep copy constructor. Added tests for ticket 767; the ones for the second part are fixed by the above change. Corrected the comment on (all) the deep copy constructors to clarify that they do not make a deep copy. Added another factory method to NV for making maps. Enhanced the framework for optimizer tests.

Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Bind.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Constant.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/PipelineOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/Predicate.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/filter/DistinctFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQ.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NE.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NEConstant.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/JVMDistinctBindingSetsOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/TestPartitionedJoinGroup_canJoinUsingConstraints.java
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/filter/NativeDistinctFilter.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AndBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CoalesceBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConcatBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConditionalBind.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DateBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DigestBOp.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/EBVBOp.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FuncBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IVValueExpression.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IfBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InferenceBVE.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBNodeBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBoundBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInlineBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteralBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsMaterializedBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsNumericBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsURIBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangMatchesBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LcaseBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NeedsMaterializationBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NotBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NumericBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/OrBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RangeBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ReplaceBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SameTermBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SparqlTypeErrorBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/StrBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TryBeforeMaterializationConstraint.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XSDBooleanIVValueExpression.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XsdStrBOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexPredicate.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rules/RejectAnythingSameAsItself.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ConstantNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ConstructNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/DatasetNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/DummyConstantNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ExistsNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FilterNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionRegistry.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GraphPatternGroup.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GroupMemberNodeBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GroupNodeBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NamedSubqueryInclude.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NotExistsNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OldBackupPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByExpr.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathUnionNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryNodeBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SubqueryFunctionNodeBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/TermNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/VarNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTUnionFiltersOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOPredicate.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/spo/SPOStarJoin.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestCustomFunction.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTUnionFiltersOptimizer.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/spo/TestSPORelation.java

Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnionMinus.java
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_01.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_01.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_01.trig
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_02.rq
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_02.srx
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/union_minus_02.trig

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -67,7 +67,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*
* @param op
*/

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Bind.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Bind.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Bind.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -16,7 +16,7 @@
private static final long serialVersionUID = 1L;
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*/
public Bind(Bind<E> op) {
super(op);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Constant.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Constant.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/Constant.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -83,7 +83,7 @@
// }
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*
* @param op
*/

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/NV.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -164,6 +164,23 @@
}
/**
+ * Wrap name/value pairs as a map.
+ *
+ * @param nameValuePairs
+ * Alternating names and values: each name is a String immediately followed by the Object value it maps to in the resulting map.
+ *
+ * @return The map.
+ */
+ static public Map<String,Object> asMap(Object ... 
nameValuePairs) {
+ assert nameValuePairs.length % 2 == 0;
+ final Map<String,Object> rslt = new LinkedHashMap<String,Object>(nameValuePairs.length / 2);
+ for (int i = 0; i < nameValuePairs.length; i += 2) {
+ rslt.put((String) nameValuePairs[i], nameValuePairs[i + 1]);
+ }
+ return rslt;
+ }
+
+ /**
* Wrap an array of name/value pairs as a {@link Map}.
*
* @param a

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/PipelineOp.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/PipelineOp.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/PipelineOp.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -227,7 +227,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*
* @param op
*/

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/Predicate.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/Predicate.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/Predicate.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -79,7 +79,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*/
public Predicate(final Predicate<E> op) {
super(op);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/filter/DistinctFilter.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/filter/DistinctFilter.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/ap/filter/DistinctFilter.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -58,7 +58,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*/
public DistinctFilter(final DistinctFilter op) {
super(op);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/AND.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -47,7 +47,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*/
public AND(final BOp[] args, final Map<String, Object> anns) {
super(args, anns);
@@ -58,7 +58,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}.
*/
public AND(final AND op) {
super(op);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java	2013-11-18 17:41:12 UTC (rev 7561)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java	2013-11-18 22:45:25 UTC (rev 7562)
@@ -77,7 +77,7 @@
}
/**
- * Required deep copy constructor.
+ * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public Constraint(final Constraint<X> op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQ.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQ.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQ.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -63,7 +63,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public EQ(final EQ op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/EQConstant.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -60,7 +60,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public EQConstant(final EQConstant op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NE.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NE.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NE.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -63,7 +63,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public NE(final NE op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NEConstant.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NEConstant.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/NEConstant.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -60,7 +60,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public NEConstant(final NEConstant op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/OR.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -47,7 +47,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public OR(final BOp[] args, final Map<String, Object> anns) { super(args, anns); @@ -58,7 +58,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public OR(final OR op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/HTreeDistinctBindingSetsOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -72,7 +72,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public HTreeDistinctBindingSetsOp(final HTreeDistinctBindingSetsOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/JVMDistinctBindingSetsOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/JVMDistinctBindingSetsOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/JVMDistinctBindingSetsOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -74,7 +74,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public JVMDistinctBindingSetsOp(final JVMDistinctBindingSetsOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -94,7 +94,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public MemoryGroupByOp(final MemoryGroupByOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -74,7 +74,7 @@ private static final long serialVersionUID = 1L; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public MemorySortOp(final MemorySortOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -90,7 +90,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public PipelinedAggregationOp(final PipelinedAggregationOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/TestPartitionedJoinGroup_canJoinUsingConstraints.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/TestPartitionedJoinGroup_canJoinUsingConstraints.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/TestPartitionedJoinGroup_canJoinUsingConstraints.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -248,7 +248,7 @@ private static final long serialVersionUID = 1L; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ @@ -281,7 +281,7 @@ private static final long serialVersionUID = 1L; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ @@ -318,7 +318,7 @@ private static final long serialVersionUID = 1L; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/relation/rule/AbstractRuleTestCase.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -151,7 +151,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public P(final P op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/filter/NativeDistinctFilter.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/filter/NativeDistinctFilter.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/filter/NativeDistinctFilter.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -126,7 +126,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public NativeDistinctFilter(final NativeDistinctFilter op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AndBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AndBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/AndBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -74,7 +74,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public AndBOp(final AndBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CoalesceBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CoalesceBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CoalesceBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -68,7 +68,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public CoalesceBOp(final CoalesceBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/CompareBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -101,7 +101,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public CompareBOp(final CompareBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConcatBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConcatBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConcatBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -56,7 +56,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public ConcatBOp(final ConcatBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConditionalBind.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConditionalBind.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ConditionalBind.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -34,7 +34,7 @@ protected transient Boolean projection; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public ConditionalBind(ConditionalBind<E> op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DatatypeBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -76,7 +76,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public DatatypeBOp(final DatatypeBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DateBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DateBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DateBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -111,7 +111,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DigestBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DigestBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/DigestBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -104,7 +104,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/EBVBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/EBVBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/EBVBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -72,7 +72,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public EBVBOp(final EBVBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FalseBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -57,7 +57,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public FalseBOp(final FalseBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FuncBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FuncBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/FuncBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -105,7 +105,7 @@ private transient volatile Function funct; /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public FuncBOp(final FuncBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IVValueExpression.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IVValueExpression.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IVValueExpression.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -171,7 +171,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IVValueExpression(final IVValueExpression<T> op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IfBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IfBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IfBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -75,7 +75,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IfBOp(final IfBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InferenceBVE.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InferenceBVE.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/InferenceBVE.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -35,7 +35,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public InferenceBVE(final InferenceBVE op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBNodeBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBNodeBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBNodeBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -63,7 +63,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IsBNodeBOp(final IsBNodeBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBoundBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBoundBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsBoundBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -63,7 +63,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public IsBoundBOp(final IsBoundBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInlineBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInlineBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsInlineBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -82,7 +82,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IsInlineBOp(final IsInlineBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteralBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteralBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsLiteralBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -62,7 +62,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IsLiteralBOp(final IsLiteralBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsMaterializedBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsMaterializedBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsMaterializedBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -94,7 +94,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IsMaterializedBOp(final IsMaterializedBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsNumericBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsNumericBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsNumericBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -62,7 +62,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public IsNumericBOp(final IsNumericBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsURIBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsURIBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IsURIBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -62,7 +62,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public IsURIBOp(final IsURIBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -70,7 +70,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public LangBOp(final LangBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangMatchesBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangMatchesBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LangMatchesBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -69,7 +69,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public LangMatchesBOp(final LangMatchesBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LcaseBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LcaseBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/LcaseBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -75,7 +75,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/MathBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -141,7 +141,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NeedsMaterializationBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NeedsMaterializationBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NeedsMaterializationBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -67,7 +67,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public NeedsMaterializationBOp(final NeedsMaterializationBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NotBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NotBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NotBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -69,7 +69,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public NotBOp(final NotBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NumericBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NumericBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/NumericBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -100,7 +100,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. * * @param op */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/OrBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/OrBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/OrBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -74,7 +74,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public OrBOp(final OrBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RangeBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RangeBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RangeBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -105,7 +105,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public RangeBOp(final RangeBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/RegexBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -141,7 +141,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public RegexBOp(final RegexBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ReplaceBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ReplaceBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/ReplaceBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -135,7 +135,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public ReplaceBOp(final ReplaceBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -88,7 +88,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public SPARQLConstraint(final SPARQLConstraint op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SameTermBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SameTermBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SameTermBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -106,7 +106,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public SameTermBOp(final SameTermBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SparqlTypeErrorBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SparqlTypeErrorBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SparqlTypeErrorBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -66,7 +66,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public SparqlTypeErrorBOp(final SparqlTypeErrorBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/StrBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/StrBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/StrBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -77,7 +77,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public StrBOp(final StrBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TrueBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -57,7 +57,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public TrueBOp(final TrueBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TryBeforeMaterializationConstraint.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TryBeforeMaterializationConstraint.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/TryBeforeMaterializationConstraint.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -73,7 +73,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public TryBeforeMaterializationConstraint( final TryBeforeMaterializationConstraint op) { @@ -125,4 +125,4 @@ } -} \ No newline at end of file +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XSDBooleanIVValueExpression.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XSDBooleanIVValueExpression.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XSDBooleanIVValueExpression.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -81,7 +81,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public XSDBooleanIVValueExpression(final XSDBooleanIVValueExpression op) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XsdStrBOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XsdStrBOp.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/XsdStrBOp.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -79,7 +79,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public XsdStrBOp(final XsdStrBOp op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexPredicate.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexPredicate.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/lexicon/LexPredicate.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -141,7 +141,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. 
*/ public LexPredicate(final LexPredicate op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rules/RejectAnythingSameAsItself.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rules/RejectAnythingSameAsItself.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/rules/RejectAnythingSameAsItself.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -53,7 +53,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public RejectAnythingSameAsItself(final RejectAnythingSameAsItself op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -74,7 +74,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public ASTBase(ASTBase op) { super(op); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -69,7 +69,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public ArbitraryLengthPathNode(ArbitraryLengthPathNode op) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java 2013-11-18 17:41:12 UTC (rev 7561) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java 2013-11-18 22:45:25 UTC (rev 7562) @@ -35,7 +35,7 @@ } /** - * Required deep copy constructor. + * Constructor required for {@link com.bigdata.bop.BOpUtility#deepCopy(FilterNode)}. */ public AssignmentNode(AssignmentNode op) { Modifi... [truncated message content] |
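The javadoc rewrite above documents bigdata's copy-constructor convention rather than changing any behavior: each of these operator classes exposes a constructor taking an instance of its own class, and the new wording points at com.bigdata.bop.BOpUtility#deepCopy as the reason that constructor must exist. A minimal sketch of the convention, assuming (as the javadoc suggests, though the lookup mechanism is not shown in this message) that deepCopy locates the constructor from the operator's concrete class; the MyBOp class itself is hypothetical:

    import java.util.Map;

    import com.bigdata.bop.BOp;
    import com.bigdata.bop.BOpBase;

    public class MyBOp extends BOpBase {

        // Shallow constructor from operator arguments and annotations.
        public MyBOp(final BOp[] args, final Map<String, Object> annotations) {
            super(args, annotations);
        }

        // The constructor the renamed javadoc refers to: it takes exactly the
        // concrete class, so a copy utility can find it from op.getClass().
        public MyBOp(final MyBOp op) {
            super(op); // copies args and annotations from the source operator
        }
    }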
From: <tho...@us...> - 2013-11-18 17:41:25
|
Revision: 7561 http://bigdata.svn.sourceforge.net/bigdata/?rev=7561&view=rev Author: thompsonbry Date: 2013-11-18 17:41:12 +0000 (Mon, 18 Nov 2013) Log Message: ----------- Wrapped IOException with the file name so we can diagnose an HALog file that prevents HAJournal startup more easily. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-11-15 22:30:29 UTC (rev 7560) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-11-18 17:41:12 UTC (rev 7561) @@ -452,6 +452,11 @@ is.readFully(b0); is.readFully(b1); + } catch(IOException ex) { + + // Wrap exception with the file name. + throw new IOException(ex.getMessage() + ", file=" + file, ex); + } finally { is.close(); |
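The fix above is a straightforward exception-translation pattern: catch the low-level IOException, re-throw a new IOException whose message carries the offending file name, and chain the original as the cause so no stack trace is lost. A self-contained sketch of the same pattern; the readMagic method and the 8-byte header read are illustrative only, not the actual HALogNexus logic:

    import java.io.DataInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class WrapWithFileName {

        static void readMagic(final File file) throws IOException {
            final DataInputStream is = new DataInputStream(new FileInputStream(file));
            try {
                final byte[] b0 = new byte[8];
                is.readFully(b0); // throws EOFException (an IOException) on a short or corrupt file
            } catch (IOException ex) {
                // Re-throw with the file name attached, chaining the cause, so
                // the bad log file can be identified directly from the stack trace.
                throw new IOException(ex.getMessage() + ", file=" + file, ex);
            } finally {
                is.close();
            }
        }
    }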
From: <jer...@us...> - 2013-11-15 22:30:38
|
Revision: 7560 http://bigdata.svn.sourceforge.net/bigdata/?rev=7560&view=rev Author: jeremy_carroll Date: 2013-11-15 22:30:29 +0000 (Fri, 15 Nov 2013) Log Message: ----------- Improved estimated of counts for ALPPs with lower bounds of zero. The main change is in ArbitraryLengthPathNode.getEstimatedCardinality() This involved passing the ITripleStore object into the appropriate method, and so several method signatures had to have an extra argument. With this change the just added tests for trac773 all got to be much simpler, and the behavior appears improved. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/ITripleStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -11,6 +11,7 @@ import com.bigdata.rdf.sparql.ast.PathNode.PathMod; import com.bigdata.rdf.sparql.ast.eval.AST2BOpBase; import com.bigdata.rdf.sparql.ast.optimizers.StaticOptimizer; +import com.bigdata.rdf.store.ITripleStore; /** * A special kind of AST node that represents the SPARQL 1.1 arbitrary length @@ -220,19 +221,20 @@ } // @Override - public boolean isReorderable() { + public boolean isReorderable(ITripleStore db) { - final long estCard = getEstimatedCardinality(null); + final long estCard = getEstimatedCardinality(null, db); return estCard >= 0 && estCard < Long.MAX_VALUE; } // @Override - public long getEstimatedCardinality(StaticOptimizer opt) { + public long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db) { final JoinGroupNode group = subgroup(); - + + long zeroMatchAdjustment = 0; /* * if lowerBound() is zero, and both ?s and ?o are * variables then we (notionally) match @@ -244,11 +246,21 @@ * Despite this not being implemented, the optimizer does better * knowing this correctly. */ - if (lowerBound() == 0 && left() instanceof VarNode && right() instanceof VarNode) { - return Long.MAX_VALUE; + if (lowerBound() == 0 ) { + int fixedCount = (left() instanceof VarNode ? 1 : 0) + (right() instanceof VarNode ? 
1 : 0); + switch (fixedCount) { + case 0: + zeroMatchAdjustment = left().getValue().equals(right().getValue())?1:0; + break; + case 1: + zeroMatchAdjustment = 1; + break; + case 2: + zeroMatchAdjustment = db.getURICount() + db.getBNodeCount(); // this is too big when we are looking in a reduced dataset + break; + } } - /* * Only deal with singleton paths for now. * @@ -261,8 +273,11 @@ final long estCard = node.getProperty( AST2BOpBase.Annotations.ESTIMATED_CARDINALITY, Long.MAX_VALUE); + + + - return estCard; + return estCard + zeroMatchAdjustment; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -29,6 +29,7 @@ import com.bigdata.bop.BOp; import com.bigdata.rdf.sparql.ast.optimizers.StaticOptimizer; +import com.bigdata.rdf.store.ITripleStore; /** * Interface for things which can be re-ordered by the static join @@ -43,7 +44,7 @@ * by examining the type - individual instances of a particular type * may or may not be reorderable. */ - boolean isReorderable(); + boolean isReorderable(ITripleStore db); /** * Return the estimated cardinality - either the range count of a @@ -51,6 +52,6 @@ * group. * @param opt This optimizer can be used to help work out the estimate */ - long getEstimatedCardinality(StaticOptimizer opt); + long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -9,6 +9,7 @@ import com.bigdata.bop.IVariable; import com.bigdata.rdf.internal.constraints.InBOp; import com.bigdata.rdf.sparql.ast.service.ServiceNode; +import com.bigdata.rdf.store.ITripleStore; /** * An optional or non-optional collection of query nodes that run together in @@ -348,12 +349,12 @@ } - public List<IReorderableNode> getReorderableChildren() { + public List<IReorderableNode> getReorderableChildren(ITripleStore db) { final List<IReorderableNode> nodes = getChildren(IReorderableNode.class); final Iterator<IReorderableNode> it = nodes.iterator(); while (it.hasNext()) { final IReorderableNode node = it.next(); - if (!node.isReorderable()) { + if (!node.isReorderable(db)) { it.remove(); } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -23,6 +23,7 @@ import com.bigdata.rdf.spo.DistinctTermAdvancer; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPOAccessPath; +import com.bigdata.rdf.store.ITripleStore; import 
com.bigdata.relation.rule.eval.ISolution; import com.bigdata.striterator.IKeyOrder; @@ -635,7 +636,7 @@ * @see com.bigdata.rdf.sparql.ast.IReorderableNode#isReorderable() */ @Override - public boolean isReorderable() { + public boolean isReorderable(ITripleStore db) { return !isOptional(); @@ -645,7 +646,7 @@ * @see com.bigdata.rdf.sparql.ast.IReorderableNode#getEstimatedCardinality() */ @Override - public long getEstimatedCardinality(StaticOptimizer opt) { + public long getEstimatedCardinality(StaticOptimizer opt, ITripleStore db) { return getProperty(AST2BOpBase.Annotations.ESTIMATED_CARDINALITY, -1l); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -39,6 +39,7 @@ import com.bigdata.bop.IVariable; import com.bigdata.rdf.sparql.ast.eval.IEvaluationContext; import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager; +import com.bigdata.rdf.store.ITripleStore; /** * Base class for static analysis. @@ -471,5 +472,9 @@ return set; } + + public ITripleStore getDB() { + return evaluationContext.getAbstractTripleStore(); + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -6,6 +6,7 @@ import com.bigdata.bop.BOp; import com.bigdata.rdf.sparql.ast.optimizers.StaticOptimizer; +import com.bigdata.rdf.store.ITripleStore; /** * A special kind of group {@link IGroupNode} that represents the sparql union @@ -92,22 +93,22 @@ @Override - public long getEstimatedCardinality(StaticOptimizer optimizer) { + public long getEstimatedCardinality(StaticOptimizer optimizer, ITripleStore db) { long cardinality = 0; for (JoinGroupNode child : this) { - StaticOptimizer opt = new StaticOptimizer(optimizer, child.getReorderableChildren()); + StaticOptimizer opt = new StaticOptimizer(optimizer, child.getReorderableChildren(db)); cardinality += opt.getCardinality(); } return cardinality; } @Override - public boolean isReorderable() { + public boolean isReorderable(ITripleStore db) { for (JoinGroupNode child : this) { for (IGroupMemberNode grandchild : child) { if (! (grandchild instanceof IReorderableNode)) return false; - if (! ((IReorderableNode)grandchild).isReorderable()) + if (! 
((IReorderableNode)grandchild).isReorderable(db)) return false; } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -466,7 +466,7 @@ /* * Let the optimizer handle the simple optionals too. */ - final List<IReorderableNode> nodes = joinGroup.getReorderableChildren(); + final List<IReorderableNode> nodes = joinGroup.getReorderableChildren(ctx.getAbstractTripleStore()); if (!nodes.isEmpty()) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -512,8 +512,7 @@ if (rangeCount[tailIndex] == -1L) { - final long rangeCount = (long) nodes.get(tailIndex) - .getEstimatedCardinality(this); + final long rangeCount = (long) nodes.get(tailIndex).getEstimatedCardinality(this, sa.getDB()); this.rangeCount[tailIndex] = rangeCount; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/ITripleStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/ITripleStore.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/ITripleStore.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -73,7 +73,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ -interface ITripleStore { +public interface ITripleStore { /** * The #of named graphs. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java 2013-11-15 20:40:47 UTC (rev 7559) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java 2013-11-15 22:30:29 UTC (rev 7560) @@ -48,6 +48,9 @@ private class NotNestedHelper extends Helper { public NotNestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym) { + this(zero_or_one_to_one_or_more, sym, true); + } + public NotNestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym, boolean switchOrdering) { String pattern = "c" + sym; given = select( varNode(z), @@ -63,23 +66,35 @@ // we have to evaluate this one earlier in order to get the anonymous variable numbering // lined up. Really we should compare the result with expected wise to // the unimportance of the name of anonymous variables. 
- ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ); - expected = select( varNode(z), + ArbitraryLengthPathNode alpp1; + ArbitraryLengthPathNode alpp2; + if (switchOrdering) { + alpp2 = alpp2(zero_or_one_to_one_or_more); + alpp1 = alpp1(zero_or_one_to_one_or_more); + } else { + alpp1 = alpp1(zero_or_one_to_one_or_more); + alpp2 = alpp2(zero_or_one_to_one_or_more); + + } + + expected = select( varNode(z), where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - alpp, + alpp1, + alpp2, statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431) ) ); varCount = 0; } + ArbitraryLengthPathNode alpp1(HelperFlag zero_or_one_to_one_or_more) { + return arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more, + joinGroupNode( statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) ) ); + } + ArbitraryLengthPathNode alpp2(HelperFlag zero_or_one_to_one_or_more) { + return arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more, + joinGroupNode( statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) ) ); + } } private class NestedHelper extends Helper { @@ -173,7 +188,7 @@ } public void testNestedPartway() { - new Helper(){{ + new NestedHelper(ZERO_OR_MORE,"*"){{ given = select( varNode(z), where ( @@ -195,26 +210,12 @@ statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) ) ); - varCount = 0; - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) - ) ); }}.test(); } public void testNotNestedPartway() { - new Helper(){{ + new NotNestedHelper(ZERO_OR_MORE,"*", false){{ given = select( varNode(z), where ( @@ -233,66 +234,14 @@ statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) ) ); - varCount = 0; - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) - ) ); - }}.test(); } public void testNestedStar() { - new NestedHelper(ZERO_OR_MORE,"*"){{ - // currently not correctly optimized. - // TODO: this expected result is incorrect. 
- - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) - ) ); - - }}.test(); + new NestedHelper(ZERO_OR_MORE,"*").test(); } public void testNotNestedStar() { - new NotNestedHelper(ZERO_OR_MORE,"*"){{ - // currently not correctly optimized. - // TODO: this expected result is incorrect. - - ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ); - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), - alpp - ) ); - - }}.test(); + new NotNestedHelper(ZERO_OR_MORE,"*").test(); } public void testNestedPlus() { @@ -304,47 +253,10 @@ } public void testNestedQuestionMark() { - new NestedHelper(ZERO_OR_ONE,"?"){{ - // currently not correctly optimized. - // TODO: this expected result is incorrect. - - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_ONE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_ONE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) - ) ); - - }}.test(); + new NestedHelper(ZERO_OR_ONE,"?").test(); } public void testNotNestedQuestionMark() { - new NotNestedHelper(ZERO_OR_ONE,"?"){{ - // currently not correctly optimized. - // TODO: this expected result is incorrect. - - ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_ONE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) - ) ); - expected = select( varNode(z), - where ( - arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_ONE, - joinGroupNode( - statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) - ) ), - statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), - statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), - alpp - ) ); - - }}.test(); + new NotNestedHelper(ZERO_OR_ONE,"?").test(); } } |
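The heart of the revision above is the zeroMatchAdjustment computed for property paths whose lower bound is zero (p* or p?): beyond the range count of the underlying pattern, the zero-length path contributes extra solutions, and how many depends on how many of the two endpoints are variables. A standalone sketch of the same case analysis; the boolean and count parameters stand in for the AST's VarNode tests and the ITripleStore counters:

    /**
     * Extra solutions contributed by the zero-length case of a path with
     * lower bound 0, mirroring the switch added to getEstimatedCardinality().
     */
    static long zeroMatchAdjustment(final boolean leftIsVar, final boolean rightIsVar,
            final boolean constantsEqual, final long uriCount, final long bnodeCount) {
        // NB: despite its name in the patch ("fixedCount"), this counts variables.
        final int varCount = (leftIsVar ? 1 : 0) + (rightIsVar ? 1 : 0);
        switch (varCount) {
        case 0:
            // Two constants: the zero-length path matches iff they are the same term.
            return constantsEqual ? 1 : 0;
        case 1:
            // One variable: the zero-length path binds it to the constant endpoint.
            return 1;
        default:
            // Two variables: notionally every URI and blank node matches itself,
            // hence db.getURICount() + db.getBNodeCount() -- an overestimate on a
            // reduced dataset, as the inline comment in the patch concedes.
            return uriCount + bnodeCount;
        }
    }

The adjustment is added to the estimated cardinality of the path's inner pattern, so a zero-lower-bound path between two variables now gets a large but finite estimate: the static join optimizer can still reorder it, but treats it as expensive.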
From: <jer...@us...> - 2013-11-15 20:40:54
|
Revision: 7559 http://bigdata.svn.sourceforge.net/bigdata/?rev=7559&view=rev Author: jeremy_carroll Date: 2013-11-15 20:40:47 +0000 (Fri, 15 Nov 2013) Log Message: ----------- The build compiler did not accept (int) as a cast taking an Object through Integer to an int Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2013-11-15 20:19:45 UTC (rev 7558) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2013-11-15 20:40:47 UTC (rev 7559) @@ -339,7 +339,7 @@ } for (;i<more.length;i++) { if (more[i] instanceof Integer) { - rslt.setProperty(Annotations.ESTIMATED_CARDINALITY, Long.valueOf((int)more[i])); + rslt.setProperty(Annotations.ESTIMATED_CARDINALITY, Long.valueOf((Integer)more[i])); } else { HelperFlag flag = (HelperFlag)more[i]; flag.apply(rslt); |
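The one-character fix above is presumably a source-level issue: javac accepts a direct (int) cast from an expression of static type Object only from Java 7 onwards (the compiler inserts the reference cast and the unboxing), while under a Java 6 source level it reports inconvertible types. Casting to (Integer) first compiles everywhere. A minimal sketch; the variable names are illustrative:

    public class CastDemo {
        public static void main(String[] args) {
            final Object boxed = Integer.valueOf(42);

            // int a = (int) boxed;   // fine on Java 7+, rejected by a Java 6 compiler

            // Portable form used by the fix: explicit reference cast, then unboxing.
            final Long card = Long.valueOf((Integer) boxed);
            System.out.println(card);
        }
    }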
From: <jer...@us...> - 2013-11-15 20:19:54
|
Revision: 7558 http://bigdata.svn.sourceforge.net/bigdata/?rev=7558&view=rev Author: jeremy_carroll Date: 2013-11-15 20:19:45 +0000 (Fri, 15 Nov 2013) Log Message: ----------- Additional tests for trac 773 Avoided redundancy by more use of getChildren. Added missing copyright statements. Added additional tests for trac 773 (these show current behavior not desired behavior) Made some functionality protected in ASTPropertyPathOptimizer and ASTRangeCountOptimizer, so that it can be overridden in unit tests. Include cardinality estimates in ALPP displays Refactor optimizer testing, extracting super class. Added unit tests for flatten join optimizer. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTOptimizerList.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTPropertyPathOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTFlattenJoinGroupsOptimizer.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -28,7 +28,7 @@ */ private static final long serialVersionUID = 1L; - interface Annotations extends GroupNodeBase.Annotations { + public interface Annotations extends GroupNodeBase.Annotations { /** * The left term - can be a variable or a constant. 
@@ -202,7 +202,18 @@ sb.append(s).append(getClass().getSimpleName()); sb.append("(left=").append(left()).append(", right=").append(right()).append(") {"); sb.append(subgroup().toString(indent+1)); - sb.append("\n").append(s).append("}"); + sb.append("\n").append(s); + + final Long rangeCount = (Long) getProperty(AST2BOpBase.Annotations.ESTIMATED_CARDINALITY); + + if (rangeCount != null) { + sb.append(indent(indent + 1)); + sb.append(AST2BOpBase.Annotations.ESTIMATED_CARDINALITY); + sb.append("="); + sb.append(rangeCount.toString()); + sb.append("\n"); + } + sb.append("}"); return sb.toString(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -8,7 +8,6 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.IVariable; import com.bigdata.rdf.internal.constraints.InBOp; -import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; import com.bigdata.rdf.sparql.ast.service.ServiceNode; /** @@ -179,21 +178,8 @@ */ public List<StatementPatternNode> getStatementPatterns() { - final List<StatementPatternNode> spNodes = - new LinkedList<StatementPatternNode>(); + return getChildren(StatementPatternNode.class); - for (IQueryNode node : this) { - - if (node instanceof StatementPatternNode) { - - spNodes.add((StatementPatternNode) node); - - } - - } - - return spNodes; - } /** @@ -248,21 +234,8 @@ */ public List<ServiceNode> getServiceNodes() { - final List<ServiceNode> serviceNodes = - new LinkedList<ServiceNode>(); + return getChildren(ServiceNode.class); - for (IQueryNode node : this) { - - if (node instanceof ServiceNode) { - - serviceNodes.add((ServiceNode) node); - - } - - } - - return serviceNodes; - } /** @@ -270,21 +243,8 @@ */ public List<NamedSubqueryInclude> getNamedSubqueryIncludes() { - final List<NamedSubqueryInclude> namedSubqueryIncludes = - new LinkedList<NamedSubqueryInclude>(); + return getChildren(NamedSubqueryInclude.class); - for (IQueryNode node : this) { - - if (node instanceof NamedSubqueryInclude) { - - namedSubqueryIncludes.add((NamedSubqueryInclude) node); - - } - - } - - return namedSubqueryIncludes; - } /** @@ -319,21 +279,8 @@ * evaluated left-to-right in the order given in the original query. 
*/ public List<AssignmentNode> getAssignments(){ - - final List<AssignmentNode> assignments = new LinkedList<AssignmentNode>(); - - for (IQueryNode node : this) { - - if (node instanceof AssignmentNode) { - - assignments.add((AssignmentNode) node); - - } - - } - - return assignments; - + + return getChildren(AssignmentNode.class); } /** @@ -341,20 +288,8 @@ */ public List<FilterNode> getAllFiltersInGroup() { - final List<FilterNode> filters = new LinkedList<FilterNode>(); + return getChildren(FilterNode.class); - for (IQueryNode node : this) { - - if (node instanceof FilterNode) { - - filters.add((FilterNode) node); - - } - - } - - return filters; - } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTOptimizerList.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTOptimizerList.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTOptimizerList.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -27,6 +27,8 @@ package com.bigdata.rdf.sparql.ast.optimizers; +import java.util.Arrays; +import java.util.Collection; import java.util.LinkedList; import org.apache.log4j.Logger; @@ -53,10 +55,12 @@ */ private static final long serialVersionUID = 1L; - public ASTOptimizerList() { - + public ASTOptimizerList(Collection<IASTOptimizer> c) { + super(c); } - + public ASTOptimizerList(IASTOptimizer ... optimizers) { + this(Arrays.asList(optimizers)); + } public boolean add(final IASTOptimizer opt) { if(opt == null) @@ -95,7 +99,7 @@ if (log.isDebugEnabled()) log.debug("Rewritten AST:\n" + queryNode); - + } return queryNode; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTPropertyPathOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTPropertyPathOptimizer.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTPropertyPathOptimizer.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -335,7 +335,12 @@ return v; } - private VarNode anonVar(final String anon) { + /** + * Override during testing to give predictable results + * @param anon + * @return + */ + protected VarNode anonVar(final String anon) { VarNode v = new VarNode(anon+UUID.randomUUID().toString()); v.setAnonymous(true); return v; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -87,37 +87,8 @@ if (sp.getProperty(Annotations.ESTIMATED_CARDINALITY) == null) { - final IV<?, ?> s = getIV(sp.s(), exogenousBindings); - final IV<?, ?> p = getIV(sp.p(), exogenousBindings); - final IV<?, ?> o = getIV(sp.o(), exogenousBindings); - final IV<?, ?> c = getIV(sp.c(), exogenousBindings); - - final RangeNode rangeNode = sp.getRange(); - final RangeBOp range = rangeNode != null ? 
rangeNode.getRangeBOp() : null; - - final IAccessPath<?> ap = db.getAccessPath(s, p, o, c, range); - - final long cardinality = ap.rangeCount(false/* exact */); + estimateCardinality(sp, db, exogenousBindings); - // Annotate with the fast range count. - sp.setProperty(Annotations.ESTIMATED_CARDINALITY, cardinality); - - /* - * Annotate with the index which would be used if we did not run - * access path "as-bound". This is the index that will be used - * if we wind up doing a hash join for this predicate. - * - * TODO It would make sense to lift this annotation into a - * different AST optimizer so it is always present. An - * optimization for index locality for as-bound evaluation - * depends on the presence of this annotation. - * - * @see https://sourceforge.net/apps/trac/bigdata/ticket/150" - * (Choosing the index for testing fully bound access paths - * based on index locality) - */ - sp.setProperty(Annotations.ORIGINAL_INDEX, ap.getKeyOrder()); - } } @@ -125,6 +96,51 @@ } /** + * For testing purposes we can override this method. + * @param sp + * @param db + * @param exogenousBindings + */ + protected void estimateCardinality(StatementPatternNode sp, final AbstractTripleStore db, + final IBindingSet exogenousBindings) { + final IV<?, ?> s = getIV(sp.s(), exogenousBindings); + final IV<?, ?> p = getIV(sp.p(), exogenousBindings); + final IV<?, ?> o = getIV(sp.o(), exogenousBindings); + final IV<?, ?> c = getIV(sp.c(), exogenousBindings); + + estimateCardinalities(sp, s, p, o, c, db); + } + + protected void estimateCardinalities(StatementPatternNode sp, final IV<?, ?> s, final IV<?, ?> p, + final IV<?, ?> o, final IV<?, ?> c, final AbstractTripleStore db) { + final RangeNode rangeNode = sp.getRange(); + final RangeBOp range = rangeNode != null ? rangeNode.getRangeBOp() : null; + + final IAccessPath<?> ap = db.getAccessPath(s, p, o, c, range); + + final long cardinality = ap.rangeCount(false/* exact */); + + // Annotate with the fast range count. + sp.setProperty(Annotations.ESTIMATED_CARDINALITY, cardinality); + + /* + * Annotate with the index which would be used if we did not run + * access path "as-bound". This is the index that will be used + * if we wind up doing a hash join for this predicate. + * + * TODO It would make sense to lift this annotation into a + * different AST optimizer so it is always present. An + * optimization for index locality for as-bound evaluation + * depends on the presence of this annotation. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/150" + * (Choosing the index for testing fully bound access paths + * based on index locality) + */ + sp.setProperty(Annotations.ORIGINAL_INDEX, ap.getKeyOrder()); + } + + /** * Helper method grabs the IV out of the TermNode, doing the appropriate * NULL and constant/var checks. * Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -0,0 +1,439 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... 
+ +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.optimizers; + +import org.openrdf.model.impl.URIImpl; +import org.openrdf.query.algebra.StatementPattern.Scope; + +import com.bigdata.bop.IBindingSet; +import com.bigdata.bop.ModifiableBOpBase; +import com.bigdata.bop.NV; +import com.bigdata.rdf.internal.IV; +import com.bigdata.rdf.sparql.ast.ASTBase; +import com.bigdata.rdf.sparql.ast.ASTContainer; +import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; +import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode; +import com.bigdata.rdf.sparql.ast.AssignmentNode; +import com.bigdata.rdf.sparql.ast.ConstantNode; +import com.bigdata.rdf.sparql.ast.GraphPatternGroup; +import com.bigdata.rdf.sparql.ast.GroupMemberNodeBase; +import com.bigdata.rdf.sparql.ast.IGroupMemberNode; +import com.bigdata.rdf.sparql.ast.IQueryNode; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.NamedSubqueryInclude; +import com.bigdata.rdf.sparql.ast.NamedSubqueryRoot; +import com.bigdata.rdf.sparql.ast.PathNode; +import com.bigdata.rdf.sparql.ast.PathNode.*; +import com.bigdata.rdf.sparql.ast.ProjectionNode; +import com.bigdata.rdf.sparql.ast.PropertyPathNode; +import com.bigdata.rdf.sparql.ast.PropertyPathUnionNode; +import com.bigdata.rdf.sparql.ast.QueryBase; +import com.bigdata.rdf.sparql.ast.QueryRoot; +import com.bigdata.rdf.sparql.ast.QueryType; +import com.bigdata.rdf.sparql.ast.StatementPatternNode; +import com.bigdata.rdf.sparql.ast.TermNode; +import com.bigdata.rdf.sparql.ast.UnionNode; +import com.bigdata.rdf.sparql.ast.VarNode; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; + +public abstract class AbstractOptimizerTestCase extends AbstractASTEvaluationTestCase { + + public interface Annotations extends + com.bigdata.rdf.sparql.ast.GraphPatternGroup.Annotations, + com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode.Annotations, + com.bigdata.rdf.sparql.ast.eval.AST2BOpBase.Annotations { + } + + enum HelperFlag { + ZERO_OR_ONE { + @Override + public void apply(ASTBase sp) { + setPathMod(((ArbitraryLengthPathNode) sp), PathMod.ZERO_OR_ONE); + } + }, + + ZERO_OR_MORE { + @Override + public void apply(ASTBase sp) { + setPathMod(((ArbitraryLengthPathNode) sp), PathMod.ZERO_OR_MORE); + } + + }, + + ONE_OR_MORE { + @Override + public void apply(ASTBase sp) { + setPathMod(((ArbitraryLengthPathNode) sp), PathMod.ONE_OR_MORE); + } + + }, + DEFAULT_CONTEXTS { + @Override + public void apply(ASTBase sp) { + ((StatementPatternNode) sp).setScope(Scope.DEFAULT_CONTEXTS); + } + }, + NAMED_CONTEXTS { + @Override + public void apply(ASTBase sp) { + ((StatementPatternNode) sp).setScope(Scope.DEFAULT_CONTEXTS); + } + }, + OPTIONAL { + @Override + public void apply(ASTBase sp) { + ((ModifiableBOpBase) sp) + .setProperty(Annotations.OPTIONAL, true); + } + }, + DISTINCT { + @Override + public void apply(ASTBase rslt) { + ((QueryBase) 
rslt).getProjection().setDistinct(true); + } + }; + + /** + * + * @param target + * @throws ClassCastException + * If there is a mismatch between the flag and its usage. + */ + abstract public void apply(ASTBase target); + + private static void setPathMod(ArbitraryLengthPathNode alp, PathMod mod ) { + alp.setProperty(Annotations.LOWER_BOUND, mod == PathMod.ONE_OR_MORE ? 1L : 0L); + alp.setProperty(Annotations.UPPER_BOUND,mod == PathMod.ZERO_OR_ONE ? 1L : Long.MAX_VALUE); + + } + }; + + /** + * The purpose of this class is to make the tests look like the old + * comments. The first example + * {@link TestASTStaticJoinOptimizer#test_simpleOptional01A()} is based on + * the comments of + * {@link TestASTStaticJoinOptimizer#test_simpleOptional01()} and + * demonstrates that the comment is out of date. + * + * NB: Given this goal, several Java naming conventions are ignored. e.g. + * methods whose names are ALLCAPS or the same as ClassNames + * + * Also, note that the intent is that this class be used in + * anonymous subclasses with a single invocation of the {@link #test()} method, + * and the two fields {@link #given} and {@link #expected} initialized + * in the subclasses constructor (i.e. inside a second pair of braces). + * + * All of the protected members are wrappers around constructors, + * to allow the initialization of these two fields, to have a style + * much more like Prolog than Java. + * + * @author jeremycarroll + * + */ + @SuppressWarnings("rawtypes") + public abstract class Helper { + protected QueryRoot given, expected; + /** + * Variables + */ + protected final String w = "w", x = "x", y = "y", z = "z"; + /** + * Constants ... + */ + protected final IV a = iv("a"), b = iv("b"), c = iv("c"), d = iv("d"), + e = iv("e"), f = iv("f"), g = iv("g"), h = iv("h"); + private VarNode rightVar; + private VarNode leftVar; + int varCount = 0; + + private IV iv(String id) { + return makeIV(new URIImpl("http://example/" + id)); + } + + protected QueryRoot select(VarNode[] varNodes, + NamedSubqueryRoot namedSubQuery, JoinGroupNode where, + HelperFlag... flags) { + QueryRoot rslt = select(varNodes, where, flags); + rslt.getNamedSubqueriesNotNull().add(namedSubQuery); + return rslt; + } + + protected QueryRoot select(VarNode[] varNodes, JoinGroupNode where, + HelperFlag... flags) { + + QueryRoot select = new QueryRoot(QueryType.SELECT); + final ProjectionNode projection = new ProjectionNode(); + for (VarNode varNode : varNodes) + projection.addProjectionVar(varNode); + + select.setProjection(projection); + select.setWhereClause(where); + for (HelperFlag flag : flags) + flag.apply(select); + return select; + } + + protected QueryRoot select(VarNode varNode, + NamedSubqueryRoot namedSubQuery, JoinGroupNode where, + HelperFlag... flags) { + return select(new VarNode[] { varNode }, namedSubQuery, where, + flags); + } + + protected QueryRoot select(VarNode varNode, JoinGroupNode where, + HelperFlag... 
flags) {
+		return select(new VarNode[] { varNode }, where, flags);
+	}
+
+	protected NamedSubqueryRoot namedSubQuery(String name, VarNode varNode,
+			JoinGroupNode where) {
+		final NamedSubqueryRoot namedSubquery = new NamedSubqueryRoot(
+				QueryType.SELECT, name);
+		final ProjectionNode projection = new ProjectionNode();
+		namedSubquery.setProjection(projection);
+		projection.addProjectionExpression(new AssignmentNode(varNode,
+				new VarNode(varNode)));
+		namedSubquery.setWhereClause(where);
+		return namedSubquery;
+	}
+
+	protected GroupMemberNodeBase namedSubQueryInclude(String name) {
+		return new NamedSubqueryInclude(name);
+	}
+
+	protected VarNode leftVar() {
+		leftVar = newAlppVar("-tVarLeft-",leftVar);
+		return leftVar;
+	}
+	protected VarNode rightVar() {
+		rightVar = newAlppVar("-tVarRight-",rightVar);
+		return rightVar;
+	}
+
+	private VarNode newAlppVar(String prefix,VarNode v) {
+		if (v != null) {
+			return v;
+		}
+		v = varNode(prefix+varCount++);
+		v.setAnonymous(true);
+		return v;
+
+	}
+
+	protected ArbitraryLengthPathNode arbitartyLengthPropertyPath(TermNode left, TermNode right,
+			HelperFlag card, JoinGroupNode joinGroupNode) {
+		assert leftVar != null;
+		assert rightVar != null;
+		ArbitraryLengthPathNode rslt = new ArbitraryLengthPathNode(left, right, leftVar, rightVar, PathMod.ONE_OR_MORE);
+		card.apply(rslt);
+		rslt.setArg(0, joinGroupNode);
+		leftVar = null;
+		rightVar = null;
+		return rslt;
+	}
+
+
+	protected VarNode[] varNodes(String... names) {
+		VarNode rslt[] = new VarNode[names.length];
+		for (int i = 0; i < names.length; i++)
+			rslt[i] = varNode(names[i]);
+		return rslt;
+	}
+
+	protected VarNode varNode(String varName) {
+		return new VarNode(varName);
+	}
+
+	protected TermNode constantNode(IV iv) {
+		return new ConstantNode(iv);
+	}
+	protected TermNode constantNode(String c) {
+		return new ConstantNode(iv(c));
+	}
+
+	protected PropertyPathNode propertyPathNode(final TermNode s, String pattern, final TermNode o) {
+		return new PropertyPathNode(s, pathNode(pattern), o);
+	}
+
+	/**
+	 * This method is only partially implemented; the issue is which patterns
+	 * are supported. The intended contract is that the pattern may be any
+	 * SPARQL property path expression without any whitespace, in which every
+	 * property in the path is reduced to a single letter a-z,
+	 * e.g. "(a/b?/c)+|a|c*"
+	 *
+	 * Parentheses, alternatives and negated properties are not yet supported.
+	 *
+	 * @param pattern
+	 * @return
+	 */
+	protected PathNode pathNode(String pattern) {
+		final String seq[] = pattern.split("/");
+		final PathElt elements[] = new PathElt[seq.length];
+		for (int i = 0; i < seq.length; i++) {
+			final String s = seq[i];
+			// The modifier, if any, of this path element. Note: this must be
+			// reset on each pass, otherwise a modifier from an earlier element
+			// would be applied to the later ones.
+			PathMod mod = null;
+			boolean inverse = s.charAt(0)=='^';
+			switch (s.charAt(s.length()-1)) {
+			case '*':
+				mod = PathMod.ZERO_OR_MORE;
+				break;
+			case '+':
+				mod = PathMod.ONE_OR_MORE;
+				break;
+			case '?':
+				mod = PathMod.ZERO_OR_ONE;
+				break;
+			}
+			String c = s.substring(inverse?1:0,s.length() - (mod!=null?1:0));
+			elements[i] = new PathElt(constantNode(c),inverse,mod);
+
+		}
+		return new PathNode(new PathAlternative(new PathSequence(elements)));
+	}
+
+	/**
+	 * Create a statement pattern node. The additional arguments after the s, p, o,
+	 * are:
+	 * <ol>
+	 * <li>A context node: must be first (i.e.
fourth)</li> + * <li>integer cardinality</li> + * <li>HelperFlag's</li> + * </ol> + * @param s + * @param p + * @param o + * @param more + * @return + */ + protected StatementPatternNode statementPatternNode(TermNode s, + TermNode p, TermNode o, Object ... more) { + StatementPatternNode rslt = newStatementPatternNode(s, p, o); + if (more.length>0) { + int i = 0; + if (more[0] instanceof TermNode) { + rslt.setC((TermNode)more[0]); + i = 1; + } + for (;i<more.length;i++) { + if (more[i] instanceof Integer) { + rslt.setProperty(Annotations.ESTIMATED_CARDINALITY, Long.valueOf((int)more[i])); + } else { + HelperFlag flag = (HelperFlag)more[i]; + flag.apply(rslt); + } + } + } + return rslt; + } + + @SuppressWarnings("unchecked") + private <E extends IGroupMemberNode, T extends GraphPatternGroup<E>> T initGraphPatternGroup( + T rslt, Object... statements) { + for (Object mem : statements) { + if (mem instanceof IGroupMemberNode) { + rslt.addChild((E) mem); + } else { + ((HelperFlag) mem).apply(rslt); + } + } + return rslt; + } + + protected JoinGroupNode joinGroupNode(TermNode context,Object... statements) { + JoinGroupNode rslt = joinGroupNode(statements); + rslt.setContext(context); + return rslt; + } + protected JoinGroupNode joinGroupNode(Object... statements) { + return initGraphPatternGroup(new JoinGroupNode(), statements); + } + + protected PropertyPathUnionNode propertyPathUnionNode(Object... statements) { + return initGraphPatternGroup(new PropertyPathUnionNode(), + statements); + } + + protected UnionNode unionNode(Object... statements) { + return initGraphPatternGroup(new UnionNode(), statements); + + } + + protected JoinGroupNode where(GroupMemberNodeBase... statements) { + return joinGroupNode((Object[]) statements); + } + + public void test() { + final IASTOptimizer rewriter = newOptimizer(); + + final AST2BOpContext context = new AST2BOpContext(new ASTContainer( + given), store); + + final IQueryNode actual = rewriter.optimize(context, given, + new IBindingSet[] {}); + + assertSameAST(expected, actual); + } + } + + public AbstractOptimizerTestCase(String name) { + super(name); + } + + public AbstractOptimizerTestCase() { + super(); + } + + protected StatementPatternNode newStatementPatternNode(final TermNode s, final TermNode p, final TermNode o) { + return newStatementPatternNode(s, p, o, -1, false); + } + protected StatementPatternNode newStatementPatternNode( + final TermNode s, final TermNode p, final TermNode o, + final long cardinality) { + return newStatementPatternNode(s, p, o, cardinality, false); + } + + protected StatementPatternNode newStatementPatternNode( + final TermNode s, final TermNode p, final TermNode o, + final long cardinality, final boolean optional) { + + final StatementPatternNode sp = new StatementPatternNode(s, p, o); + + if (cardinality != -1) { + sp.setProperty(Annotations.ESTIMATED_CARDINALITY, cardinality); + } + + if (optional) { + + sp.setOptional(true); + + } + + return sp; + + } + + abstract IASTOptimizer newOptimizer() ; +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestALPPinTrac773.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -0,0 +1,350 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. 
All rights reserved.
+
+Contact:
+     SYSTAP, LLC
+     4501 Tower Road
+     Greensboro, NC 27410
+     lic...@bi...
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+package com.bigdata.rdf.sparql.ast.optimizers;
+
+import static com.bigdata.rdf.sparql.ast.optimizers.AbstractOptimizerTestCase.HelperFlag.*;
+
+import com.bigdata.rdf.internal.IV;
+import com.bigdata.rdf.sparql.ast.ArbitraryLengthPathNode;
+import com.bigdata.rdf.sparql.ast.StatementPatternNode;
+import com.bigdata.rdf.sparql.ast.VarNode;
+import com.bigdata.rdf.store.AbstractTripleStore;
+
+/**
+ * Trac773 shows some strange behavior; this test case is intended
+ * to explore that behavior.
+ *
+ * The basic issue concerns the order of execution of arbitrary length property
+ * paths and other bits of the query. The observed behavior was that
+ * adding additional braces, which change the grouping, had surprising effects,
+ * and that the default choice of order was often poor.
+ *
+ * With this first commit we capture the incorrect behavior.
+ *
+ */
+public class TestALPPinTrac773 extends AbstractOptimizerTestCase {
+
+	private class NotNestedHelper extends Helper {
+		public NotNestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym) {
+			String pattern = "c" + sym;
+
+			given = select( varNode(z),
+				where (
+
+					joinGroupNode(propertyPathNode(varNode(x),pattern, constantNode(b))),
+					statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431),
+					propertyPathNode(varNode(x),pattern, varNode(z)),
+					statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054)
+				) );
+
+			varCount = 0;
+			// We have to evaluate this one earlier in order to get the anonymous
+			// variable numbering lined up. Really we should compare the actual
+			// result with the expected result in a way that is insensitive to
+			// the names of anonymous variables.
+ ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ); + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + alpp, + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431) + ) ); + varCount = 0; + + } + } + private class NestedHelper extends Helper { + + public NestedHelper(HelperFlag zero_or_one_to_one_or_more, String sym) { + String pattern = "c" + sym; + + given = select( varNode(z), + where ( + joinGroupNode(propertyPathNode(varNode(x),pattern, constantNode(b))), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + joinGroupNode(propertyPathNode(varNode(x),pattern, varNode(z))), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + varCount = 0; + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), zero_or_one_to_one_or_more, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + arbitartyLengthPropertyPath(varNode(x), varNode(z), zero_or_one_to_one_or_more, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431) + ) ); + varCount = 0; + + } + } + public TestALPPinTrac773() { + } + + public TestALPPinTrac773(String name) { + super(name); + } + @Override + IASTOptimizer newOptimizer() { + return new ASTOptimizerList( + new ASTPropertyPathOptimizer() { + private int counter = 0; + + @Override + protected VarNode anonVar(final String anon) { + VarNode v = new VarNode(anon+counter++); + v.setAnonymous(true); + return v; + } + }, + new ASTRangeCountOptimizer(){ + @Override + + + protected void estimateCardinalities(StatementPatternNode sp, final IV<?, ?> s, final IV<?, ?> p, + final IV<?, ?> o, final IV<?, ?> c, final AbstractTripleStore db) { + if (o != null) + sp.setProperty(Annotations.ESTIMATED_CARDINALITY, 26l); + else + sp.setProperty(Annotations.ESTIMATED_CARDINALITY, 3135l); + } + + }, + new ASTFlattenJoinGroupsOptimizer(), + new ASTStaticJoinOptimizer()); + } + + + public void testSimpleALPP() { + + new Helper(){{ + + given = select( varNode(x), + where ( + joinGroupNode( + propertyPathNode(varNode(x),"c*", constantNode(b)) + ) + ) ); + + + expected = select( varNode(x), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ) + ) ); + + }}.test(); + } + public void testNestedPartway() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ) + + ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + joinGroupNode( + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ) + + ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + 
varCount = 0; + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + }}.test(); + } + public void testNotNestedPartway() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ) + + ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + varCount = 0; + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + }}.test(); + } + public void testNestedStar() { + + new NestedHelper(ZERO_OR_MORE,"*"){{ + // currently not correctly optimized. + // TODO: this expected result is incorrect. + + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + }}.test(); + } + public void testNotNestedStar() { + new NotNestedHelper(ZERO_OR_MORE,"*"){{ + // currently not correctly optimized. + // TODO: this expected result is incorrect. + + ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ); + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_MORE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), + alpp + ) ); + + }}.test(); + } + public void testNestedPlus() { + + new NestedHelper(ONE_OR_MORE,"+").test(); + } + public void testNotNestedPlus() { + + new NotNestedHelper(ONE_OR_MORE,"+").test(); + } + public void testNestedQuestionMark() { + + new NestedHelper(ZERO_OR_ONE,"?"){{ + // currently not correctly optimized. + // TODO: this expected result is incorrect. 
+ + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054) + ) ); + + }}.test(); + } + public void testNotNestedQuestionMark() { + + new NotNestedHelper(ZERO_OR_ONE,"?"){{ + // currently not correctly optimized. + // TODO: this expected result is incorrect. + + ArbitraryLengthPathNode alpp = arbitartyLengthPropertyPath(varNode(x), varNode(z), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 3135) + ) ); + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), constantNode(b), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar(), 26) + ) ), + statementPatternNode(varNode(y), constantNode(c), varNode(x), 15431), + statementPatternNode(varNode(z), constantNode(a), varNode(w), 2054), + alpp + ) ); + + }}.test(); + } +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTFlattenJoinGroupsOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTFlattenJoinGroupsOptimizer.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTFlattenJoinGroupsOptimizer.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -0,0 +1,123 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.rdf.sparql.ast.optimizers; + +import static com.bigdata.rdf.sparql.ast.optimizers.AbstractOptimizerTestCase.HelperFlag.*; + + +public class TestASTFlattenJoinGroupsOptimizer extends AbstractOptimizerTestCase { + + public TestASTFlattenJoinGroupsOptimizer(String name) { + super(name); + } + + public TestASTFlattenJoinGroupsOptimizer() { + } + @Override + IASTOptimizer newOptimizer() { + return new ASTFlattenJoinGroupsOptimizer(); + } + + public void testBasicFlattening() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + joinGroupNode( statementPatternNode(varNode(x), constantNode(e), varNode(z)) ), + joinGroupNode( statementPatternNode(varNode(x), constantNode(f), varNode(z)) ) + ) + ), + DISTINCT ); + + + expected = select( varNode(z), + where ( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + statementPatternNode(varNode(x), constantNode(e), varNode(z)), + statementPatternNode(varNode(x), constantNode(f), varNode(z)) + ), + DISTINCT ); + + }}.test(); + } + public void testContextChange() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + joinGroupNode( varNode(w), + statementPatternNode(varNode(x), constantNode(e), varNode(z), varNode(w), NAMED_CONTEXTS) ), + joinGroupNode( statementPatternNode(varNode(x), constantNode(f), varNode(z), DEFAULT_CONTEXTS) ) + ) + ), + DISTINCT ); + + + expected = select( varNode(z), + where ( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + statementPatternNode(varNode(x), constantNode(e), varNode(z), varNode(w), NAMED_CONTEXTS), + statementPatternNode(varNode(x), constantNode(f), varNode(z), DEFAULT_CONTEXTS) + ), + DISTINCT ); + + }}.test(); + } + + public void testSingleALPP() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + arbitartyLengthPropertyPath(varNode(x), varNode(y), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar()) + ) ) + + ) + ) ); + + varCount = 0; + + expected = select( varNode(z), + where ( + arbitartyLengthPropertyPath(varNode(x), varNode(y), ZERO_OR_ONE, + joinGroupNode( + statementPatternNode(leftVar(), constantNode(c), rightVar()) + ) ) + ) ); + + }}.test(); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java 2013-11-15 18:31:16 UTC (rev 7557) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java 2013-11-15 20:19:45 UTC (rev 7558) @@ -32,225 +32,36 @@ import org.openrdf.model.vocabulary.RDF; import com.bigdata.bop.IBindingSet; -import com.bigdata.bop.ModifiableBOpBase; import com.bigdata.rdf.internal.IV; -import com.bigdata.rdf.sparql.ast.ASTBase; import com.bigdata.rdf.sparql.ast.ASTContainer; -import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; import com.bigdata.rdf.sparql.ast.AssignmentNode; import 
com.bigdata.rdf.sparql.ast.ConstantNode; -import com.bigdata.rdf.sparql.ast.GraphPatternGroup; -import com.bigdata.rdf.sparql.ast.GroupMemberNodeBase; -import com.bigdata.rdf.sparql.ast.IGroupMemberNode; import com.bigdata.rdf.sparql.ast.IQueryNode; import com.bigdata.rdf.sparql.ast.JoinGroupNode; import com.bigdata.rdf.sparql.ast.NamedSubqueryInclude; import com.bigdata.rdf.sparql.ast.NamedSubqueryRoot; import com.bigdata.rdf.sparql.ast.ProjectionNode; -import com.bigdata.rdf.sparql.ast.QueryBase; import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.sparql.ast.QueryType; import com.bigdata.rdf.sparql.ast.StatementPatternNode; -import com.bigdata.rdf.sparql.ast.TermNode; -import com.bigdata.rdf.sparql.ast.UnionNode; import com.bigdata.rdf.sparql.ast.VarNode; import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; import com.bigdata.rdf.sparql.ast.service.ServiceNode; import com.bigdata.rdf.store.BDS; -import com.bigdata.rdf.sparql.ast.PropertyPathUnionNode; +import static com.bigdata.rdf.sparql.ast.optimizers.AbstractOptimizerTestCase.HelperFlag.*; /** * Test suite for {@link ASTStaticJoinOptimizer}. */ -public class TestASTStaticJoinOptimizer extends AbstractASTEvaluationTestCase { +public class TestASTStaticJoinOptimizer extends AbstractOptimizerTestCase +{ - public interface Annotations extends - com.bigdata.rdf.sparql.ast.GraphPatternGroup.Annotations, - com.bigdata.rdf.sparql.ast.eval.AST2BOpBase.Annotations { - } - enum HelperFlag { - OPTIONAL { - @Override - public void apply(ASTBase sp) { - ((ModifiableBOpBase) sp) - .setProperty(Annotations.OPTIONAL, true); - } - }, - DISTINCT { - @Override - public void apply(ASTBase rslt) { - ((QueryBase) rslt).getProjection().setDistinct(true); - } - }; - - /** - * - * @param target - * @throws ClassCastException - * If there is a mismatch between the flag and its usage. - */ - abstract public void apply(ASTBase target); - }; - - /** - * The purpose of this class is to make the tests look like the old - * comments. The first example - * {@link TestASTStaticJoinOptimizer#test_simpleOptional01A()} is based on - * the comments of - * {@link TestASTStaticJoinOptimizer#test_simpleOptional01()} and - * demonstrates that the comment is out of date. - * - * NB: Given this goal, several Java naming conventions are ignored. e.g. - * methods whose names are ALLCAPS or the same as ClassNames - * - * Also, note that the intent is that this class be used in - * anonymous subclasses with a single invocation of the {@link #test()} method, - * and the two fields {@link #given} and {@link #expected} initialized - * in the subclasses constructor (i.e. inside a second pair of braces). - * - * All of the protected members are wrappers around constructors, - * to allow the initialization of these two fields, to have a style - * much more like Prolog than Java. 
- * - * @author jeremycarroll - * - */ - @SuppressWarnings("rawtypes") - public abstract class Helper { - protected QueryRoot given, expected; - protected final String w = "w", x = "x", y = "y", z = "z"; - protected final IV a = iv("a"), b = iv("b"), c = iv("c"), d = iv("d"), - e = iv("e"), f = iv("f"), g = iv("g"), h = iv("h"); - - protected final HelperFlag OPTIONAL = HelperFlag.OPTIONAL; - protected final HelperFlag DISTINCT = HelperFlag.DISTINCT; - - private IV iv(String id) { - return makeIV(new URIImpl("http://example/" + id)); - } - - protected QueryRoot select(VarNode[] varNodes, - NamedSubqueryRoot namedSubQuery, JoinGroupNode where, - HelperFlag... flags) { - QueryRoot rslt = select(varNodes, where, flags); - rslt.getNamedSubqueriesNotNull().add(namedSubQuery); - return rslt; - } - - protected QueryRoot select(VarNode[] varNodes, JoinGroupNode where, - HelperFlag... flags) { - - QueryRoot select = new QueryRoot(QueryType.SELECT); - final ProjectionNode projection = new ProjectionNode(); - for (VarNode varNode : varNodes) - projection.addProjectionVar(varNode); - - select.setProjection(projection); - select.setWhereClause(where); - for (HelperFlag flag : flags) - flag.apply(select); - return select; - } - - protected QueryRoot select(VarNode varNode, - NamedSubqueryRoot namedSubQuery, JoinGroupNode where, - HelperFlag... flags) { - return select(new VarNode[] { varNode }, namedSubQuery, where, - flags); - } - - protected QueryRoot select(VarNode varNode, JoinGroupNode where, - HelperFlag... flags) { - return select(new VarNode[] { varNode }, where, flags); - } - - protected NamedSubqueryRoot namedSubQuery(String name, VarNode varNode, - JoinGroupNode where) { - final NamedSubqueryRoot namedSubquery = new NamedSubqueryRoot( - QueryType.SELECT, name); - final ProjectionNode projection = new ProjectionNode(); - namedSubquery.setProjection(projection); - projection.addProjectionExpression(new AssignmentNode(varNode, - new VarNode(varNode))); - - namedSubquery.setWhereClause(where); - return namedSubquery; - } - - protected GroupMemberNodeBase namedSubQueryInclude(String name) { - return new NamedSubqueryInclude(name); - } - - protected VarNode[] varNodes(String... names) { - VarNode rslt[] = new VarNode[names.length]; - for (int i = 0; i < names.length; i++) - rslt[i] = varNode(names[i]); - return rslt; - } - - protected VarNode varNode(String varName) { - return new VarNode(varName); - } - - protected TermNode constantNode(IV iv) { - return new ConstantNode(iv); - } - - protected StatementPatternNode statementPatternNode(TermNode s, - TermNode p, TermNode o, long cardinality, HelperFlag... flags) { - StatementPatternNode rslt = newStatementPatternNode(s, p, o, - cardinality); - for (HelperFlag flag : flags) { - flag.apply(rslt); - } - return rslt; - } - - @SuppressWarnings("unchecked") - private <E extends IGroupMemberNode, T extends GraphPatternGroup<E>> T initGraphPatternGroup( - T rslt, Object... statements) { - for (Object mem : statements) { - if (mem instanceof IGroupMemberNode) { - rslt.addChild((E) mem); - } else { - ((HelperFlag) mem).apply(rslt); - } - } - return rslt; - } - - protected JoinGroupNode joinGroupNode(Object... statements) { - return initGraphPatternGroup(new JoinGroupNode(), statements); - } - - protected PropertyPathUnionNode propertyPathUnionNode( - Object... statements) { - return initGraphPatternGroup(new PropertyPathUnionNode(), - statements); - } - - protected UnionNode unionNode(Object... 
statements) {
-			return initGraphPatternGroup(new UnionNode(), statements);
-
-		}
-
-		protected JoinGroupNode where(GroupMemberNodeBase... statements) {
-			return joinGroupNode((Object[]) statements);
-		}
-
-		public void test() {
-			final IASTOptimizer rewriter = new ASTStaticJoinOptimizer();
-
-			final AST2BOpContext context = new AST2BOpContext(new ASTContainer(
-					given), store);
-
-			final IQueryNode actual = rewriter.optimize(context, given,
-					new IBindingSet[] {});
-
-			assertSameAST(expected, actual);
-		}
+	@Override
+	protected ASTStaticJoinOptimizer newOptimizer() {
+		return new ASTStaticJoinOptimizer();
 	}
 
+	public void test_simpleOptional01A() {
 		new Helper() {{
 			given = select( varNode(x),
@@ -281,6 +92,7 @@
      *
      */
     public TestASTStaticJoinOptimizer() {
+    	super();
     }
 
     /**
@@ -1911,14 +1723,7 @@
 
     }
 
-    private StatementPatternNode newStatementPatternNode(
-            final TermNode s, final TermNode p, final TermNode o,
-            final long cardinality) {
-        return newStatementPatternNode(s, p, o, cardinality, false);
-
-    }
-
     /*
     *
     * prefix skos: <http://www.w3.org/2004/02/skos/core#>
@@ -2225,23 +2030,6 @@
 
     }
 
-    private StatementPatternNode newStatementPatternNode(
-            final TermNode s, final TermNode p, final TermNode o,
-            final long cardinality, final boolean optional) {
-
-        final StatementPatternNode sp = new StatementPatternNode(s, p, o);
-
-        sp.setProperty(Annotations.ESTIMATED_CARDINALITY, cardinality);
-
-        if (optional) {
-
-            sp.setOptional(true);
-
-        }
-
-        return sp;
-
-    }
 
     private StatementPatternNode runFirst(final StatementPatternNode sp) {
         sp.setProperty(QueryHints.RUN_FIRST, true);

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java	2013-11-15 18:31:16 UTC (rev 7557)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestAll.java	2013-11-15 20:19:45 UTC (rev 7558)
@@ -138,6 +138,10 @@
 
         // Test suite for resolving mock IVs.
         suite.addTestSuite(TestASTBatchResolveTermsOptimizer.class);
+
+        suite.addTestSuite(TestASTFlattenJoinGroupsOptimizer.class);
+
+        suite.addTestSuite(TestALPPinTrac773.class);
 
         return suite;
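The anonymous-subclass test style introduced by AbstractOptimizerTestCase above is easiest to see in a compact sketch. The following is illustrative only and is not part of the commit: it assumes a test class that extends AbstractOptimizerTestCase, whose newOptimizer() returns the optimizer under test, and which statically imports HelperFlag (as TestALPPinTrac773 does); the test name, constants, and cardinalities are invented.

    public void test_exampleHelperUsage() {
        new Helper() {{
            // The instance initializer fills in [given] and [expected].
            // statementPatternNode(s, p, o, ...) optionally takes a context
            // TermNode first, then an integer ESTIMATED_CARDINALITY
            // annotation, then any HelperFlags (e.g. OPTIONAL).
            given = select( varNode(x),
                    where (
                            statementPatternNode(varNode(x), constantNode(a), constantNode(b), 1000),
                            statementPatternNode(varNode(x), constantNode(c), varNode(y), 10, OPTIONAL)
                    ) );
            // A real test encodes the AST that the optimizer under test is
            // actually expected to produce; this sketch simply leaves the
            // plan unchanged.
            expected = select( varNode(x),
                    where (
                            statementPatternNode(varNode(x), constantNode(a), constantNode(b), 1000),
                            statementPatternNode(varNode(x), constantNode(c), varNode(y), 10, OPTIONAL)
                    ) );
        }}.test();
    }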
From: <tho...@us...> - 2013-11-15 18:31:26
Revision: 7557
http://bigdata.svn.sourceforge.net/bigdata/?rev=7557&view=rev
Author: thompsonbry
Date: 2013-11-15 18:31:16 +0000 (Fri, 15 Nov 2013)
Log Message:
-----------
Committing fix for #722. Queries are now cancelled by the deadline.

Cancellation will occur within a fixed granularity of 100ms as specified by QueryEngine.DEADLINE_CHECK_MILLIS. This is checked by QueryEngineTask.run(). That method now checks the deadlineQueue at the same time that it is checking the priorityQueue. The deadlineQueue is a priority queue in order of increasing deadline time. Only the head of the queue is checked, and then only those queries at the head of the queue whose deadline is LT the current system time.

Periodically the head of the deadlineQueue is drained if it gets above a set size (200 as configured). This allows us to find and remove entries corresponding to done queries in a timely fashion.

A new counter has been added to the query engine to report the current size of the deadlineQueue. This queue will remain empty unless queries are submitted with a set deadline.

A stress test for this feature was developed based on testOrderByQueriesAreInterruptable() in BigdataConnectionTest.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngineCounters.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/BindingSetComparator.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/IHaltable.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestAll.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryDeadline.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryDeadlineOrder.java

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java	2013-11-15 17:38:20 UTC (rev 7556)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java	2013-11-15 18:31:16 UTC (rev 7557)
@@ -328,6 +328,8 @@
 
             runState.setDeadline(deadline);
 
+            queryEngine.addQueryToDeadlineQueue(this);
+
         } catch (QueryTimeoutException e) {
 
             /*
@@ -349,10 +351,20 @@
      * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772">
      *      Query timeout only checked at operator start/stop. </a>
      */
-    final public void checkDeadline() {
+    final protected void checkDeadline() {
 
+        if (isDone()) {
+
+            // already terminated.
+            return;
+
+        }
+
         try {
 
+//            if (log.isTraceEnabled())
+//                log.trace("Checking " + deadline);
+
             runState.checkDeadline();
 
         } catch (QueryTimeoutException ex) {
 
@@ -367,7 +379,7 @@
 
         }
 
     }
-
+
     @Override
     final public long getDeadline() {
 
@@ -1472,6 +1484,13 @@
     }
 
     @Override
+    final public Throwable getAsThrownCause() {
+
+        return future.getAsThrownCause();
+
+    }
+
+    @Override
     public IBigdataFederation<?> getFederation() {
 
         return queryEngine.getFederation();

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryDeadline.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryDeadline.java	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryDeadline.java	2013-11-15 18:31:16 UTC (rev 7557)
@@ -0,0 +1,113 @@
+package com.bigdata.bop.engine;
+
+import java.lang.ref.WeakReference;
+
+/**
+ * This class pairs the immutable deadline associated with a query with the
+ * {@link AbstractRunningQuery}. The natural ordering places instances of this
+ * class into ascending deadline order. The deadline is simply the timestamp at
+ * which the query's deadline expires, so instances are ordered by the time at
+ * which their deadline will expire: the queries that will expire soonest come
+ * first; those that can run longer come later. This ordering
+ * is used for a priority queue.
+ *
+ * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> Query
+ *      timeout only checked at operator start/stop. </a>
+ *
+ * @author <a href="mailto:tho...@us...">Bryan Thompson</a>
+ */
+class QueryDeadline implements Comparable<QueryDeadline> {
+
+    /**
+     * The deadline for this query.
+     */
+    final long deadline;
+
+    /**
+     * A reference to the query.
+     * <p>
+     * Note: A {@link WeakReference} is used to avoid having the deadline queue
+     * pin the {@link AbstractRunningQuery} objects.
+     */
+    private final WeakReference<AbstractRunningQuery> queryRef;
+
+    /**
+     *
+     * @param deadline
+     *            The deadline.
+     * @param query
+     *            The query.
+     */
+    public QueryDeadline(final long deadline, final AbstractRunningQuery query) {
+
+        this.deadline = deadline;
+
+        this.queryRef = new WeakReference<AbstractRunningQuery>(query);
+
+    }
+
+    /**
+     * Comparator orders the queries based on increasing deadline. The query
+     * with the soonest deadline will be ordered first. The query with the
+     * greatest deadline will be ordered last. Queries that do not have an
+     * explicit deadline are assigned a deadline of {@link Long#MAX_VALUE} and
+     * will be ordered last.
+     * <p>
+     * Note: A natural order based on deadline was added to support timely
+     * termination of compute bound queries that exceed their deadline.
+     *
+     * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772">
+     *      Query timeout only checked at operator start/stop. </a>
+     */
+    @Override
+    public int compareTo(final QueryDeadline o) {
+        final long d0 = this.deadline;
+        final long d1 = o.deadline;
+        if (d0 < d1)
+            return -1;
+        if (d0 > d1)
+            return 1;
+        return 0;
+    }
+
+    /**
+     * Check the deadline on the query. If the query is not terminated and the
+     * deadline has expired, then the query is terminated as a side-effect.
+     *
+     * @param now
+     *            A current timestamp.
+     *
+     * @return <code>null</code> if the query is terminated and
+     *         <code>this</code> if the query is not terminated.
+     */
+    QueryDeadline checkDeadline(final long now) {
+
+        final AbstractRunningQuery q = queryRef.get();
+
+        if (q == null) {
+
+            /*
+             * The weak reference to the query has been cleared. This query is
+             * known to be terminated.
+             */
+
+            return null;
+
+        }
+
+        // Check the deadline.
+        q.checkDeadline();
+
+        if (q.isDone()) {
+
+            // Query is terminated.
+            return null;
+
+        }
+
+        // Query is running and deadline is not expired.
+        return this;
+
+    }
+
+}
\ No newline at end of file

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java	2013-11-15 17:38:20 UTC (rev 7556)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java	2013-11-15 18:31:16 UTC (rev 7557)
@@ -27,10 +27,13 @@
 
 package com.bigdata.bop.engine;
 
+import java.lang.ref.WeakReference;
 import java.lang.reflect.Constructor;
 import java.rmi.RemoteException;
+import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.BlockingQueue;
@@ -42,6 +45,7 @@
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.PriorityBlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -255,10 +259,14 @@
      * Return a {@link CounterSet} which reports various statistics for the
      * {@link QueryEngine}.
      */
+    @Override
     public CounterSet getCounters() {
 
         final CounterSet root = new CounterSet();
 
+        // Note: This counter is not otherwise tracked.
+        counters.deadlineQueueSize.set(deadlineQueue.size());
+
         // global counters.
         root.attach(counters.getCounters());
 
@@ -402,15 +410,7 @@
 //    private final ForkJoinPool fjpool;
 
-    /**
-     * The {@link UUID} of the service in which this {@link QueryEngine} is
-     * running.
-     *
-     * @return The {@link UUID} of the service in which this {@link QueryEngine}
-     *         is running -or- a unique and distinct UUID if the
-     *         {@link QueryEngine} is not running against an
-     *         {@link IBigdataFederation}.
-     */
+    @Override
     public UUID getServiceUUID() {
 
         return ((IRawStore) localIndexManager).getUUID();
 
@@ -589,7 +589,209 @@
 //            );
 
     /**
+     * A queue arranged in order of increasing deadline times. Only queries with
+     * an explicit deadline are added to this priority queue. The head of the
+     * queue contains the query whose deadline will expire soonest. A thread can
+     * thus poll the head of the queue to determine whether the deadline has
+     * passed. Such queries can be removed from the queue and their
+     * {@link AbstractRunningQuery#checkDeadline()} method invoked to force
+     * their timely termination.
+     * <p>
+     * {@link AbstractRunningQuery#startOp(IStartOpMessage)} and
+     * {@link AbstractRunningQuery#haltOp(IHaltOpMessage)} check to see if the
+     * deadline for a query has expired. However, those methods are only invoked
+     * when a query plan operator starts and halts. In cases where the query is
+     * compute bound within a single operator (e.g., ORDER BY or an unconstrained
+     * cross-product JOIN), the query will not be checked for termination. This
+     * priority queue is used to ensure that the query deadline is tested even
+     * though it may be in a compute bound operator.
+     * <p>
+     * If the deadline has expired, {@link IRunningQuery#cancel(boolean)} will
+     * be invoked. In order for a compute bound operator to terminate in a
+     * timely fashion, it MUST periodically test {@link Thread#isInterrupted()}.
+     * <p>
+     * Note: The deadline of a query may be set at most once. Thus, a query
+     * which is entered into the {@link #deadlineQueue} may not have its
+     * deadline modified. This means that we do not have to search the priority
+     * queue for an existing reference to the query. It also means that we are
+     * able to store an object that wraps the query with a {@link WeakReference}
+     * and thus can avoid pinning the query on the heap until its deadline
+     * expires. That means that we do not need to remove entries from the
+     * deadline queue each time a query terminates, but we do need to
+     * periodically trim the queue to ensure that queries with distant deadlines
+     * do not hang around in the queue for long periods of time after their
+     * deadline has expired. This can be done by scanning the queue and removing
+     * all entries whose {@link WeakReference} has been cleared.
+     *
+     * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772">
+     *      Query timeout only checked at operator start/stop. </a>
+     */
+    final private PriorityBlockingQueue<QueryDeadline> deadlineQueue = new PriorityBlockingQueue<QueryDeadline>();
+
+    /**
+     * Queries with a deadline that lies significantly in the future can lie
+     * around in the priority queue until that deadline is reached if there are
+     * other queries in front of them that are not terminated and whose deadline
+     * has not been reached. Therefore, periodically, we need to scan the queue
+     * and clear out entries for terminated queries. This is done any time the
+     * size of the queue is at least this many elements when we examine the
+     * queue in {@link #checkDeadlines()}.
+     */
+    final static private int DEADLINE_QUEUE_SCAN_SIZE = 200;
+
+    /**
+     * The maximum granularity before we will check the deadline priority queue
+     * for queries that need to be terminated because their deadline has
+     * expired.
+     */
+    final static private long DEADLINE_CHECK_MILLIS = 100;
+
+    /**
+     * Add the query to the deadline priority queue.
+     *
+     * @exception IllegalArgumentException
+     *                if the query deadline has not been set.
+     *
+     * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772">
+     *      Query timeout only checked at operator start/stop. </a>
+     */
+    void addQueryToDeadlineQueue(final AbstractRunningQuery query) {
+
+        final long deadline = query.getDeadline();
+
+        if (deadline == Long.MAX_VALUE) {
+            /*
+             * Do not allow queries with an unbounded deadline into the priority
+             * queue.
+             */
+            throw new IllegalArgumentException();
+        }
+
+        deadlineQueue.add(new QueryDeadline(deadline, query));
+
+    }
+
+    /**
+     * Scan the priority queue of queries with a specified deadline, halting any
+     * queries whose deadline has expired.
+     */
+    static private void checkDeadlines(final long now,
+            final PriorityBlockingQueue<QueryDeadline> deadlineQueue) {
+
+        /*
+         * While the queue is thread safe, we want at most one thread at a time
+         * to be inspecting the queue for queries whose deadlines have expired.
+         */
+        synchronized (deadlineQueue) {
+
+            /*
+             * Check the head of the deadline queue for any queries whose
+             * deadline has expired.
+             */
+            checkHeadOfDeadlineQueue(now, deadlineQueue);
+
+            if (deadlineQueue.size() > DEADLINE_QUEUE_SCAN_SIZE) {
+
+                /*
+                 * Scan the deadline queue, removing entries for expired
+                 * queries.
+                 */
+                scanDeadlineQueue(now, deadlineQueue);
+
+            }
+
+        }
+
+    }
+
+    /**
+     * Check the head of the deadline queue for any queries whose deadline has
+     * expired.
+     */
+    static private void checkHeadOfDeadlineQueue(final long now,
+            final PriorityBlockingQueue<QueryDeadline> deadlineQueue) {
+
+        QueryDeadline x;
+
+        // remove the element at the head of the queue.
+        while ((x = deadlineQueue.poll()) != null) {
+
+            // test for query done or deadline expired.
+            if (x.checkDeadline(now) == null) {
+
+                /*
+                 * This query is known to be done. It was removed from the
+                 * priority queue above. We need to check the next element in
+                 * the priority order to see whether it is also done.
+                 */
+
+                continue;
+
+            }
+
+            if (x.deadline > now) {
+
+                /*
+                 * This query has not yet reached its deadline. That means that
+                 * no other query in the deadline queue has reached its
+                 * deadline. Therefore we are done for now.
+                 */
+
+                // Put the query back on the deadline queue.
+                deadlineQueue.add(x);
+
+                break;
+
+            }
+
+        }
+
+    }
+
+    /**
+     * Queries with a deadline that lies significantly in the future can lie
+     * around in the priority queue until that deadline is reached if there are
+     * other queries in front of them that are not terminated and whose deadline
+     * has not been reached. Therefore, periodically, we need to scan the queue
+     * and clear out entries for terminated queries.
+     */
+    static private void scanDeadlineQueue(final long now,
+            final PriorityBlockingQueue<QueryDeadline> deadlineQueue) {
+
+        final List<QueryDeadline> c = new ArrayList<QueryDeadline>(
+                DEADLINE_QUEUE_SCAN_SIZE);
+
+        // drain up to that many elements.
+        deadlineQueue.drainTo(c, DEADLINE_QUEUE_SCAN_SIZE);
+
+        int ndropped = 0, nrunning = 0;
+
+        for (QueryDeadline x : c) {
+
+            if (x.checkDeadline(now) != null) {
+
+                // return this query to the deadline queue.
+                deadlineQueue.add(x);
+
+                nrunning++;
+
+            } else {
+
+                ndropped++;
+
+            }
+
+        }
+
+        if (log.isInfoEnabled())
+            log.info("Scan: threshold=" + DEADLINE_QUEUE_SCAN_SIZE
+                    + ", ndropped=" + ndropped + ", nrunning=" + nrunning
+                    + ", deadlineQueueSize=" + deadlineQueue.size());
+
+    }
+
+    /**
+     *
      * @param localIndexManager
      *            The <em>local</em> index manager.
     */
@@ -616,7 +818,7 @@
     public void init() {
 
         final FutureTask<Void> ft = new FutureTaskMon<Void>(new QueryEngineTask(
-                priorityQueue), (Void) null);
+                priorityQueue, deadlineQueue), (Void) null);
 
         if (engineFuture.compareAndSet(null/* expect */, ft)) {
 
@@ -711,15 +913,23 @@
     */
    static private class QueryEngineTask implements Runnable {
 
-        final private BlockingQueue<AbstractRunningQuery> queue;
+        final private BlockingQueue<AbstractRunningQuery> priorityQueue;
+        final private PriorityBlockingQueue<QueryDeadline> deadlineQueue;
 
-        public QueryEngineTask(final BlockingQueue<AbstractRunningQuery> queue) {
+        public QueryEngineTask(
+                final BlockingQueue<AbstractRunningQuery> priorityQueue,
+                final PriorityBlockingQueue<QueryDeadline> deadlineQueue) {
 
-            if (queue == null)
+            if (priorityQueue == null)
                 throw new IllegalArgumentException();
+
+            if (deadlineQueue == null)
+                throw new IllegalArgumentException();
+
+            this.priorityQueue = priorityQueue;
 
-            this.queue = queue;
-
+            this.deadlineQueue = deadlineQueue;
+
        }
 
        @Override
@@ -727,10 +937,30 @@
            if(log.isInfoEnabled())
                log.info("Running: " + this);
            try {
+                long mark = System.currentTimeMillis();
+                long remaining = DEADLINE_CHECK_MILLIS;
                while (true) {
                    try {
-                        final AbstractRunningQuery q = queue.take();
-                        if (!q.isDone())
+                        final AbstractRunningQuery q = priorityQueue.poll(
+                                remaining, TimeUnit.MILLISECONDS);
+                        final long now = System.currentTimeMillis();
+                        if ((remaining = (mark + DEADLINE_CHECK_MILLIS) - now) <= 0) {
+                            /*
+                             * Check for queries whose deadline has expired.
+                             *
+                             * Note: We only do this every DEADLINE_CHECK_MILLIS
+                             * and then reset [mark] and [remaining].
+                             *
+                             * Note: queue.poll() above waits only up to the
+                             * [remaining] time before the next deadline check.
+                             */
+                            checkDeadlines(now, deadlineQueue);
+                            mark = now;
+                            remaining = DEADLINE_CHECK_MILLIS;
+                        }
+                        // Consume chunk already on queue for this query.
+                        if (q != null && !q.isDone())
                            q.consumeChunk();
                    } catch (InterruptedException e) {
                        /*
@@ -865,6 +1095,10 @@
            cm.shutdown();
 
        }
 
+        // clear the queues
+        priorityQueue.clear();
+        deadlineQueue.clear();
+
        // clear references.
        engineFuture.set(null);
        engineService.set(null);
@@ -920,6 +1154,10 @@
            q.cancel(true/*mayInterruptIfRunning*/);
 
        }
+
+        // clear the queues
+        priorityQueue.clear();
+        deadlineQueue.clear();
 
        // clear references.
        engineFuture.set(null);
@@ -932,6 +1170,7 @@
     * IQueryPeer
     */
 
+    @Override
    @Deprecated // see IQueryClient
    public void declareQuery(final IQueryDecl queryDecl)
            throws RemoteException {
@@ -939,6 +1178,7 @@
 
    }
 
+    @Override
    public void bufferReady(final IChunkMessage<IBindingSet> msg) {
 
        throw new UnsupportedOperationException();
@@ -950,6 +1190,7 @@
     * <p>
     * The default implementation is a NOP.
     */
+    @Override
    public void cancelQuery(final UUID queryId, final Throwable cause) {
        // NOP
    }
@@ -957,7 +1198,7 @@
    /*
     * IQueryClient
     */
-
+    @Override
    public PipelineOp getQuery(final UUID queryId) {
 
        final AbstractRunningQuery q = getRunningQuery(queryId);
@@ -969,6 +1210,7 @@
 
    }
 
+    @Override
    public void startOp(final IStartOpMessage msg) throws RemoteException {
 
        final AbstractRunningQuery q = getRunningQuery(msg.getQueryId());
@@ -981,6 +1223,7 @@
 
    }
 
+    @Override
    public void haltOp(final IHaltOpMessage msg) throws RemoteException {
 
        final AbstractRunningQuery q = getRunningQuery(msg.getQueryId());
@@ -1827,9 +2070,9 @@
//    //
//    // }
//
-//    // TODO Must deliver events in another thread!
-//    // TODO Must drop and drop any errors.
-//    // TODO Optimize with CopyOnWriteArray
+// // Must drop and drop any errors. +// // Optimize with CopyOnWriteArray // // Note: Security hole if we allow notification for queries w/o queryId. // protected void fireQueryEndedEvent(final IRunningQuery query) { // Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngineCounters.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngineCounters.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngineCounters.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -99,12 +99,22 @@ */ protected final CAT operatorHaltCount = new CAT(); + /** + * The size of the deadline queue. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. </a> + */ + protected final CAT deadlineQueueSize = new CAT(); + + @Override public CounterSet getCounters() { final CounterSet root = new CounterSet(); // #of queries started on this server. root.addCounter("queryStartCount", new Instrument<Long>() { + @Override public void sample() { setValue(queryStartCount.get()); } @@ -112,6 +122,7 @@ // #of queries retired on this server. root.addCounter("queryDoneCount", new Instrument<Long>() { + @Override public void sample() { setValue(queryDoneCount.get()); } @@ -119,6 +130,7 @@ // #of queries with abnormal termination on this server. root.addCounter("queryErrorCount", new Instrument<Long>() { + @Override public void sample() { setValue(queryErrorCount.get()); } @@ -126,6 +138,7 @@ // average #of operator tasks evaluated per query root.addCounter("operatorTasksPerQuery", new Instrument<Double>() { + @Override public void sample() { final long opCount = operatorHaltCount.get(); final long n = queryDoneCount.get(); @@ -136,6 +149,7 @@ // #of queries retired per second on this server. root.addCounter("queriesPerSecond", new Instrument<Double>() { + @Override public void sample() { final long ms = elapsedMillis.get(); final long n = queryDoneCount.get(); @@ -154,6 +168,7 @@ // #of blocked work queues. root.addCounter("blockedWorkQueueCount", new Instrument<Long>() { + @Override public void sample() { setValue(blockedWorkQueueCount.get()); } @@ -161,6 +176,7 @@ // #of times that a work queue has blocked. root.addCounter("blockedWorkQueueRunningTotal", new Instrument<Long>() { + @Override public void sample() { setValue(blockedWorkQueueRunningTotal.get()); } @@ -168,6 +184,7 @@ // #of concurrently executing operator tasks. root.addCounter("operatorActiveCount", new Instrument<Long>() { + @Override public void sample() { setValue(operatorActiveCount.get()); } @@ -175,6 +192,7 @@ // #of operator evaluation tasks which have started. root.addCounter("operatorStartCount", new Instrument<Long>() { + @Override public void sample() { setValue(operatorStartCount.get()); } @@ -182,11 +200,20 @@ // #of operator evaluation tasks which have ended. root.addCounter("operatorHaltCount", new Instrument<Long>() { + @Override public void sample() { setValue(operatorHaltCount.get()); } }); + // The size of the deadlineQueue. 
+ root.addCounter("deadlineQueueSize", new Instrument<Long>() { + @Override + public void sample() { + setValue(deadlineQueueSize.get()); + } + }); + return root; } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -55,6 +55,8 @@ import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.engine.AbstractRunningQuery; +import com.bigdata.bop.engine.QueryTimeoutException; import com.bigdata.btree.BytesUtil; import com.bigdata.btree.keys.IKeyBuilder; import com.bigdata.concurrent.FutureTaskMon; @@ -260,6 +262,7 @@ * @see Annotations#PREDICATE */ @SuppressWarnings("unchecked") + @Override public IPredicate<E> getPredicate() { return (IPredicate<E>) getRequiredProperty(Annotations.PREDICATE); @@ -316,6 +319,7 @@ } + @Override public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { return new FutureTaskMon<Void>(new JoinTask<E>(this, context)); @@ -530,6 +534,7 @@ } + @Override public String toString() { return getClass().getName() + "{ joinOp=" + joinOp + "}"; @@ -541,6 +546,7 @@ * * @return <code>null</code>. */ + @Override public Void call() throws Exception { // final long begin = System.currentTimeMillis(); @@ -728,6 +734,31 @@ } + /** + * {@inheritDoc} + * <p> + * Note: The {@link JoinTask} extends {@link Haltable}. We want to treat + * the {@link QueryTimeoutException} as a normal termination cause for a + * {@link JoinTask}. The {@link Haltable} for the + * {@link AbstractRunningQuery} will have its root cause set to the + * {@link QueryTimeoutException} and from there the exception will get + * eventually converted back into the appropriate openrdf exception. We + * do things differently here because the {@link JoinTask} termination + * is not the same as the {@link AbstractRunningQuery} termination. + * {@link JoinTask} is the only place right now where we extend haltable + * and the only place where we have to make this specific override. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. </a> + */ + @Override + protected boolean isNormalTerminationCause(final Throwable cause) { + + return super.isNormalTerminationCause(cause) + || super.isDeadlineTerminationCause(cause); + + } + /** * Cancel sink {@link JoinTask}(s). */ @@ -892,6 +923,7 @@ * true for query on the lastJoin) and that * {@link IBlockingBuffer} has been closed. */ + @Override public Void call() throws Exception { try { @@ -928,8 +960,9 @@ halt(t); if (getCause() != null) { // abnormal termination. - log.error("Halting join (abnormal termination): t="+t+" : cause="+getCause()); - throw new RuntimeException("Halting join: " + t, t); +// log.error("Halting join (abnormal termination): t="+t+" : cause="+getCause()); +// throw new RuntimeException("Halting join: " + t, t); + throw new RuntimeException(t); } // normal termination - ignore exception. if (log.isDebugEnabled()) @@ -1392,6 +1425,7 @@ * * @return if the as bound predicate is equals(). 
*/ + @Override public boolean equals(final Object o) { if (this == o) @@ -1458,6 +1492,7 @@ } + @Override public String toString() { return JoinTask.this.getClass().getSimpleName() + "{ joinOp=" @@ -1481,6 +1516,7 @@ * true for query on the lastJoin) and that * {@link IBlockingBuffer} has been closed. */ + @Override public Void call() throws Exception { halted(); @@ -1717,7 +1753,11 @@ } - bindex++; + if (bindex++ % 50 == 0) { + // Periodically check for an interrupt. + if (Thread.currentThread().isInterrupted()) + throw new InterruptedException(); + } } @@ -2029,6 +2069,7 @@ * * @return */ + @Override public int compareTo(final AccessPathTask o) { /* @@ -2140,6 +2181,7 @@ * true for query on the lastJoin) and that * {@link IBlockingBuffer} has been closed. */ + @Override public Void call() throws Exception { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/BindingSetComparator.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/BindingSetComparator.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/BindingSetComparator.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -55,9 +55,10 @@ } + @Override public int compare(final IBindingSet bs1, final IBindingSet bs2) { - if ((n++ % 5000) == 1) { + if ((n++ % 1000) == 1) { /* * Check for interrupts, but not too often. */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -46,6 +46,7 @@ import cern.colt.Arrays; +import com.bigdata.bop.engine.QueryTimeoutException; import com.bigdata.rdf.store.BigdataSolutionResolverator; import com.bigdata.rdf.store.BigdataStatementIteratorImpl; import com.bigdata.relation.rule.IQueryOptions; @@ -1462,11 +1463,33 @@ } catch (ExecutionException e) { - if (InnerCause.isInnerCause(e, - ClosedByInterruptException.class)|| - InnerCause.isInnerCause(e, - InterruptedException.class)) { + if(InnerCause.isInnerCause(e, + QueryTimeoutException.class)) { + /** + * Closed by query deadline expiration. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator + * start/stop. </a> + */ + if (log.isInfoEnabled()) + log.info(e.getMessage()); + + // itr will not deliver any more elements. + _close(); + + // need to rethrow to convert to openrdf query interrupted exception. + throw new RuntimeException(e); + + } + + if (InnerCause.isInnerCause(e, + ClosedByInterruptException.class)|| + InnerCause.isInnerCause(e, + InterruptedException.class)) { + /* * Note: ClosedByInterruptException indicates that the * producer was interrupted. 
This occurs any time the Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/Haltable.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -44,6 +44,7 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; +import com.bigdata.bop.engine.QueryTimeoutException; import com.bigdata.relation.accesspath.BufferClosedException; import com.bigdata.relation.accesspath.IAsynchronousIterator; import com.bigdata.relation.accesspath.IBlockingBuffer; @@ -105,6 +106,15 @@ private volatile boolean error = false; /** + * Flag is set <code>true</code> if the process was halted by a + * {@link Throwable} indicating a deadline expiration. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. </a> + */ + private volatile boolean deadline = false; + + /** * Set to <code>true</code> iff the process should halt. */ private volatile boolean halt = false; @@ -114,10 +124,18 @@ * cancelled}. */ private volatile boolean cancelled = false; + + /** + * Designated constructor. + */ + public Haltable() { + + } /** * Halt (normal termination). */ + @Override final public void halt(final V v) { lock.lock(); try { @@ -146,6 +164,7 @@ * * @return The argument. */ + @Override final public <T extends Throwable> T halt(final T cause) { final boolean didHalt; lock.lock(); @@ -159,6 +178,7 @@ : new IllegalArgumentException()); // note if abnormal termination (firstCause only) error = !isNormalTerminationCause(firstCause); + deadline = isDeadlineTerminationCause(firstCause); try { // signal *all* listeners. halted.signalAll(); @@ -213,6 +233,7 @@ } + @Override final public boolean cancel(final boolean mayInterruptIfRunning) { lock.lock(); try { @@ -236,6 +257,7 @@ } } + @Override final public V get() throws InterruptedException, ExecutionException { lock.lock(); try { @@ -255,6 +277,7 @@ } } + @Override final public V get(final long timeout, final TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { final long begin = System.nanoTime(); @@ -285,6 +308,7 @@ /** * Return <code>true</code> if the process is done. */ + @Override final public boolean isDone() { return halt; @@ -307,6 +331,7 @@ } + @Override public boolean isCancelled() { // Note: lock required for atomic visibility for [halt AND cancelled]. @@ -319,6 +344,7 @@ } + @Override final public Throwable getCause() { lock.lock(); @@ -340,11 +366,7 @@ } - /** - * Return the first {@link Throwable cause} regardless of whether it is - * indicative of normal termination and <code>null</code> iff no cause has - * been set. - */ + @Override final public Throwable getAsThrownCause() { return firstCause; @@ -386,13 +408,30 @@ * @see #getCause() */ protected boolean isNormalTerminationCause(final Throwable cause) { - if(isTerminationByInterrupt(cause)) - return true; + if (isTerminationByInterrupt(cause)) + return true; if (InnerCause.isInnerCause(cause, RejectedExecutionException.class)) return true; return false; } + /** + * Note: There is a special exemption for {@link QueryTimeoutException}. + * This can not be interpreted as "normal" termination since we want the + * exception to be thrown out and then turned into the corresponding openrdf + * exception. 
However, we do not want to log a full stack trace for this + * since it is, in fact, an exception termination mode for a query. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. </a> + */ + protected boolean isDeadlineTerminationCause(final Throwable cause) { + if (InnerCause.isInnerCause(cause, QueryTimeoutException.class)) { + return true; + } + return false; + } + static public boolean isTerminationByInterrupt(final Throwable cause) { if (InnerCause.isInnerCause(cause, InterruptedException.class)) @@ -403,6 +442,12 @@ return true; if (InnerCause.isInnerCause(cause, BufferClosedException.class)) return true; + /* + * Note: We can not treat this as normal termination or the query will + * fail to report out the openrdf QueryInterruptedException. + */ +// if (InnerCause.isInnerCause(cause, QueryTimeoutException.class)) +// return true; return false; @@ -415,12 +460,17 @@ * an error (as opposed to something which originated as an interrupt) it is * logged @ ERROR. */ - protected void logCause(final boolean isFirstCause, final Throwable cause) { - if (isFirstCause && error) { - log.error(this + " : isFirstCause=" + isFirstCause + " : " - + cause, cause); + private void logCause(final boolean isFirstCause, final Throwable cause) { + if (isFirstCause) { + if (deadline) { + log.warn(this + " : isFirstCause=" + isFirstCause + " : " + + cause, cause); + } else if (error) { + log.error(this + " : isFirstCause=" + isFirstCause + " : " + + cause, cause); + } } else if (log.isEnabledFor(Level.WARN)) { - if (error) { + if (!deadline && error) { log.warn(this + " : isFirstCause=" + isFirstCause + " : " + cause, cause); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/IHaltable.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/IHaltable.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/concurrent/IHaltable.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -43,4 +43,11 @@ */ Throwable getCause(); + /** + * Return the first {@link Throwable cause} regardless of whether it is + * indicative of normal termination and <code>null</code> iff no cause has + * been set. 
+ */ + Throwable getAsThrownCause(); + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/MockRunningQuery.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -83,27 +83,33 @@ } + @Override public IBigdataFederation<?> getFederation() { return fed; } + @Override public IIndexManager getLocalIndexManager() { return indexManager; } + @Override public void halt(Void v) { log.warn("Mock object does not implement halt(Void)"); } + @Override public <T extends Throwable> T halt(T cause) { log.warn("Mock object does not implement halt(Throwable)"); return cause; } + @Override public QueryEngine getQueryEngine() { throw new UnsupportedOperationException(); } + @Override public Map<Integer, BOp> getBOpIndex() { return null; } @@ -112,85 +118,108 @@ // throw new UnsupportedOperationException(); // } + @Override public Map<Integer, BOpStats> getStats() { return null; } + @Override public long getDeadline() { // TODO Auto-generated method stub return 0; } + @Override public long getDoneTime() { // TODO Auto-generated method stub return 0; } + @Override public long getElapsed() { // TODO Auto-generated method stub return 0; } + @Override public long getStartTime() { // TODO Auto-generated method stub return 0; } - public Throwable getCause() { - // TODO Auto-generated method stub - return null; - } + @Override + public Throwable getCause() { + // TODO Auto-generated method stub + return null; + } + @Override + public Throwable getAsThrownCause() { + // TODO Auto-generated method stub + return null; + } + + @Override public BOp getQuery() { // TODO Auto-generated method stub return null; } + @Override public UUID getQueryId() { return queryContext.getQueryId(); } + @Override public IAsynchronousIterator<IBindingSet[]> iterator() { // TODO Auto-generated method stub return null; } + @Override public boolean cancel(boolean mayInterruptIfRunning) { // TODO Auto-generated method stub return false; } + @Override public Void get() throws InterruptedException, ExecutionException { // TODO Auto-generated method stub return null; } + @Override public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { // TODO Auto-generated method stub return null; } + @Override public boolean isCancelled() { // TODO Auto-generated method stub return false; } + @Override public boolean isDone() { // TODO Auto-generated method stub return false; } + @Override public IQueryClient getQueryController() { throw new UnsupportedOperationException(); } + @Override public IMemoryManager getMemoryManager() { return queryContext.getMemoryManager(); } + @Override public IQueryAttributes getAttributes() { return queryContext.getAttributes(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestAll.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestAll.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -45,7 +45,7 @@ /** * @param arg0 */ - public TestAll(String arg0) { + public TestAll(final String arg0) { super(arg0); @@ -95,7 
+95,10 @@ // test suite for the RunState class. suite.addTestSuite(TestRunState.class); - + + // test suite for query deadline ordering semantics. + suite.addTestSuite(TestQueryDeadlineOrder.class); + // test suite for query evaluation (basic JOINs). suite.addTestSuite(TestQueryEngine.class); @@ -117,7 +120,7 @@ suite.addTestSuite(TestQueryEngine_DistinctOp.class); // stress test for GROUP_BY. - suite.addTestSuite(TestQueryEngine_GroupByOp.class); + suite.addTestSuite(TestQueryEngine_GroupByOp.class); return suite; Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryDeadlineOrder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryDeadlineOrder.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryDeadlineOrder.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -0,0 +1,185 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.bop.engine; + +import java.util.Properties; +import java.util.UUID; + +import junit.framework.TestCase2; + +import com.bigdata.bop.BOp; +import com.bigdata.bop.BOpEvaluationContext; +import com.bigdata.bop.NV; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.ap.E; +import com.bigdata.bop.ap.Predicate; +import com.bigdata.bop.ap.R; +import com.bigdata.bop.bindingSet.ListBindingSet; +import com.bigdata.bop.bset.StartOp; +import com.bigdata.bop.solutions.SliceOp; +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.ITx; +import com.bigdata.journal.Journal; +import com.bigdata.striterator.ChunkedArrayIterator; + +/** + * Test suite for {@link QueryDeadline} ordering. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestQueryDeadlineOrder extends TestCase2 { + + public TestQueryDeadlineOrder() { + + } + + public TestQueryDeadlineOrder(final String name) { + super(name); + } + + @Override + public Properties getProperties() { + + final Properties p = new Properties(super.getProperties()); + + p.setProperty(Journal.Options.BUFFER_MODE, BufferMode.Transient + .toString()); + + return p; + + } + + static private final String namespace = "ns"; + private Journal jnl; + private QueryEngine queryEngine; + + @Override + public void setUp() throws Exception { + + jnl = new Journal(getProperties()); + + loadData(jnl); + + queryEngine = new QueryEngine(jnl); + + queryEngine.init(); + + } + + /** + * Create and populate relation in the {@link #namespace}. + */ + private void loadData(final Journal store) { + + // create the relation. + final R rel = new R(store, namespace, ITx.UNISOLATED, new Properties()); + rel.create(); + + // data to insert (in key order for convenience). 
+ final E[] a = {// + new E("John", "Mary"),// [0] + new E("Leon", "Paul"),// [1] + new E("Mary", "Paul"),// [2] + new E("Paul", "Leon"),// [3] + }; + + // insert data (the records are not pre-sorted). + rel.insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */)); + + // Do commit since not scale-out. + store.commit(); + + } + + @Override + public void tearDown() throws Exception { + + if (queryEngine != null) { + queryEngine.shutdownNow(); + queryEngine = null; + } + + if (jnl != null) { + jnl.destroy(); + jnl = null; + } + + } + + /** + * Verify the semantics of {@link QueryDeadline#compareTo(QueryDeadline)}. + * + * @throws Exception + */ + public void testQueryDeadlineOrder01() throws Exception { + + final long now = System.currentTimeMillis(); + + final int startId = 1; + + final PipelineOp query1 = new StartOp(new BOp[] {}, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + new NV(SliceOp.Annotations.EVALUATION_CONTEXT, + BOpEvaluationContext.CONTROLLER),// + })); + + final PipelineOp query2 = new StartOp(new BOp[] {}, NV + .asMap(new NV[] {// + new NV(Predicate.Annotations.BOP_ID, startId),// + new NV(SliceOp.Annotations.EVALUATION_CONTEXT, + BOpEvaluationContext.CONTROLLER),// + })); + + final AbstractRunningQuery runningQuery1 = queryEngine.eval(UUID.randomUUID(), + query1, new ListBindingSet()); + + runningQuery1.setDeadline(now + 10000); + + Thread.sleep(2); + + final AbstractRunningQuery runningQuery2 = queryEngine.eval(UUID.randomUUID(), + query2, new ListBindingSet()); + + runningQuery2.setDeadline(now + 20000); + + final QueryDeadline queryDeadline1 = new QueryDeadline( + runningQuery1.getDeadline(), runningQuery1); + + final QueryDeadline queryDeadline2 = new QueryDeadline( + runningQuery2.getDeadline(), runningQuery2); + + // The earlier deadline is LT the later deadline. + assertTrue(queryDeadline1.compareTo(queryDeadline2) < 0); + + // The later deadline is GT the earlier deadline. + assertTrue(queryDeadline2.compareTo(queryDeadline1) > 0); + + // Same deadline. 
+ assertEquals(0, queryDeadline1.compareTo(queryDeadline1)); + assertEquals(0, queryDeadline2.compareTo(queryDeadline2)); + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2013-11-15 17:38:20 UTC (rev 7556) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2013-11-15 18:31:16 UTC (rev 7557) @@ -35,6 +35,10 @@ import java.io.File; import java.io.IOException; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Method; +import java.util.List; import java.util.Properties; import org.apache.log4j.Logger; @@ -51,10 +55,12 @@ import org.openrdf.repository.Repository; import org.openrdf.repository.RepositoryConnectionTest; +import com.bigdata.bop.fed.QueryEngineFactory; import com.bigdata.btree.keys.CollatorEnum; import com.bigdata.btree.keys.StrengthEnum; import com.bigdata.journal.BufferMode; import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.Journal; import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSail.Options; import com.bigdata.rdf.sail.BigdataSailRepository; @@ -231,8 +237,11 @@ // // } - if (backend != null) + if (backend != null) { + if(log.isInfoEnabled() && backend instanceof Journal) + log.info(QueryEngineFactory.getExistingQueryController((Journal)backend).getCounters()); backend.destroy(); + } } @@ -676,55 +685,153 @@ } } - /* - * I have lifted this out of the base openrdf class since it often enough - * fails in CI or when running the entire TestBigdataSailWithQuads test - * suite. However, when run by itself I observe timely termination based on - * the deadline. + /** + * {@inheritDoc} + * <p> + * This test was failing historically for two reasons. First, it would + * sometimes encounter a full GC pause that would suspend the JVM for longer + * than the query timeout. This would fail the test. Second, the query + * engine code used to only check for a deadline when a query operator would + * start or stop. This meant that a compute bound operator would not be + * interrupted if there was no other concurrent operators for that query + * that were starting and stoping. This was fixed in #722. * - * Note: This query does several scans of the KB and computes their - * unconstrained cross-product and then sorts the results. - * - * I suspect that the problem may be that the ORDER BY operator does not - * notice the timeout since the deadline is only examined when an operator - * starts or stops. If evaluation reaches the ORDER BY operator and the SORT - * begins, then the SORT is not interrupted since the deadline is not being - * examined. - * - * (non-Javadoc) - * - * @see org.openrdf.repository.RepositoryConnectionTest# - * testOrderByQueriesAreInterruptable() + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/772"> + * Query timeout only checked at operator start/stop. 
</a> */ @Override - public void testOrderByQueriesAreInterruptable() - throws Exception - { - testCon.setAutoCommit(false); - for (int index = 0; index < 512; index++) { - testCon.add(RDFS.CLASS, RDFS.COMMENT, testCon.getValueFactory().createBNode()); - } - testCon.setAutoCommit(true); + public void testOrderByQueriesAreInterruptable() throws Exception { - TupleQuery query = testCon.prepareTupleQuery(QueryLanguage.SPARQL, - "SELECT * WHERE { ?s ?p ?o . ?s1 ?p1 ?o1 . ?s2 ?p2 ?o2 . ?s3 ?p3 ?o3 . } ORDER BY ?s1 ?p1 ?o1 LIMIT 1000"); - query.setMaxQueryTime(2); + /* + * Note: Test failures arise from length GC pauses. Such GC pauses + * suspend the application for longer than the query should run and + * cause it to miss its deadline. In order to verify that the deadline + * is being applied correctly, we can only rely on those test trials + * where the GC pause was LT the target query time. Other trials need to + * be thrown out. We do this using a Sun specific management API. The + * test will throw a ClassNotFoundException for other JVMs. + */ + final Class cls1 = Class + .forName("com.sun.management.GarbageCollectorMXBean"); - TupleQueryResult result = query.evaluate(); - log.warn("Query evaluation has begin"); - long startTime = System.currentTimeMillis(); + final Class cls2 = Class.forName("com.sun.management.GcInfo"); + + final Method method1 = cls1.getMethod("getLastGcInfo", new Class[] {}); + + final Method method2 = cls2.getMethod("getDuration", new Class[] {}); + + /* + * Load data. + */ + testCon.setAutoCommit(false); + for (int index = 0; index < 512; index++) { + testCon.add(RDFS.CLASS, RDFS.COMMENT, testCon.getValueFactory() + .createBNode()); + } + testCon.setAutoCommit(true); + testCon.commit(); + + final long MAX_QUERY_TIME = 2000; + final long MAX_TIME_MILLIS = 5000; + final int NTRIALS = 20; + int nok = 0, ngcfail = 0; + + for (int i = 0; i < NTRIALS; i++) { + + if (log.isInfoEnabled()) + log.info("RUN-TEST-PASS #" + i); + + final TupleQuery query = testCon + .prepareTupleQuery( + QueryLanguage.SPARQL, + "SELECT * WHERE { ?s ?p ?o . ?s1 ?p1 ?o1 . ?s2 ?p2 ?o2 . ?s3 ?p3 ?o3 . } ORDER BY ?s1 ?p1 ?o1 LIMIT 1000"); + + query.setMaxQueryTime((int) (MAX_QUERY_TIME / 1000)); + + final long startTime = System.currentTimeMillis(); + + final TupleQueryResult result = query.evaluate(); + + if (log.isInfoEnabled()) + log.info("Query evaluation has begin"); + try { + result.hasNext(); - fail("Query should have been interrupted"); - } - catch (QueryInterruptedException e) { + fail("Query should have been interrupted on pass# " + i); + + } catch (QueryInterruptedException e) { + // Expected - long duration = System.currentTimeMillis() - startTime; - log.warn("Actual query duration: " + duration + "ms"); - assertTrue("Query not interrupted quickly enough, should have been ~2s, but was " - + (duration / 1000) + "s", duration < 5000); + final long duration... [truncated message content] |
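The core idiom behind the #772 fix above recurs in several of these hunks (the bindex++ % 50 check in PipelineJoin, the n++ % 1000 check in BindingSetComparator): a compute-bound loop must poll for cancellation itself, because otherwise the deadline is only observed when an operator starts or stops. The following is a minimal, self-contained sketch of that polling idiom; the class and helper names (ComputeBoundTask, hasMoreWork, processOne) are hypothetical and not part of the bigdata codebase.

    import java.util.concurrent.Callable;

    /**
     * Sketch of the periodic cancellation check from the #772 changes: a
     * tight loop polls the thread's interrupt status every N iterations, so
     * a deadline delivered as an interrupt is noticed promptly while the
     * per-iteration overhead stays negligible.
     */
    public class ComputeBoundTask implements Callable<Void> {

        private static final int CHECK_INTERVAL = 50; // mirrors the PipelineJoin hunk above

        @Override
        public Void call() throws Exception {
            long i = 0;
            while (hasMoreWork()) {
                processOne();
                if (i++ % CHECK_INTERVAL == 0) {
                    // Periodically check for an interrupt (e.g. deadline expiration).
                    if (Thread.currentThread().isInterrupted())
                        throw new InterruptedException();
                }
            }
            return null;
        }

        private boolean hasMoreWork() { return false; } // stub for illustration
        private void processOne() { }                   // stub for illustration
    }

The check interval is a latency/overhead trade-off, which is why the BindingSetComparator hunk tightens its check from every 5000 comparisons to every 1000: an ORDER BY can then overrun its deadline by at most a small, bounded amount of work.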
From: <tho...@us...> - 2013-11-15 17:38:27
Revision: 7556 http://bigdata.svn.sourceforge.net/bigdata/?rev=7556&view=rev Author: thompsonbry Date: 2013-11-15 17:38:20 +0000 (Fri, 15 Nov 2013) Log Message: ----------- @Override annotations Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/AbstractASTEvaluationTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/AbstractASTEvaluationTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/AbstractASTEvaluationTestCase.java 2013-11-15 17:38:03 UTC (rev 7555) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/AbstractASTEvaluationTestCase.java 2013-11-15 17:38:20 UTC (rev 7556) @@ -84,6 +84,7 @@ protected String baseURI = null; + @Override protected void setUp() throws Exception { super.setUp(); @@ -100,6 +101,7 @@ } + @Override protected void tearDown() throws Exception { if (store != null) { @@ -118,6 +120,7 @@ } + @Override public Properties getProperties() { // Note: clone to avoid modifying!!! This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
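These annotation commits are not purely cosmetic; @Override turns a silent signature mismatch into a compile error, which matters for JUnit 3 lifecycle methods that are invoked reflectively. A hypothetical example (not from the bigdata tree) of the failure mode the annotation prevents:

    import junit.framework.TestCase;

    public class ExampleTest extends TestCase {

        // Without @Override, misspelling setUp() as setup() still compiles
        // and the fixture silently never runs; with the annotation, the
        // compiler rejects the typo because nothing is actually overridden.
        @Override
        protected void setUp() throws Exception {
            super.setUp();
            // ... allocate fixtures here ...
        }

        @Override
        protected void tearDown() throws Exception {
            // ... release fixtures here ...
            super.tearDown();
        }
    }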
From: <tho...@us...> - 2013-11-15 17:38:09
Revision: 7555 http://bigdata.svn.sourceforge.net/bigdata/?rev=7555&view=rev Author: thompsonbry Date: 2013-11-15 17:38:03 +0000 (Fri, 15 Nov 2013) Log Message: ----------- @Override, final, private annotations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_DistinctOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_GroupByOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2013-11-15 17:01:58 UTC (rev 7554) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine.java 2013-11-15 17:38:03 UTC (rev 7555) @@ -90,8 +90,6 @@ * @version $Id$ * * @see TestFederatedQueryEngine - * - * @todo write a unit and stress tests for deadlines. */ public class TestQueryEngine extends AbstractQueryEngineTestCase { @@ -104,7 +102,7 @@ /** * @param name */ - public TestQueryEngine(String name) { + public TestQueryEngine(final String name) { super(name); } @@ -121,9 +119,10 @@ } static private final String namespace = "ns"; - Journal jnl; - QueryEngine queryEngine; - + private Journal jnl; + private QueryEngine queryEngine; + + @Override public void setUp() throws Exception { jnl = new Journal(getProperties()); @@ -161,6 +160,7 @@ } + @Override public void tearDown() throws Exception { if (queryEngine != null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_DistinctOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_DistinctOp.java 2013-11-15 17:01:58 UTC (rev 7554) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_DistinctOp.java 2013-11-15 17:38:03 UTC (rev 7555) @@ -55,6 +55,7 @@ */ public class TestQueryEngine_DistinctOp extends TestCase2 { + @Override public Properties getProperties() { final Properties p = new Properties(super.getProperties()); @@ -66,9 +67,10 @@ } - Journal jnl; - QueryEngine queryEngine; + private Journal jnl; + private QueryEngine queryEngine; + @Override public void setUp() throws Exception { jnl = new Journal(getProperties()); @@ -79,6 +81,7 @@ } + @Override public void tearDown() throws Exception { if (queryEngine != null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_GroupByOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_GroupByOp.java 2013-11-15 17:01:58 UTC (rev 7554) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_GroupByOp.java 2013-11-15 17:38:03 UTC (rev 7555) @@ -50,6 +50,7 @@ */ public class TestQueryEngine_GroupByOp extends TestCase2 { + @Override public Properties getProperties() { final Properties p = new Properties(super.getProperties()); @@ -61,9 +62,10 @@ } - Journal jnl; - QueryEngine queryEngine; + private Journal jnl; + private QueryEngine queryEngine; + @Override public void setUp() throws Exception 
{ jnl = new Journal(getProperties()); @@ -74,6 +76,7 @@ } + @Override public void tearDown() throws Exception { if (queryEngine != null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java 2013-11-15 17:01:58 UTC (rev 7554) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/engine/TestQueryEngine_Slice.java 2013-11-15 17:38:03 UTC (rev 7555) @@ -87,9 +87,10 @@ } - Journal jnl; - QueryEngine queryEngine; - + private Journal jnl; + private QueryEngine queryEngine; + + @Override public void setUp() throws Exception { jnl = new Journal(getProperties()); @@ -100,6 +101,7 @@ } + @Override public void tearDown() throws Exception { if (queryEngine != null) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-15 17:02:04
Revision: 7554 http://bigdata.svn.sourceforge.net/bigdata/?rev=7554&view=rev Author: thompsonbry Date: 2013-11-15 17:01:58 +0000 (Fri, 15 Nov 2013) Log Message: ----------- @Override annotations Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-11-15 17:00:14 UTC (rev 7553) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-11-15 17:01:58 UTC (rev 7554) @@ -314,6 +314,7 @@ * available and no task is currently running, then drain the work queue and * submit a task to consume that work. */ + @Override protected void consumeChunk() { lock.lock(); try { @@ -722,6 +723,7 @@ } + @Override public void run() { try { @@ -791,6 +793,7 @@ } + @Override public void run() { final QueryEngine queryEngine = getQueryEngine(); @@ -1015,6 +1018,7 @@ /** * A human readable representation of the {@link ChunkTask}'s state. */ + @Override public String toString() { return "ChunkTask" + // "{query=" + getQueryId() + // @@ -1269,6 +1273,7 @@ /** * Evaluate the {@link IChunkMessage}. */ + @Override public Void call() throws Exception { if (log.isDebugEnabled()) log.debug("Running chunk: " + this); @@ -1355,6 +1360,7 @@ } + @Override public void add(final E e) { super.add(e); if (SolutionsLog.solutionsLog.isInfoEnabled()) { @@ -1609,7 +1615,8 @@ chunkSize = 0; smallChunks = null; } - + + @Override synchronized // Note: possible side-effect on internal buffer. public long flush() { if (open) @@ -1618,46 +1625,55 @@ // return sink.flush(); } + @Override public void abort(final Throwable cause) { open = false; q.halt(cause); // sink.abort(cause); } + @Override public void close() { // sink.close(); open = false; } + @Override public Future getFuture() { // return sink.getFuture(); return null; } + @Override public boolean isEmpty() { return true; // return sink.isEmpty(); } + @Override public boolean isOpen() { return open && !q.isDone(); // return sink.isOpen(); } + @Override public IAsynchronousIterator<IBindingSet[]> iterator() { throw new UnsupportedOperationException(); // return sink.iterator(); } + @Override public void reset() { // sink.reset(); } + @Override public void setFuture(Future future) { throw new UnsupportedOperationException(); // sink.setFuture(future); } + @Override public int size() { return 0; // return sink.size(); @@ -1702,6 +1718,7 @@ } + @Override public void run() { try { if (q.isController()) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-11-15 17:00:22
Revision: 7553 http://bigdata.svn.sourceforge.net/bigdata/?rev=7553&view=rev Author: thompsonbry Date: 2013-11-15 17:00:14 +0000 (Fri, 15 Nov 2013) Log Message: ----------- javadoc/@Override annotations Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2013-11-14 20:27:35 UTC (rev 7552) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IQueryPeer.java 2013-11-15 17:00:14 UTC (rev 7553) @@ -16,9 +16,14 @@ public interface IQueryPeer extends Remote { /** - * The {@link UUID} of the service within which the {@link IQueryPeer} is + * The {@link UUID} of the service in which this {@link QueryEngine} is * running. * + * @return The {@link UUID} of the service in which this {@link QueryEngine} + * is running -or- a unique and distinct UUID if the + * {@link QueryEngine} is not running against an + * IBigdataFederation. + * * @see IService#getServiceUUID() */ UUID getServiceUUID() throws RemoteException; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-11-14 20:27:35 UTC (rev 7552) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/IRunningQuery.java 2013-11-15 17:00:14 UTC (rev 7553) @@ -59,6 +59,7 @@ /** * The unique identifier for this query. */ + @Override UUID getQueryId(); /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
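The clarified getServiceUUID() contract above — the enclosing service's UUID under a federation, otherwise a UUID that is still unique and stable for the life of the engine — is straightforward to satisfy. A hedged sketch of one way to meet it; StandaloneQueryPeer is an invented name, not the actual QueryEngine implementation:

    import java.util.UUID;

    /**
     * Sketch of the contract documented in r7553: return the service UUID
     * when one exists, otherwise mint a random UUID once and return that
     * same value on every call, so the engine stays uniquely identifiable
     * even without a federation.
     */
    public class StandaloneQueryPeer {

        private final UUID serviceUUID;

        public StandaloneQueryPeer(final UUID federationServiceUUID) {
            this.serviceUUID = (federationServiceUUID != null)
                    ? federationServiceUUID
                    : UUID.randomUUID(); // distinct per standalone engine
        }

        public UUID getServiceUUID() {
            return serviceUUID;
        }
    }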
From: <tho...@us...> - 2013-11-14 20:27:41
Revision: 7552 http://bigdata.svn.sourceforge.net/bigdata/?rev=7552&view=rev Author: thompsonbry Date: 2013-11-14 20:27:35 +0000 (Thu, 14 Nov 2013) Log Message: ----------- @Override annotations Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java 2013-11-14 08:24:35 UTC (rev 7551) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemorySortOp.java 2013-11-14 20:27:35 UTC (rev 7552) @@ -123,6 +123,7 @@ } + @Override public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { return new FutureTask<Void>(new SortTask(this, context)); @@ -200,6 +201,7 @@ } + @Override public Void call() throws Exception { final ICloseableIterator<IBindingSet[]> itr = context This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
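The eval() method annotated in this last commit follows the same pattern as PipelineJoin.eval() earlier in this batch: the operator does no work in eval() itself but wraps a task in a FutureTask that the query engine schedules and, on deadline expiration, cancels. A generic sketch of the idiom using only java.util.concurrent; EchoOp and EchoTask are invented names standing in for a real operator:

    import java.util.concurrent.Callable;
    import java.util.concurrent.FutureTask;

    public class EchoOp {

        /**
         * Package the per-chunk work as a FutureTask. Returning the task
         * instead of running it lets the engine decide when it runs and
         * gives the engine a handle for cancellation, which is how a
         * deadline interrupt ultimately reaches the running task.
         */
        public FutureTask<Void> eval(final String[] chunk) { // hypothetical context argument
            return new FutureTask<Void>(new EchoTask(chunk));
        }

        private static class EchoTask implements Callable<Void> {

            private final String[] chunk;

            EchoTask(final String[] chunk) {
                this.chunk = chunk;
            }

            @Override
            public Void call() throws Exception {
                for (String s : chunk) {
                    System.out.println(s); // stand-in for the operator's real work
                }
                return null;
            }
        }
    }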