This list is closed, nobody may subscribe to it.
From: <tho...@us...> - 2014-01-09 19:24:41
Revision: 7753 http://bigdata.svn.sourceforge.net/bigdata/?rev=7753&view=rev Author: thompsonbry Date: 2014-01-09 19:24:27 +0000 (Thu, 09 Jan 2014) Log Message: ----------- This issue is closed. Branch MGC_1_3_0 was created from r7608 of branches/BIGDATA_RELEASE_1_3_0. This branch was used to make HA robust in the face of sudden process kills. Merging MGC_1_3_0 back to the main development branch (branches/BIGDATA_RELEASE_1_3_0). Work on MGC_1_3_0 is closed out. {{{ merge -r7608:HEAD https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/MGC_1_3_0 /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/disco U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/disco --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/quorum/zk/ZKQuorumImpl.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/attr U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/attr --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/util/config U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-jini/src/java/com/bigdata/util/config --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/lubm U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/lubm --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/uniprot/src U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/uniprot/src --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/uniprot U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/uniprot --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/btc/src/resources U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/btc/src/resources --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/btc U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf/btc --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-perf --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/src/resources/bin/config U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/src/resources/bin/config --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/build.properties U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/build.xml --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-compatibility U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-compatibility --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/LEGAL U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/LEGAL --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/lib U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/lib --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test/it/unimi/dsi U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test/it/unimi/dsi --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test/it/unimi U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test/it/unimi --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/test --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java/it/unimi U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java/it/unimi --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java/it U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java/it --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src/java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils/src --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/dsi-utils --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-war/src/html/index.html --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/lib/jetty U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/lib/jetty --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/joinGraph U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/joinGraph --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/TestAll.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/join/AbstractHashJoinUtilityTestCase.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/util U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/bop/util --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/striterator/TestCloseableChunkedIteratorWrapperConverter.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/TestAll.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/pipeline/AbstractHASendAndReceiveTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/msg/TestHASendState.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/ha/msg/TestAll.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/jsr166 U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/jsr166 --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/HABranch.txt D 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java D /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java D /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/TestJournalHA.java D /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/TestAll.java D /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/ha/TestHAWritePipeline.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/journal/TestWORMStrategyNoCache.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/util/httpd U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/test/com/bigdata/util/httpd --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/CloseableIteratorWrapper.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/MergeFilter.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/Striterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/ChunkedResolvingIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/IChunkedStriterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/Resolver.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/PushbackIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/GenericChunkedStriterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/Chunkerator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/DelegateChunkedIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/ChunkedOrderedStriterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/ChunkedConvertingIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/Dechunkerator.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/striterator/CloseableChunkedIteratorWrapperConverter.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java C 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/QuorumActor.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/QuorumClient.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/ServiceLookup.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/HAPipelineResetResponse.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/IHAPipelineResetRequest.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/IHAPipelineResetResponse.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/PipelineDownstreamChange.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/PipelineImmediateDownstreamReplicationException.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/NestedPipelineException.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineException.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/ImmediateDownstreamReplicationException.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/PipelineUpstreamChange.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineChangeException.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/HAPipelineResetRequest.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessageBase.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/IHASendState.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/HASendState.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/IHAMessageWrapper.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/msg/HAMessageWrapper.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java A /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/ha/AbstractMessageTask.java U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/solutions/GroupByRewriter.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/solutions/GroupByState.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/aggregate U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/aggregate --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/engine/RunState.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/util U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/util --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/joinGraph U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/joinGraph --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/BOpBase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/fed/EmptyChunkMessage.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/IHashJoinUtility.java U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/BaseJoinStats.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/NamedSolutionSetStats.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/join/JoinVariableNotBoundException.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/BOpUtility.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/jsr166 U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/jsr166 --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/htree/raba U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/htree/raba --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/journal/Options.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/journal/Name2Addr.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/journal/AbstractJournal.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/relation/accesspath/IBindingSetAccessPath.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/util/NT.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/resources/deployment --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/osgi U 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/osgi --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/test/com/bigdata/rdf/sail/bench U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/test/com/bigdata/rdf/sail/bench --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/bench U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/bench --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestQueryHints.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/internal U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/internal --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/relation U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/test/com/bigdata/rdf/relation --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParserStats.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/changesets U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/changesets --- 
Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/error U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/error --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/IQueryHint.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AbstractChunkSizeHint.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GraphPatternGroup.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NamedSubqueryInclude.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionListBaseNode.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SliceNode.java U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryOptimizerEnum.java --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/internal U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/internal --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/relation U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/relation --- Merging r7608 through r7752 into 
/Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/util U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/java/com/bigdata/rdf/util --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/samples U /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/samples --- Merging r7608 through r7752 into /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata-rdf/src/resources/data/lehigh/LUBM-U1.rdf.gz C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN Merge complete. ===== File Statistics: ===== Deleted: 5 Added: 19 Updated: 108 ==== Property Statistics: ===== Updated: 49 ==== Conflict Statistics: ===== File conflicts: 2 Property conflicts: 1 Tree conflicts: 13 }}} There were TWO files with conflicts. In both cases, the incoming version was accepted. Those files are: {{{ C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java C /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_CLEAN/bigdata/src/java/com/bigdata/quorum/QuorumActor.java }}} There was also an SVN property conflict on svn:ignore. This was resolved by using the version from the main branch. I also needed to go into the tree conflict view and "mark resolved" a number of files. Each of this was reported as "local add" and "incoming and upon merge". In all cases these were simply recently added files from the main branch. See #779 (pipeline resynchronization protocol) See #724 (HA wire pulls and sure kill tests) Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev 
http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7608&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAPipelineGlue.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumCommitImpl.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipeline.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessageBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/PipelineDownstreamChange.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/PipelineUpstreamChange.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumActor.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/QuorumClient.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/msg/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestHASendAndReceive3Nodes.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestWORMStrategyNoCache.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/HABranch.txt branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/quorum/MockQuorumFixture.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/quorum/zk/MockQuorumMember.java branches/BIGDATA_RELEASE_1_3_0/build.xml Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/AbstractMessageTask.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAPipelineResetRequest.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/HAPipelineResetResponse.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IHAPipelineResetRequest.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/IHAPipelineResetResponse.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/HAMessageWrapper.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/HASendState.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/IHAMessageWrapper.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/msg/IHASendState.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineChangeException.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/AbstractPipelineException.java 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/ImmediateDownstreamReplicationException.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/NestedPipelineException.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/ha/pipeline/PipelineImmediateDownstreamReplicationException.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/quorum/ServiceLookup.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/msg/TestHASendState.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/AbstractHASendAndReceiveTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/ha/pipeline/TestSocketsDirect.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JustKills.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/AbstractHAJournalTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/TestHAWORMStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/TestHAWritePipeline.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/ha/TestJournalHA.java Property Changed: ---------------- branches/BIGDATA_RELEASE_1_3_0/ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd/ branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/samples/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ 
branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/LEGAL/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/lib/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/unimi/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/dsi/ branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_3_0/osgi/ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/config/ Property changes on: branches/BIGDATA_RELEASE_1_3_0 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/MGC_1_3_0:7609-7752 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 
/branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/joinGraph:7465-7484 + 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/joinGraph:7465-7484 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/util:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/MGC_1_3_0/bigdata/src/ja... [truncated message content] |
From: <tho...@us...> - 2014-01-09 17:44:51
Revision: 7752 http://bigdata.svn.sourceforge.net/bigdata/?rev=7752&view=rev Author: thompsonbry Date: 2014-01-09 17:44:44 +0000 (Thu, 09 Jan 2014) Log Message: ----------- Modified timeouts in loops for local and remote futures in QuorumPipelineImpl prior to merge to main branch. Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-08 17:58:02 UTC (rev 7751) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-09 17:44:44 UTC (rev 7752) @@ -1951,17 +1951,6 @@ * continuously. This does rather beg the question of * whether we should only be checking futRec at this stage. */ - - /* - * Await the Futures, but spend more time waiting on the - * local Future and only check the remote Future every - * second. Timeouts are ignored during this loop - they - * are used to let us wait longer on the local Future - * than on the remote Future. ExecutionExceptions are - * also ignored. We want to continue this loop until - * both Futures are done. Interrupts are not trapped, so - * an interrupt will still exit the loop. - */ while (!futSnd.isDone() || !futRec.isDone()) { /* * Make sure leader's quorum token remains valid for @@ -1969,7 +1958,7 @@ */ member.assertLeader(token); try { - futSnd.get(10L, TimeUnit.MILLISECONDS); + futSnd.get(500L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { /* @@ -1977,7 +1966,7 @@ * if not done. */ try { - futRec.get(1L, TimeUnit.SECONDS); + futRec.get(500L, TimeUnit.MILLISECONDS); } catch(TimeoutException ex) { // Ignore. } catch(ExecutionException ex) { // Ignore. } finally { @@ -1988,7 +1977,7 @@ */ } try { - futRec.get(10L, TimeUnit.MILLISECONDS); + futRec.get(500L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { /* @@ -1996,7 +1985,7 @@ * if not done. */ try { - futSnd.get(10L, TimeUnit.MILLISECONDS); + futSnd.get(500L, TimeUnit.MILLISECONDS); } catch(TimeoutException ex) { // Ignore. } catch(ExecutionException ex) { // Ignore. } finally { @@ -2582,7 +2571,7 @@ */ member.getQuorum().assertQuorum(token); try { - futRec.get(1L, TimeUnit.SECONDS); + futRec.get(500L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { /* @@ -2590,7 +2579,7 @@ * if not done. */ try { - futRep.get(1L, TimeUnit.SECONDS); + futRep.get(500L, TimeUnit.MILLISECONDS); } catch(TimeoutException ex) { // Ignore. } catch(ExecutionException ex) { // Ignore. } finally { @@ -2601,7 +2590,7 @@ */ } try { - futRep.get(10L, TimeUnit.MILLISECONDS); + futRep.get(500L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { /* @@ -2609,7 +2598,7 @@ * if not done. */ try { - futRec.get(1L, TimeUnit.SECONDS); + futRec.get(500L, TimeUnit.MILLISECONDS); } catch(TimeoutException ex) { // Ignore. } catch(ExecutionException ex) { // Ignore. } finally { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
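The loop patched above polls a local Future (futSnd) and a remote Future (futRec) until both are done, replacing the earlier mix of 10ms and 1s waits with a single 500ms poll. The standalone sketch below illustrates that polling pattern in isolation; the class name, the sleep-based tasks and the 500ms interval are illustrative assumptions, not code taken from QuorumPipelineImpl.

{{{
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DualFuturePollExample {

    public static void main(final String[] args) throws Exception {
        final ExecutorService exec = Executors.newFixedThreadPool(2);
        try {
            // Stand-ins for the local send Future and the remote receive Future.
            final Future<Void> futSnd = exec.submit(sleepTask(1200));
            final Future<Void> futRec = exec.submit(sleepTask(2000));

            // Keep polling until BOTH futures are done. Timeouts are ignored;
            // if one future fails, the other is drained briefly and cancelled
            // so the loop still terminates with both futures done.
            while (!futSnd.isDone() || !futRec.isDone()) {
                pollAndCancelOtherOnError(futSnd, futRec);
                pollAndCancelOtherOnError(futRec, futSnd);
            }
            System.out.println("both futures done");
        } finally {
            exec.shutdownNow();
        }
    }

    private static void pollAndCancelOtherOnError(final Future<Void> f,
            final Future<Void> other) throws InterruptedException {
        try {
            f.get(500L, TimeUnit.MILLISECONDS); // uniform short poll interval
        } catch (TimeoutException ignore) {
            // Not done yet: fall through and keep looping.
        } catch (CancellationException ignore) {
            // Cancelled elsewhere: it is done.
        } catch (ExecutionException ignore) {
            // f is done (it failed). Drain the other future, then cancel it.
            try {
                other.get(500L, TimeUnit.MILLISECONDS);
            } catch (TimeoutException ex) { // Ignore.
            } catch (CancellationException ex) { // Ignore.
            } catch (ExecutionException ex) { // Ignore.
            } finally {
                other.cancel(true/* mayInterruptIfRunning */);
            }
        }
    }

    private static Callable<Void> sleepTask(final long millis) {
        return new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Thread.sleep(millis);
                return null;
            }
        };
    }
}
}}}

With a single 500ms poll interval the remote future is re-checked as often as the local one, which is the behavioral change the revised timeouts above make.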
From: <tho...@us...> - 2014-01-08 17:58:09
Revision: 7751 http://bigdata.svn.sourceforge.net/bigdata/?rev=7751&view=rev Author: thompsonbry Date: 2014-01-08 17:58:02 +0000 (Wed, 08 Jan 2014) Log Message: ----------- Added a hidden option to enable the RTO. Changed the way in which the analytic option recognition works. Verified correctly works in sample queries. See #64 (RTO. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-01-08 17:56:54 UTC (rev 7750) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java 2014-01-08 17:58:02 UTC (rev 7751) @@ -101,6 +101,7 @@ import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; import com.bigdata.rdf.sparql.ast.ASTContainer; import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.QueryOptimizerEnum; import com.bigdata.rdf.sparql.ast.QueryRoot; import com.bigdata.rdf.sparql.ast.QueryType; import com.bigdata.rdf.sparql.ast.Update; @@ -145,6 +146,12 @@ protected static final String ANALYTIC = "analytic"; /** + * URL Query parameter used to request the use of the Runtime Query + * Optimizer. + */ + protected static final String RTO = "RTO"; + + /** * URL Query parameter used to request an XHTML response for SPARQL * QUERY or SPARQL UPDATE. For SPARQL QUERY, this provides an XHTML * table view of the solutions. For SPARQL UPDATE, this provides an @@ -612,9 +619,14 @@ /** * When <code>true</code>, enable the "analytic" query hints. */ - final Boolean analytic; + final boolean analytic; /** + * When <code>true</code>, enable the Runtime Query Optimizer. + */ + final boolean rto; + + /** * When <code>true</code>, provide an view of the XHTML representation * of the solutions or graph result (SPARQL QUERY) * @@ -751,6 +763,9 @@ this.explainDetails = explain && isExplainDetails(req); this.analytic = getEffectiveBooleanValue( req.getParameter(ANALYTIC), QueryHints.DEFAULT_ANALYTIC); + this.rto = getEffectiveBooleanValue(req.getParameter(RTO), + QueryHints.DEFAULT_OPTIMIZER + .equals(QueryOptimizerEnum.Runtime)); this.xhtml = getEffectiveBooleanValue(req.getParameter(XHTML), false); this.monitor = getEffectiveBooleanValue(req.getParameter(MONITOR), @@ -822,6 +837,9 @@ this.explainDetails = explain && isExplainDetails(req); this.analytic = getEffectiveBooleanValue( req.getParameter(ANALYTIC), QueryHints.DEFAULT_ANALYTIC); + this.rto = getEffectiveBooleanValue(req.getParameter(RTO), + QueryHints.DEFAULT_OPTIMIZER + .equals(QueryOptimizerEnum.Runtime)); this.xhtml = getEffectiveBooleanValue(req.getParameter(XHTML), false); this.monitor = getEffectiveBooleanValue(req.getParameter(MONITOR), @@ -898,13 +916,18 @@ // Override query if data set protocol parameters were used. overrideDataset(query); - if (analytic != null) { + if (analytic) { // Turn analytic query on/off as requested. -// astContainer.getOriginalAST().setQueryHint(QueryHints.ANALYTIC, -// analytic.toString()); - astContainer.setQueryHint(QueryHints.ANALYTIC, - analytic.toString()); + astContainer.setQueryHint(QueryHints.ANALYTIC, "true"); + + } + + if (rto) { + + // Turn analytic query on/off as requested. 
+ astContainer.setQueryHint(QueryHints.OPTIMIZER, + QueryOptimizerEnum.Runtime.toString()); } @@ -951,13 +974,18 @@ // Override query if data set protocol parameters were used. overrideDataset(update); - if (analytic != null) { + if (analytic) { // Turn analytic query on/off as requested. -// astContainer.getOriginalAST().setQueryHint(QueryHints.ANALYTIC, -// analytic.toString()); - astContainer.setQueryHint(QueryHints.ANALYTIC, - analytic.toString()); + astContainer.setQueryHint(QueryHints.ANALYTIC, "true"); + + } + + if (rto) { + + // Turn analytic query on/off as requested. + astContainer.setQueryHint(QueryHints.OPTIMIZER, + QueryOptimizerEnum.Runtime.toString()); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-01-08 17:56:54 UTC (rev 7750) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/html/index.html 2014-01-08 17:58:02 UTC (rev 7751) @@ -59,7 +59,12 @@ <INPUT type="checkbox" name="analytic" value="true" title="Enable the analytic query package." > Analytic - <INPUT type="checkbox" name="xhtml" value="true" +<!-- TODO Uncomment to reveal the RTO option. + <INPUT type="checkbox" name="RTO" value="true" + title="Enable the Runtime Query Optimizer (RTO)." + > RTO +--> + <INPUT type="checkbox" name="xhtml" value="true" title="Request XHTML response (results formatted as table)." checked="checked" > XHTML This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
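For callers, the net effect of r7751 is that the Runtime Query Optimizer can be requested per request by adding RTO=true to the URL, even while the corresponding checkbox stays commented out in index.html. The sketch below shows one way a client might do that; the endpoint URL, the example query and the Accept header are assumptions for illustration and are not taken from this commit.

{{{
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

public class RtoQueryRequest {

    public static void main(final String[] args) throws Exception {

        // Assumed endpoint location; adjust for your NanoSparqlServer deployment.
        final String endpoint = "http://localhost:8080/sparql";

        // Illustrative query; any SPARQL query will do.
        final String query = "SELECT (COUNT(*) AS ?c) WHERE { ?s ?p ?o }";

        // RTO=true is the hidden URL parameter added in r7751: the servlet maps
        // it onto the "Runtime" value of the optimizer query hint.
        final String url = endpoint + "?query="
                + URLEncoder.encode(query, "UTF-8") + "&RTO=true";

        final HttpURLConnection conn = (HttpURLConnection) new URL(url)
                .openConnection();
        conn.setRequestProperty("Accept", "application/sparql-results+xml");

        final BufferedReader reader = new BufferedReader(new InputStreamReader(
                conn.getInputStream(), "UTF-8"));
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        } finally {
            reader.close();
            conn.disconnect();
        }
    }
}
}}}

The same optimizer can also be requested from within the query text itself, as the hint:Group hint:optimizer "Runtime" line in the LUBM-Q8.rq change further down this page shows.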
From: <tho...@us...> - 2014-01-08 17:57:00
Revision: 7750 http://bigdata.svn.sourceforge.net/bigdata/?rev=7750&view=rev Author: thompsonbry Date: 2014-01-08 17:56:54 +0000 (Wed, 08 Jan 2014) Log Message: ----------- @Override, final annotations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2014-01-08 17:34:47 UTC (rev 7749) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/NanoSparqlClient.java 2014-01-08 17:56:54 UTC (rev 7750) @@ -217,6 +217,7 @@ } + @Override public Void call() throws Exception { // used to measure the total execution time. @@ -454,11 +455,13 @@ parser .setTupleQueryResultHandler(new TupleQueryResultHandlerBase() { // Indicates the end of a sequence of solutions. + @Override public void endQueryResult() { // connection close is handled in finally{} } // Handles a solution. + @Override public void handleSolution(final BindingSet bset) { if (log.isDebugEnabled()) log.debug(bset.toString()); @@ -466,6 +469,7 @@ } // Indicates the start of a sequence of Solutions. + @Override public void startQueryResult(List<String> bindingNames) { } }); @@ -731,12 +735,13 @@ final class Pair implements Comparable<Pair> { public double r = rnd.nextDouble(); - public int val; + final public int val; - public Pair(int val) { + public Pair(final int val) { this.val = val; } + @Override public int compareTo(final Pair other) { if (this == other) return 0; @@ -856,6 +861,7 @@ } + @Override public QueryOptions clone() { try { @@ -987,6 +993,7 @@ /** * Order by increasing elapsed time (slowest queries are last). */ + @Override public int compareTo(final Score o) { if (elapsedNanos < o.elapsedNanos) return -1; @@ -1123,6 +1130,7 @@ } + @Override public void run() { final long begin = System.nanoTime(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
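As a brief illustration of why the annotations added in r7750 are worth the churn: @Override turns an accidental overload (a method that was meant to override a supertype method but has the wrong signature) into a compile-time error, and final on fields and parameters documents that they are assigned exactly once. The example below is illustrative only and is not drawn from the bigdata codebase.

{{{
public class OverrideCheckExample {

    static class Point {
        final int x, y; // final fields: assigned exactly once, in the constructor

        Point(final int x, final int y) {
            this.x = x;
            this.y = y;
        }

        // Without @Override, writing equals(Point) instead of equals(Object)
        // would compile as an unrelated overload and silently never be called
        // by collections. With @Override, the wrong signature fails to compile.
        @Override
        public boolean equals(final Object o) {
            if (this == o)
                return true;
            if (!(o instanceof Point))
                return false;
            final Point p = (Point) o;
            return x == p.x && y == p.y;
        }

        @Override
        public int hashCode() {
            return 31 * x + y;
        }
    }

    public static void main(final String[] args) {
        System.out.println(new Point(1, 2).equals(new Point(1, 2))); // true
    }
}
}}}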
From: <tho...@us...> - 2014-01-08 17:34:56
Revision: 7749 http://bigdata.svn.sourceforge.net/bigdata/?rev=7749&view=rev Author: thompsonbry Date: 2014-01-08 17:34:47 +0000 (Wed, 08 Jan 2014) Log Message: ----------- whitespace for clarity Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq 2014-01-08 17:10:07 UTC (rev 7748) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq 2014-01-08 17:34:47 UTC (rev 7749) @@ -4,11 +4,13 @@ #SELECT ?x ?y ?z SELECT (COUNT(*) as ?count) WHERE{ + # Control all RTO parameters for repeatable behavior. hint:Group hint:optimizer "Runtime". hint:Group hint:RTO-sampleType "DENSE". hint:Group hint:RTO-limit "100". hint:Group hint:RTO-nedges "1". + ?y a ub:Department . ?x a ub:Student; ub:memberOf ?y . This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2014-01-08 17:10:15
Revision: 7748 http://bigdata.svn.sourceforge.net/bigdata/?rev=7748&view=rev Author: martyncutcher Date: 2014-01-08 17:10:07 +0000 (Wed, 08 Jan 2014) Log Message: ----------- Modification to support possible future non-thick future for receiveAndReplicate to check for regression on CI. The intention is to comment out this code once we have verified it is a plausible approach. Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-08 15:14:09 UTC (rev 7747) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-08 17:10:07 UTC (rev 7748) @@ -1951,27 +1951,65 @@ * continuously. This does rather beg the question of * whether we should only be checking futRec at this stage. */ - while (!(futSnd.isDone() || futRec.isDone())) { + + /* + * Await the Futures, but spend more time waiting on the + * local Future and only check the remote Future every + * second. Timeouts are ignored during this loop - they + * are used to let us wait longer on the local Future + * than on the remote Future. ExecutionExceptions are + * also ignored. We want to continue this loop until + * both Futures are done. Interrupts are not trapped, so + * an interrupt will still exit the loop. + */ + while (!futSnd.isDone() || !futRec.isDone()) { /* * Make sure leader's quorum token remains valid for * ALL writes. */ member.assertLeader(token); try { - futSnd.get(1L, TimeUnit.SECONDS); + futSnd.get(10L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { + /* + * Try the other Future with timeout and cancel + * if not done. + */ + try { + futRec.get(1L, TimeUnit.SECONDS); + } catch(TimeoutException ex) { // Ignore. + } catch(ExecutionException ex) { // Ignore. + } finally { + futRec.cancel(true/* mayInterruptIfRunning */); + } + /* + * Note: Both futures are DONE at this point. + */ } try { futRec.get(10L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { + /* + * Try the other Future with timeout and cancel + * if not done. + */ + try { + futSnd.get(10L, TimeUnit.MILLISECONDS); + } catch(TimeoutException ex) { // Ignore. + } catch(ExecutionException ex) { // Ignore. + } finally { + futSnd.cancel(true/* mayInterruptIfRunning */); + } + /* + * Note: Both futures are DONE at this point. + */ } } - + /* - * Note: Both futures are DONE at this point. However, - * we want to check the remote Future for the downstream + * Note: We want to check the remote Future for the downstream * service first in order to accurately report the * service that was the source of a pipeline replication * problem. @@ -2534,7 +2572,7 @@ * TODO: check the comparative logic with this and robustReplicate * to confirm the equivalence of checking the different futures. */ - while (!(futRec.isDone() || futRep.isDone())) { + while (!futRec.isDone() || !futRep.isDone()) { /* * The token must remain valid, even if this service * is not joined with the met quorum. If fact, @@ -2547,11 +2585,39 @@ futRec.get(1L, TimeUnit.SECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { + /* + * Try the other Future with timeout and cancel + * if not done. 
+ */ + try { + futRep.get(1L, TimeUnit.SECONDS); + } catch(TimeoutException ex) { // Ignore. + } catch(ExecutionException ex) { // Ignore. + } finally { + futRep.cancel(true/* mayInterruptIfRunning */); + } + /* + * Note: Both futures are DONE at this point. + */ } try { futRep.get(10L, TimeUnit.MILLISECONDS); } catch (TimeoutException ignore) { } catch (ExecutionException ignore) { + /* + * Try the other Future with timeout and cancel + * if not done. + */ + try { + futRec.get(1L, TimeUnit.SECONDS); + } catch(TimeoutException ex) { // Ignore. + } catch(ExecutionException ex) { // Ignore. + } finally { + futRec.cancel(true/* mayInterruptIfRunning */); + } + /* + * Note: Both futures are DONE at this point. + */ } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-08 15:14:27
Revision: 7747 http://bigdata.svn.sourceforge.net/bigdata/?rev=7747&view=rev Author: thompsonbry Date: 2014-01-08 15:14:09 +0000 (Wed, 08 Jan 2014) Log Message: ----------- Merged in change set from the 1.3.0 maintenance and development branch in preparation for bringing this feature branch back to the main branch. At revision 7746. merge https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_1_3_0 /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0_MGC --- Merging r7684 through r7746 into /Users/bryan/Documents/workspace No conflicts. See #724 (HA wire pulling and sudden kills) Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7746&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7684&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7746&view=rev Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/AbstractRunningQuery.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/BOpStats.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/LocalChunkMessage.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/engine/RunState.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/fed/EmptyChunkMessage.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/fed/NIOChunkMessage.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/fed/ThickChunkMessage.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/BaseJoinStats.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashIndexOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/HTreeHashJoinUtility.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/HashIndexOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/HashJoinOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/IHashJoinUtility.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashIndex.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashIndexOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/JVMHashJoinUtility.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/JoinVariableNotBoundException.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/NamedSolutionSetStats.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java 
branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/PipelineJoinStats.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/join/SolutionSetHashJoinOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/PathIds.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/SampleBase.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/GroupByOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/GroupByRewriter.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/GroupByState.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/PipelinedAggregationOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/ProjectionOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/SliceOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/journal/Options.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/IBindingSetAccessPath.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/AbstractChunkedResolverator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedArrayIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedArraysIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedConvertingIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedOrderedStriterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedResolvingIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/ChunkedWrappedIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/Chunkerator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/CloseableIteratorWrapper.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/Dechunkerator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/DelegateChunkedIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/GenericChunkedStriterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/IChunkedIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/IChunkedStriterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/MergeFilter.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/PushbackIterator.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/Resolver.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/Striterator.java 
branches/MGC_1_3_0/bigdata/src/java/com/bigdata/util/NT.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/TestAll.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/fed/TestNIOChunkMessage.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/join/AbstractHashJoinUtilityTestCase.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/join/TestPipelineJoin.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java branches/MGC_1_3_0/bigdata/src/test/com/bigdata/striterator/TestAll.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/join/ChunkedMaterializationOp.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/update/ParserStats.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/IPassesMaterialization.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/constraints/SPARQLConstraint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ASTBase.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/AssignmentNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GraphPatternGroup.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/GroupNodeBase.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/NamedSubqueryInclude.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryBase.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryOptimizerEnum.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryRoot.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/SliceNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ValueExpressionListBaseNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SliceServiceFactory.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AbstractChunkSizeHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AbstractQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AnalyticQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/AtOnceHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/BufferChunkCapacityHint.java 
branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/BufferChunkOfChunksCapacityHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/ChunkSizeHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/IQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/OptimizerQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineMaxMessagesPerTaskHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineMaxParallelHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/PipelineQueueCapacityHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/QueryHintRegistry.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBottomUpOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTQueryHintOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTRangeCountOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTSparql11SubqueryOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/ServiceCall.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestQueryHints.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/query-hints-01.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/query-hints-06.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java branches/MGC_1_3_0/bigdata-war/src/html/index.html Added Paths: ----------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java branches/MGC_1_3_0/bigdata/src/java/com/bigdata/striterator/CloseableChunkedIteratorWrapperConverter.java branches/MGC_1_3_0/bigdata/src/resources/deployment/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Berksfile 
branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/CHANGELOG.md branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Gemfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/README.md branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Thorfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Vagrantfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/default.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/chefignore branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/default_test.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/metadata.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/default.rb branches/MGC_1_3_0/bigdata/src/test/com/bigdata/striterator/TestCloseableChunkedIteratorWrapperConverter.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOLimitQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTONEdgesQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/hints/RTOSampleTypeQueryHint.java branches/MGC_1_3_0/bigdata-rdf/src/resources/data/lehigh/LUBM-U1.rdf.gz branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx 
branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestAll.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BAR.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BSBM.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_LUBM.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java Removed Paths: ------------- branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Berksfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/CHANGELOG.md branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Gemfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/README.md branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Thorfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/Vagrantfile branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/attributes/default.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/chefignore branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/files/default/test/default_test.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/metadata.rb branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/ branches/MGC_1_3_0/bigdata/src/resources/deployment/vagrant/systap-bigdata/recipes/default.rb branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx 
branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestAll.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BAR.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BSBM.java branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_LUBM.java Property Changed: ---------------- branches/MGC_1_3_0/ branches/MGC_1_3_0/bigdata/lib/jetty/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba/ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/bop/util/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/jsr166/ branches/MGC_1_3_0/bigdata/src/test/com/bigdata/util/httpd/ branches/MGC_1_3_0/bigdata-compatibility/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/attr/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/disco/ branches/MGC_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/ branches/MGC_1_3_0/bigdata-perf/ branches/MGC_1_3_0/bigdata-perf/btc/ branches/MGC_1_3_0/bigdata-perf/btc/src/resources/ branches/MGC_1_3_0/bigdata-perf/lubm/ branches/MGC_1_3_0/bigdata-perf/uniprot/ branches/MGC_1_3_0/bigdata-perf/uniprot/src/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/MGC_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/MGC_1_3_0/bigdata-rdf/src/samples/ 
branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/MGC_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/MGC_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/MGC_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/MGC_1_3_0/dsi-utils/ branches/MGC_1_3_0/dsi-utils/LEGAL/ branches/MGC_1_3_0/dsi-utils/lib/ branches/MGC_1_3_0/dsi-utils/src/ branches/MGC_1_3_0/dsi-utils/src/java/ branches/MGC_1_3_0/dsi-utils/src/java/it/ branches/MGC_1_3_0/dsi-utils/src/java/it/unimi/ branches/MGC_1_3_0/dsi-utils/src/test/ branches/MGC_1_3_0/dsi-utils/src/test/it/unimi/ branches/MGC_1_3_0/dsi-utils/src/test/it/unimi/dsi/ branches/MGC_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/MGC_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/MGC_1_3_0/osgi/ branches/MGC_1_3_0/src/resources/bin/config/ Property changes on: branches/MGC_1_3_0 ___________________________________________________________________ Modified: svn:ignore - ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI + ant-build src bin bigdata*.jar ant-release standalone test* countersfinal.xml events.jnl .settings *.jnl TestInsertRate.out SYSTAP-BBT-result.txt U10load+query *.hprof com.bigdata.cache.TestHardReferenceQueueWithBatchingUpdates.exp.csv commit-log.txt eventLog dist bigdata-test com.bigdata.rdf.stress.LoadClosureAndQueryTest.*.csv DIST.bigdata-*.tgz REL.bigdata-*.tgz queryLog* queryRunState* sparql.txt benchmark CI bsbm10-dataset.nt.gz bsbm10-dataset.nt.zip Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BIGDATA_RELEASE_1_3_0:7608-7684 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BIGDATA_RELEASE_1_3_0:7608-7746 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 
/branches/READ_CACHE:7215-7271 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/ZK_DISCONNECT_HANDLING:7465-7484 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/MGC_1_3_0/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7608-7684 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7608-7746 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/lib/jetty:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/lib/jetty:7465-7484 Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/AbstractAccessPathOp.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -76,26 +76,26 @@ super(op); } - /** - * @see BufferAnnotations#CHUNK_CAPACITY - */ - protected int getChunkCapacity() { - - return getProperty(Annotations.CHUNK_CAPACITY, - Annotations.DEFAULT_CHUNK_CAPACITY); +// /** +// * @see BufferAnnotations#CHUNK_CAPACITY +// */ +// protected int getChunkCapacity() { +// +// return getProperty(Annotations.CHUNK_CAPACITY, +// Annotations.DEFAULT_CHUNK_CAPACITY); +// +// } +// +// /** +// * @see BufferAnnotations#CHUNK_OF_CHUNKS_CAPACITY +// */ +// protected int getChunkOfChunksCapacity() { +// +// return getProperty(Annotations.CHUNK_OF_CHUNKS_CAPACITY, +// Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); +// +// } - } - - /** - * @see BufferAnnotations#CHUNK_OF_CHUNKS_CAPACITY - */ - protected int getChunkOfChunksCapacity() { - - return getProperty(Annotations.CHUNK_OF_CHUNKS_CAPACITY, - Annotations.DEFAULT_CHUNK_OF_CHUNKS_CAPACITY); - - } - // protected int getFullyBufferedReadThreshold() { // // return getProperty(Annotations.FULLY_BUFFERED_READ_THRESHOLD, @@ -103,14 +103,14 @@ // // } - /** - * @see BufferAnnotations#CHUNK_TIMEOUT - */ - protected long getChunkTimeout() { - - return getProperty(Annotations.CHUNK_TIMEOUT, - Annotations.DEFAULT_CHUNK_TIMEOUT); - - } +// /** +// * @see BufferAnnotations#CHUNK_TIMEOUT +// */ +// protected long getChunkTimeout() { +// +// return getProperty(Annotations.CHUNK_TIMEOUT, +// Annotations.DEFAULT_CHUNK_TIMEOUT); +// +// } } 
Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpBase.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -175,6 +175,7 @@ } + @Override final public Map<String, Object> annotations() { return Collections.unmodifiableMap(annotations); @@ -234,6 +235,7 @@ } + @Override public BOp get(final int index) { return args[index]; @@ -286,6 +288,7 @@ } + @Override public int arity() { return args.length; @@ -297,6 +300,7 @@ * <p> * Note: This is much less efficient than {@link #argIterator()}. */ + @Override final public List<BOp> args() { return Collections.unmodifiableList(Arrays.asList(args)); @@ -309,6 +313,7 @@ * The iterator does not support removal. (This is more efficient than * #args()). */ + @Override final public Iterator<BOp> argIterator() { return new ArgIterator(); @@ -339,6 +344,7 @@ } // shallow copy + @Override public BOp[] toArray() { final BOp[] a = new BOp[args.length]; @@ -475,6 +481,7 @@ // // } + @Override public Object getProperty(final String name) { return annotations.get(name); @@ -543,6 +550,7 @@ } + @Override public BOpBase setProperty(final String name, final Object value) { final BOpBase tmp = (BOpBase) this.clone(); Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -59,8 +59,9 @@ import com.bigdata.rwstore.sector.IMemoryManager; import com.bigdata.striterator.ChunkedFilter; import com.bigdata.striterator.Chunkerator; -import com.bigdata.striterator.CloseableIteratorWrapper; +import com.bigdata.striterator.CloseableChunkedIteratorWrapperConverter; import com.bigdata.striterator.IChunkedIterator; +import com.bigdata.striterator.IChunkedStriterator; import cutthecrap.utils.striterators.ICloseableIterator; @@ -1078,8 +1079,8 @@ } /** - * Convert an {@link IAccessPath#iterator()} into a stream of - * {@link IBindingSet}s. + * Convert an {@link IAccessPath#iterator()} into a stream of chunks of + * {@link IBindingSet}. * * @param src * The iterator draining the {@link IAccessPath}. This will visit @@ -1090,7 +1091,7 @@ * Statistics to be updated as elements and chunks are consumed * (optional). * - * @return The dechunked iterator visiting the solutions. The order of the + * @return An iterator visiting chunks of solutions. The order of the * original {@link IElement}s is preserved. * * @see https://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath @@ -1105,14 +1106,15 @@ // * The array of distinct variables (no duplicates) to be // * extracted from the visited {@link IElement}s. 
@SuppressWarnings({ "rawtypes", "unchecked" }) - static public ICloseableIterator<IBindingSet> solutions( + static public ICloseableIterator<IBindingSet[]> solutions( final IChunkedIterator<?> src, // final IPredicate<?> pred,// // final IVariable<?>[] varsx, final BaseJoinStats stats// ) { - return new CloseableIteratorWrapper( + //return new CloseableIteratorWrapper( + final IChunkedStriterator itr1 = new com.bigdata.striterator.ChunkedStriterator(src).addFilter( // new ChunkedFilter() { new ChunkedFilter<IChunkedIterator<Object>, Object, Object>() { @@ -1160,18 +1162,28 @@ } - })) { + }); + //) { +// +// /** +// * Close the real source if the caller closes the returned iterator. +// */ +// @Override +// public void close() { +// super.close(); +// src.close(); +// } +// }; - /** - * Close the real source if the caller closes the returned iterator. - */ - @Override - public void close() { - super.close(); - src.close(); - } - }; + /* + * Convert from IChunkedIterator<IBindingSet> to + * ICloseableIterator<IBindingSet[]>. This is a fly weight conversion. + */ + final ICloseableIterator<IBindingSet[]> itr2 = new CloseableChunkedIteratorWrapperConverter<IBindingSet>( + itr1); + return itr2; + } /* Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -1,5 +1,29 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop; +import java.util.Iterator; import java.util.LinkedHashSet; /** @@ -7,23 +31,102 @@ */ public class BOpIdFactory implements IdFactory { - private final LinkedHashSet<Integer> ids = new LinkedHashSet<Integer>(); - - private int nextId = 0; - - public void reserve(int id) { - ids.add(id); - } + /** The set of reserved bop identifiers. */ + private LinkedHashSet<Integer> ids; - public int nextId() { + private int nextId = 0; - while (ids.contains(nextId)) { + /** + * Reserve a bop id by adding it to a set of known identifiers that will not + * be issued by {@link #nextId()}. + * + * @param id + * The identifier. + */ + public void reserve(final int id) { + + synchronized (this) { + + if (ids == null) { - nextId++; - - } + // Lazily allocated. + ids = new LinkedHashSet<Integer>(); - return nextId++; - } - + ids.add(id); + + } + + } + + } + + @Override + public int nextId() { + + synchronized (this) { + + if (ids != null) { + + while (ids.contains(nextId)) { + + nextId++; + + } + + } + + return nextId++; + + } + + } + + /** + * Reserve ids used by the predicates or constraints associated with some + * join graph. 
+ * + * @param preds + * The vertices of the join graph. + * @param constraints + * The constraints of the join graph (optional). + */ + public void reserveIds(final IPredicate<?>[] preds, + final IConstraint[] constraints) { + + if (preds == null) + throw new IllegalArgumentException(); + + final BOpIdFactory idFactory = this; + + for (IPredicate<?> p : preds) { + + idFactory.reserve(p.getId()); + + } + + if (constraints != null) { + + for (IConstraint c : constraints) { + + final Iterator<BOp> itr = BOpUtility + .preOrderIteratorWithAnnotations(c); + + while (itr.hasNext()) { + + final BOp y = itr.next(); + + final Integer anId = (Integer) y + .getProperty(BOp.Annotations.BOP_ID); + + if (anId != null) + idFactory.reserve(anId.intValue()); + + } + + } + + } + + } + } \ No newline at end of file Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/BOpUtility.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -35,6 +35,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Set; import org.apache.log4j.Logger; @@ -53,6 +54,7 @@ import cutthecrap.utils.striterators.EmptyIterator; import cutthecrap.utils.striterators.Expander; import cutthecrap.utils.striterators.Filter; +import cutthecrap.utils.striterators.ICloseable; import cutthecrap.utils.striterators.ICloseableIterator; import cutthecrap.utils.striterators.SingleValueIterator; import cutthecrap.utils.striterators.Striterator; @@ -72,7 +74,7 @@ * Pre-order recursive visitation of the operator tree (arguments only, no * annotations). */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public static Iterator<BOp> preOrderIterator(final BOp op) { return new Striterator(new SingleValueIterator(op)) @@ -466,6 +468,8 @@ * The type of the node to be extracted. * * @return A list containing those references. + * + * @see #visitAll(BOp, Class) */ public static <C> List<C> toList(final BOp op, final Class<C> clas) { @@ -483,6 +487,44 @@ } + /** + * Return the sole instance of the specified class. + * + * @param op + * The root of the traversal. + * @param class1 + * The class to look for. + * @return The sole instance of that class. + * @throws NoSuchElementException + * if there is no such instance. + * @throws RuntimeException + * if there is more than one such instance. + */ + public static <C> C getOnly(final BOp op, final Class<C> class1) { + final Iterator<C> it = visitAll(op, class1); + if (!it.hasNext()) + throw new NoSuchElementException("No instance found: class=" + + class1); + final C ret = it.next(); + if (it.hasNext()) + throw new RuntimeException("More than one instance exists: class=" + + class1); + return ret; + } + + /** + * Return an iterator visiting references to all nodes of the given type + * (recursive, including annotations). + * + * @param op + * The root of the operator tree. + * @param clas + * The type of the node to be extracted. + * + * @return A iterator visiting those references. 
+ * + * @see #toList(BOp, Class) + */ @SuppressWarnings("unchecked") public static <C> Iterator<C> visitAll(final BOp op, final Class<C> clas) { @@ -947,9 +989,9 @@ } finally { - if (itr instanceof ICloseableIterator<?>) { + if (itr instanceof ICloseable) { - ((ICloseableIterator<?>) itr).close(); + ((ICloseable) itr).close(); } Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/CoreBaseBOp.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -75,6 +75,7 @@ * <p> * {@inheritDoc} */ + @Override public CoreBaseBOp clone() { final Class<? extends CoreBaseBOp> cls = getClass(); final Constructor<? extends CoreBaseBOp> ctor; @@ -98,14 +99,15 @@ * General contract is a short (non-recursive) representation of the * {@link BOp}. */ + @Override public String toShortString() { final BOp t = this; if (t instanceof IValueExpression<?> || t instanceof IValueExpressionNode) { /* - * Note: toString() is intercepted for a few bops, mainly those with - * a pretty simple structure. This delegates to toString() in those - * cases. + * Note: toShortString() is intercepted for a few bops, mainly those + * with a pretty simple structure. This delegates to toString() in + * those cases. */ return t.toString(); } else { @@ -125,6 +127,7 @@ * Return a non-recursive representation of the arguments and annotations * for this {@link BOp}. */ + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -181,6 +184,7 @@ } } + @Override final public Object getRequiredProperty(final String name) { final Object tmp = getProperty(name); @@ -193,6 +197,7 @@ } + @Override @SuppressWarnings("unchecked") final public <T> T getProperty(final String name, final T defaultValue) { @@ -229,18 +234,22 @@ } + @Override final public int getId() { return (Integer) getRequiredProperty(Annotations.BOP_ID); } - + + @Override final public boolean isController() { - - return getProperty(Annotations.CONTROLLER, false); - + + return getProperty(Annotations.CONTROLLER, + Annotations.DEFAULT_CONTROLLER); + } + @Override final public BOpEvaluationContext getEvaluationContext() { return getProperty(Annotations.EVALUATION_CONTEXT, @@ -251,6 +260,7 @@ /** * <code>true</code> if all arguments and annotations are the same. */ + @Override public boolean equals(final Object other) { if (this == other) @@ -378,6 +388,7 @@ /** * The hash code is based on the hash of the operands (cached). */ + @Override public int hashCode() { int h = hash; Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop; /** @@ -5,6 +28,9 @@ */ public interface IdFactory { + /** + * Issue the next bop identifier. + */ public int nextId(); } Copied: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java (from rev 7746, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java) =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java (rev 0) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -0,0 +1,47 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.bop; + +import java.util.concurrent.atomic.AtomicInteger; + +public class SimpleIdFactory implements IdFactory { + + /** + * Note: The ids are assigned using {@link AtomicInteger#incrementAndGet()} + * so ONE (1) is the first id that will be assigned when we pass in ZERO (0) + * as the initial state of the {@link AtomicInteger}. 
+ */ + private final AtomicInteger nextId = new AtomicInteger(0); + + /** + * {@inheritDoc} + */ + @Override + public int nextId() { + + return nextId.incrementAndGet(); + + } + +} Property changes on: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7608-7684 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7608-7746 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 /branches/ZK_DISCONNECT_HANDLING/bigdata/src/java/com/bigdata/bop/aggregate:7465-7484 Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/ap/SampleIndex.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -106,7 +106,13 @@ /** * Sample offsets are computed randomly. */ - RANDOM; + RANDOM, + /** + * The samples will be dense and may bave a front bias. This mode + * emphasizes the locality of the samples on the index pages and + * minimizes the IO associated with sampling. + */ + DENSE; } /** @@ -323,6 +329,9 @@ seed(), limit, accessPath.getFromKey(), accessPath .getToKey()); break; + case DENSE: + advancer = new DenseSampleAdvancer<E>(); + break; default: throw new UnsupportedOperationException("SampleType=" + sampleType); @@ -339,6 +348,23 @@ } /** + * Dense samples in key order (simple index scan). + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @param <E> + */ + private static class DenseSampleAdvancer<E> extends Advancer<E> { + + private static final long serialVersionUID = 1L; + + @Override + protected void advance(final ITuple<E> tuple) { + // NOP + } + + } + + /** * An advancer pattern which is designed to take evenly distributed samples * from an index. The caller specifies the #of tuples to be sampled. 
This * class estimates the range count of the access path and then computes the Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/constraint/Constraint.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -34,7 +34,7 @@ import com.bigdata.bop.IValueExpressionConstraint; /** - * BOpConstraint that wraps a {@link BooleanValueExpression}. + * {@link Constraint} wraps a {@link BooleanValueExpression}. */ public class Constraint<X> extends BOpBase implements IValueExpressionConstraint<X> { @@ -66,9 +66,8 @@ /** * Required shallow copy constructor. */ - public Constraint(final BOp[] args, - final Map<String, Object> anns) { - + public Constraint(final BOp[] args, final Map<String, Object> anns) { + super(args, anns); if (args.length != 1 || args[0] == null) @@ -83,6 +82,7 @@ super(op); } + @Override @SuppressWarnings("unchecked") public IValueExpression<X> getValueExpression() { @@ -90,6 +90,7 @@ } + @Override public boolean accept(final IBindingSet bs) { // try { Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/HTreeNamedSubqueryOp.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -73,7 +73,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class HTreeNamedSubqueryOp extends PipelineOp { +public class HTreeNamedSubqueryOp extends PipelineOp implements INamedSubqueryOp { static private final transient Logger log = Logger .getLogger(HTreeNamedSubqueryOp.class); @@ -151,7 +151,7 @@ } - public HTreeNamedSubqueryOp(final BOp[] args, NV... annotations) { + public HTreeNamedSubqueryOp(final BOp[] args, final NV... annotations) { this(args, NV.asMap(annotations)); @@ -164,6 +164,7 @@ } + @Override public FutureTask<Void> eval(final BOpContext<IBindingSet> context) { return new FutureTask<Void>(new ControllerTask(this, context)); @@ -266,6 +267,7 @@ /** * Evaluate. */ + @Override public Void call() throws Exception { try { @@ -356,6 +358,7 @@ } + @Override public Void call() throws Exception { // The subquery Copied: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java (from rev 7746, branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java) =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java (rev 0) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/INamedSubqueryOp.java 2014-01-08 15:14:09 UTC (rev 7747) @@ -0,0 +1,42 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2010. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.bop.controller; + +import com.bigdata.bop.join.SolutionSetHashJoinOp; + +/** + * Marker interface for named subquery evaluation. Solutions from the pipeline + * flow through this operator without modification. The subquery is evaluated + * exactly once, the first time this operator is invoked, and the solutions for + * the subquery are written onto a hash index. Those solutions are then joined + * back within the query at latter points in the query plan using a solution set + * hash join. + * + * @see SolutionSetHashJoinOp + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public interface INamedSubqueryOp { + +} Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/controller/JVMNamedSubqueryOp.java 2014-01-07 22:10:34 UTC (rev 7746) +++ branches/MGC_1_3_0/... [truncated message content] |
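The reworked BOpIdFactory in this merge keeps a set of reserved operator identifiers and skips over them when issuing new ones, while reserveIds() walks the predicates and the BOP_ID annotations of the constraints to pre-reserve identifiers already assigned elsewhere in the plan. Below is a minimal sketch of the reserve()/nextId() contract based only on the code shown above; the chosen id values are arbitrary examples, not values from the merge.
{{{
import com.bigdata.bop.BOpIdFactory;

public class BOpIdFactorySketch {

    public static void main(final String[] args) {

        final BOpIdFactory f = new BOpIdFactory();

        // Suppose bopId 1 is already assigned to some operator in the plan.
        f.reserve(1);

        System.out.println(f.nextId()); // 0
        System.out.println(f.nextId()); // 2 -- the reserved id 1 is skipped.
        System.out.println(f.nextId()); // 3

    }

}
}}}
By contrast, the SimpleIdFactory copied into this branch issues identifiers from an AtomicInteger, so ONE (1) is the first id it hands out.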
From: <jer...@us...> - 2014-01-07 22:10:41
Revision: 7746 http://bigdata.svn.sourceforge.net/bigdata/?rev=7746&view=rev Author: jeremy_carroll Date: 2014-01-07 22:10:34 +0000 (Tue, 07 Jan 2014) Log Message: ----------- Added tests (both runtime and static optimizer) for trac 794 to do with SERVICE and BIND Also modified comment in ASTJoinOrderByTypeOptimizer which is doing the wrong thing. Tests currently disabled since the fix is not yet in. (Note the fix is non-trivial, but not conceptually hard either) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-01-07 18:34:06 UTC (rev 7745) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -73,7 +73,7 @@ public IQueryNode optimize(final AST2BOpContext context, final IQueryNode queryNode, final IBindingSet[] bindingSet) { - if (bindingSet == null || bindingSet.length > 1) { + if (bindingSet == null || bindingSet.length != 1) { /* * Used iff there is only one input solution. * Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java 2014-01-07 18:34:06 UTC (rev 7745) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTJoinOrderByTypeOptimizer.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -183,17 +183,20 @@ * * Required joins: * - * 3. Service calls (Bigdata SEARCH) + * 3. Some Service calls (e.g. Bigdata SEARCH) * 4. Subquery-includes * 5. Statement patterns * 7. Sparql11 subqueries * 8. Non-optional subgroups + * 9. Other service calls * + * TODO: the placement of OPTIONALS should really be more complicated than this. + * e.g. consider interaction with SERVICE calls etc. * Optional joins: - * 9. Simple optionals & optional subgroups + * 10. Simple optionals & optional subgroups * - * 10. Assignments - * 11. Post-conditionals + * 11. Assignments + * 12. Post-conditionals * </pre> * Most of this logic was lifted out of {@link AST2BOpUtility}. 
* <p> Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2014-01-07 18:34:06 UTC (rev 7745) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/AbstractOptimizerTestCase.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -64,6 +64,7 @@ import com.bigdata.rdf.sparql.ast.UnionNode; import com.bigdata.rdf.sparql.ast.VarNode; import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; +import com.bigdata.rdf.sparql.ast.service.ServiceNode; public abstract class AbstractOptimizerTestCase extends AbstractASTEvaluationTestCase { @@ -426,6 +427,18 @@ protected FilterNode filter(IValueExpressionNode f) { return new FilterNode(f); } + + protected IValueExpressionNode functionNode(String uri, ValueExpressionNode ... args) { + return new FunctionNode(new URIImpl(uri), null, args); + } + + protected ServiceNode service(TermNode serviceRef, GraphPatternGroup<IGroupMemberNode> groupNode) { + return new ServiceNode(serviceRef, groupNode); + } + + protected AssignmentNode bind(IValueExpressionNode valueNode, VarNode varNode) { + return new AssignmentNode(varNode, valueNode); + } } public AbstractOptimizerTestCase(String name) { Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTMassagedServiceNodeOptimizer.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -0,0 +1,104 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +/** + * Trac 794 concerns interactions between BIND and SERVICE going to a remote + * SPARQL end-point. The service call must be done last. 
+ */ +package com.bigdata.rdf.sparql.ast.optimizers; + + + + +public class TestASTMassagedServiceNodeOptimizer extends AbstractOptimizerTestCase { + + public TestASTMassagedServiceNodeOptimizer(String name) { + super(name); + } + + public TestASTMassagedServiceNodeOptimizer() { + } + @Override + IASTOptimizer newOptimizer() { + return new ASTJoinOrderByTypeOptimizer(); + } + + public void testLeaveBindBeforeService() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + bind(functionNode("eg:foo", varNode(x)), varNode(y) ), + service( constantNode(a), + joinGroupNode( statementPatternNode(varNode(z), constantNode(f), varNode(y)) ) ) + ) + ) ); + + + expected = select( varNode(z), + where ( + joinGroupNode( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + bind(functionNode("eg:foo", varNode(x)), varNode(y) ), + service( constantNode(a), + joinGroupNode( statementPatternNode(varNode(z), constantNode(f), varNode(y)) ) ) + ) + ) ); + + + }}.test(); + } + public void testPutBindBeforeService() { + + new Helper(){{ + + given = select( varNode(z), + where ( + joinGroupNode( + service( constantNode(a), + joinGroupNode( statementPatternNode(varNode(z), constantNode(f), varNode(y)) ) ), + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + bind(functionNode("eg:foo", varNode(x)), varNode(y) ) + ) + ) ); + + + expected = select( varNode(z), + where ( + joinGroupNode( + statementPatternNode(varNode(x), constantNode(c), constantNode(d)), + bind(functionNode("eg:foo", varNode(x)), varNode(y) ), + service( constantNode(a), + joinGroupNode( statementPatternNode(varNode(z), constantNode(f), varNode(y)) ) ) + ) + ) ); + + + }}.test(); + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-01-07 18:34:06 UTC (rev 7745) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractProtocolTest.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -114,6 +114,10 @@ }; private RequestFactory requestFactory = GET; + + protected RequestFactory getRequestFactory() { + return requestFactory; + } @Override public void setUp() throws Exception { super.setUp(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2014-01-07 18:34:06 UTC (rev 7745) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -233,6 +233,8 @@ suite.addTestSuite(TestInsertFilterFalse727.class); suite.addTestSuite(TestCBD731.class); + suite.addTestSuite(TestService794.class); + // SPARQL UPDATE test suite. 
switch(testMode) { Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestService794.java 2014-01-07 22:10:34 UTC (rev 7746) @@ -0,0 +1,180 @@ +/** +Copyright (C) SYSTAP, LLC 2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +package com.bigdata.rdf.sail.webapp; + +import java.io.IOException; + +import junit.framework.Test; + + +/** + * While writing this comment, early January 2014, + * the status is that the two interesting tests + * both fail and are disabled + * {@link #xtestMassageServiceCall()} and {@link #xtestMassageServiceNested1Call()} + * + * Also {@link #xtestServiceSyntaxError()} shows some bug some where in that + * we take legal SPARQL and make it illegal before the service call .... + * + * Some of the other tests show how to use a subselect as a workaround. + * @author jeremycarroll + * + */ +public class TestService794 extends AbstractProtocolTest { + + public TestService794(String name) { + super(name); + } + + + static public Test suite() { + return ProxySuiteHelper.suiteWhenStandalone(TestService794.class,"test.*", + TestMode.quads,TestMode.sids, + TestMode.triples); + } + + /** + * Execute an ASK query including a SERVICE keyword which gets sent back to this server. + * The test succeeeds if the query returns true, and fails otherwise + * @param args + * @throws IOException + */ + private void abstactAskService(String ... 
args) throws IOException { + + setMethodisPostUrlEncodedData(); + serviceRequest("update","PREFIX eg: <http://example.com/a#> INSERT { eg:a eg:p \"rs123\" ; eg:q 123, 100 } WHERE {}"); + + StringBuilder bld = new StringBuilder(); + // Set the base URI to be our sparql end point, for re-entrant queries, + // using the idiom SERVICE <> + bld.append("base <"); + bld.append(m_serviceURL); + bld.append("/sparql>"); + for (String arg:args) { + bld.append('\n'); + bld.append(arg); + } + + System.err.println(bld.toString()); + String result = serviceRequest("query",bld.toString()); + System.err.println(result); + assertTrue(result.contains("true")); + + } + + /** + * @throws IOException + */ + public void testSimpleServiceCall() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "ASK {", + "?x eg:p ?y ", + " SERVICE <> {", + " FILTER ( true )", + "{ SELECT ?x ?y {", + "?x eg:p ?y ", + "} ORDER BY ?y LIMIT 1 }", + "} }"); + } + + /** + * This one is currently broken, see trac794 + * + * Note also there is something unintersting with syntax + * going wrong with some expressions like + * SERVICE <> { + * { SELECT * { + * ?x eg:q ?y + * } + * } + * } + */ + public void xtestMassageServiceCall() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>", + "ASK {", + "?x eg:p ?y ", + "BIND (xsd:integer(substr(?y,3)) as ?yy )", + " SERVICE <> {", + " FILTER (true )", + "{ SELECT ?x ?yy {", + "?x eg:q ?yy ", + "} ORDER BY ?yy LIMIT 1 }", + "} }"); + } + /** + * @throws IOException + */ + public void xtestMassageServiceNested1Call() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>", + "ASK {", + "{ ?x eg:p ?y ", + "BIND (xsd:integer(substr(?y,3)) as ?yy ) }", + " SERVICE <> {", + "{ SELECT ?x ?yy {", + "?x eg:q ?yy ", + "} ORDER BY ?yy LIMIT 1 }", + "} }"); + } + /** + * @throws IOException + */ + public void testMassageServiceNested2Call() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>", + "ASK {", + "{ SELECT ?x ?yy ", + " { ?x eg:p ?y ", + " BIND (xsd:integer(substr(?y,3)) as ?yy ) } }", + " SERVICE <> {", + "{ SELECT ?x ?yy {", + "?x eg:q ?yy ", + "} ORDER BY ?yy LIMIT 1 }", + "} }"); + } + public void testMassageServiceNested3Call() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>", + "ASK {", + "{ SELECT ?x (xsd:integer(substr(?y,3)) as ?yy ) ", + " { ?x eg:p ?y } }", + " SERVICE <> {", + "{ SELECT ?x ?yy {", + "?x eg:q ?yy ", + "} ORDER BY ?yy LIMIT 1 }", + "} }"); + } + public void xtestServiceSyntaxError() throws IOException { + abstactAskService("PREFIX eg: <http://example.com/a#>", + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>", + "ASK {", + "{ SELECT ?x (xsd:integer(substr(?y,3)) as ?yy ) ", + " { ?x eg:p ?y } }", + " SERVICE <> {", + "{ SELECT * {", + "?x eg:q ?yy ", + "} ORDER BY ?yy LIMIT 1 }", + "} }"); + } +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
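All of the TestService794 tests above depend on the re-entrant SERVICE idiom set up by abstactAskService(String...): the base URI of the query is pointed at the test server's own SPARQL end point, so that SERVICE <> resolves back to the same server that is running the test. The following is a minimal, self-contained sketch of that query assembly, assuming a serviceURL argument in place of m_serviceURL and an arbitrary ASK body rather than one of the committed test queries.
{{{
public class ReentrantServiceSketch {

    /**
     * Prefix the query with a base declaration naming our own SPARQL end
     * point so that "SERVICE <>" loops back to this server.
     */
    static String withReentrantBase(final String serviceURL,
            final String... queryLines) {

        final StringBuilder bld = new StringBuilder();

        bld.append("base <").append(serviceURL).append("/sparql>");

        for (String line : queryLines) {
            bld.append('\n').append(line);
        }

        return bld.toString();
    }

    public static void main(final String[] args) {

        System.out.println(withReentrantBase("http://localhost:8080",
                "PREFIX eg: <http://example.com/a#>",
                "ASK {",
                "  ?x eg:p ?y",
                "  SERVICE <> { ?x eg:q ?z }",
                "}"));
    }

}
}}}
This keeps the SERVICE tests self-contained: no external SPARQL end point is needed, yet the remote-service code path is still exercised.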
From: <tho...@us...> - 2014-01-07 18:34:13
Revision: 7745 http://bigdata.svn.sourceforge.net/bigdata/?rev=7745&view=rev Author: thompsonbry Date: 2014-01-07 18:34:06 +0000 (Tue, 07 Jan 2014) Log Message: ----------- Modified the QueryEngine to support a listener interface that allows unit tests to hook the IRunningQuery object in order to observe details about the manner in which the query was actually executed (BOpStats, IQueryAttributes), check for proper release of native memory, etc. More work on the RTO integration. See #64 (RTO) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-01-07 18:30:50 UTC (rev 7744) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2014-01-07 18:34:06 UTC (rev 7745) @@ -38,6 +38,7 @@ import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -74,6 +75,7 @@ import com.bigdata.resources.IndexManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; +import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.DaemonThreadFactory; import com.bigdata.util.concurrent.IHaltable; @@ -1886,10 +1888,21 @@ */ protected void halt(final AbstractRunningQuery q) { + boolean interrupted = false; lock.lock(); try { + // notify listener(s) + try { + fireEvent(q); + } catch (Throwable t) { + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + // Defer impact until outside of this critical section. + interrupted = true; + } + } + // insert/touch the LRU of recently finished queries. doneQueries.put(q.getQueryId(), q.getFuture()); @@ -1909,6 +1922,9 @@ } + if (interrupted) + Thread.currentThread().interrupt(); + } /** @@ -1923,7 +1939,8 @@ * @throws RuntimeException * if the query halted with an error. */ - private void handleDoneQuery(final UUID queryId,final Future<Void> doneQueryFuture) { + private void handleDoneQuery(final UUID queryId, + final Future<Void> doneQueryFuture) { try { // Check the Future. doneQueryFuture.get(); @@ -1945,6 +1962,90 @@ } } + /** + * Listener API for {@link IRunningQuery} life cycle events (start/halt). + * <p> + * Note: While this interface makes it possible to catch the start and halt + * of an {@link IRunningQuery}, it imposes an overhead on the query engine + * and the potential for significant latency and other problems depending on + * the behavior of the {@link IRunningQueryListener}. This interface was + * added to facilitate certain test suites which could not otherwise be + * written. It should not be used for protection code. + */ + public interface IRunningQueryListener { + + void notify(IRunningQuery q); + + } + + /** Registered listeners. 
*/ + private final CopyOnWriteArraySet<IRunningQueryListener> listeners = new CopyOnWriteArraySet<IRunningQueryListener>(); + + /** Add a query listener. */ + public void addListener(final IRunningQueryListener l) { + + if (l == null) + throw new IllegalArgumentException(); + + listeners.add(l); + + } + + /** Remove a query listener. */ + public void removeListener(final IRunningQueryListener l) { + + if (l == null) + throw new IllegalArgumentException(); + + listeners.remove(l); + + } + + /** + * Send an event to all registered listeners. + */ + private void fireEvent(final IRunningQuery q) { + + if (q == null) + throw new IllegalArgumentException(); + + if(listeners.isEmpty()) { + + // NOP + return; + + } + + final IRunningQueryListener[] a = listeners + .toArray(new IRunningQueryListener[0]); + + for (IRunningQueryListener l : a) { + + final IRunningQueryListener listener = l; + + try { + + // send event. + listener.notify(q); + + } catch (Throwable t) { + + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + + // Propagate interrupt. + throw new RuntimeException(t); + + } + + // Log and ignore. + log.error(t, t); + + } + + } + + } + /* * RunningQuery factory. */ Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-07 18:30:50 UTC (rev 7744) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-07 18:34:06 UTC (rev 7745) @@ -28,6 +28,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -39,11 +40,14 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.BOpEvaluationContext; import com.bigdata.bop.BOpIdFactory; +import com.bigdata.bop.BadBOpIdTypeException; +import com.bigdata.bop.DuplicateBOpIdException; import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; import com.bigdata.bop.IValueExpression; import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; +import com.bigdata.bop.NoBOpIdException; import com.bigdata.bop.PipelineOp; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.ap.SampleIndex.SampleType; @@ -97,6 +101,19 @@ */ public class AST2BOpRTO extends AST2BOpJoins { + public interface Annotations extends AST2BOpJoins.Annotations { + + /** + * Annotation is used to tag the {@link StatementPatternNode}s in a + * {@link JoinGroupNode} with the identified assigned to the + * corresponding {@link IPredicate}. This makes it possible to lookup + * the SPO from the {@link IPredicate} when the RTO hands us back an + * ordered join path. + */ + String PREDICATE_ID = "PredicateId"; + + } + /** * When <code>true</code>, the RTO will only accept simple joins into the * join graph. Simple joins includes triples-mode joins and filters that do @@ -222,7 +239,7 @@ // final Set<StatementPatternNode> sps = new LinkedHashSet<StatementPatternNode>(); // The predicates for the join graph. @SuppressWarnings("rawtypes") - final Set<Predicate> preds = new LinkedHashSet<Predicate>(); + final LinkedList<Predicate> preds = new LinkedList<Predicate>(); // The constraints for the join graph. final List<IConstraint> constraints = new LinkedList<IConstraint>(); // The #of JOINs accepted into the RTO's join group. 
@@ -296,7 +313,6 @@ // Something the RTO can handle. sp = (StatementPatternNode) sp.clone();// TODO Use destructive move. -// sp.setId(ctx.nextId()); // assign id so we can reference back later. rtoJoinGroup.addChild(sp); // add to group. naccepted++; /* @@ -310,17 +326,24 @@ * when we take the selected join path from the RTO and * compile it into a query plan to fully execute the join * group. + * + * Note: This assigns ids to the predicates as a + * side-effect. Those ids are assigned by the + * AST2BOpContext's BOpIdFactory. You can not rely on + * specific ID values being assigned, so you need to build a + * map and track the correspondence between the SPs and the + * predicates. */ - final Predicate<?> pred = AST2BOpUtility.toPredicate(sp, ctx); - // final int joinId = ctx.nextId(); - // - // // annotations for this join. - // final List<NV> anns = new LinkedList<NV>(); - // - // anns.add(new NV(BOp.Annotations.BOP_ID, joinId)); + final Predicate<?> pred = AST2BOpUtility.toPredicate(sp, + ctx); preds.add(pred); + // tag the SP with predicate's ID. + sp.setProperty(Annotations.PREDICATE_ID, pred.getId()); if (attachedConstraints != null) { - // RTO will figure out where to attach these constraints. + /* + * The RTO will figure out where to attach these + * constraints. + */ constraints.addAll(attachedConstraints); } @@ -492,8 +515,8 @@ final JoinGroupNode rtoJoinGroup = (JoinGroupNode) joinGraph .getRequiredProperty(JoinGraph.Annotations.JOIN_GROUP); -// // Build an index over the bopIds in that JoinGroupNode. -// final Map<Integer, BOp> index = getIndex(rtoJoinGroup); + // Build an index over the bopIds in that JoinGroupNode. + final Map<Integer, StatementPatternNode> index = getIndex(rtoJoinGroup); // Factory avoids reuse of bopIds assigned to the predicates. final BOpIdFactory idFactory = new BOpIdFactory(); @@ -530,34 +553,16 @@ final boolean optional = pred.isOptional(); - /* - * Lookup the AST node for that predicate. - * - * Note: The predicates are assigned bopIds by the RTO starting with - * ONE (1). Therefore we substract out ONE from the predicate's id - * to find its index into the join group. - * - * TODO This assumes that the join group does not contain anything - * other than the SPs for the predicates that we are using., - * - * TODO HINTS: The Predicate's query hints should the hints for that - * specific join (aks the SP or other type of IJoinNode), not the - * hints for the JoinGroupNode or the JoinGraph operator. We could - * just pass the AST nodes themselves from the JoinGroupNode. That - * might make things easier, even if it make the query serialization - * fatter on a cluster. - */ -// final ASTBase astNode = (ASTBase) index.get(pred.getId()); + // Lookup the AST node for that predicate. + final StatementPatternNode sp = index.get(pred.getId()); - final ASTBase astNode = (ASTBase) rtoJoinGroup.get(pred.getId() - 1); - left = join(left, // pred, // optional ? new LinkedHashSet<IVariable<?>>(doneSet) : doneSet, // attachedJoinConstraints == null ? null : Arrays .asList(attachedJoinConstraints),// - astNode.getQueryHints(),// + sp.getQueryHints(),// ctx); } @@ -616,61 +621,60 @@ } -// /** -// * Return an index from the {@link BOp.Annotations#BOP_ID} to the -// * {@link BOp}. -// * <p> -// * {@link BOp}s should form directed acyclic graphs, but this is not -// * strictly enforced. The recursive traversal iterators declared by this -// * class do not protect against loops in the operator tree. 
However, -// * {@link #getIndex(BOp)} detects and report loops based on duplicate -// * {@link Annotations#BOP_ID}s -or- duplicate {@link BOp} references. -// * -// * @param op -// * A {@link BOp}. -// * -// * @return The index, which is immutable and thread-safe. -// * -// * @throws DuplicateBOpIdException -// * if there are two or more {@link BOp}s having the same -// * {@link Annotations#BOP_ID}. -// * @throws BadBOpIdTypeException -// * if the {@link Annotations#BOP_ID} is not an {@link Integer}. -// * @throws NoBOpIdException -// * if a {@link PipelineOp} does not have a -// * {@link Annotations#BOP_ID}. -// */ -// static private Map<Integer,BOp> getIndex(final JoinGroupNode op) { -// if(op == null) -// throw new IllegalArgumentException(); -// final LinkedHashMap<Integer, BOp> map = new LinkedHashMap<Integer, BOp>(); -// final Iterator<BOp> itr = op.argIterator(); -// while (itr.hasNext()) { -// final BOp t = itr.next(); -//// if(!(t instanceof PipelineOp)) -//// throw new NotPipelineOpException(t.toString()); -// final Object x = t.getProperty(BOp.Annotations.BOP_ID); -// if (x == null) { -// throw new NoBOpIdException(t.toString()); -// } -// if (!(x instanceof Integer)) { -// throw new BadBOpIdTypeException("Must be Integer, not: " -// + x.getClass() + ": " + BOp.Annotations.BOP_ID); -// } -// final Integer id = (Integer) t.getProperty(BOp.Annotations.BOP_ID); -// final BOp conflict = map.put(id, t); -// if (conflict != null) { -// /* -// * BOp appears more than once. This is not allowed for -// * pipeline operators. If you are getting this exception for -// * a non-pipeline operator, you should remove the bopId. -// */ -// throw new DuplicateBOpIdException("duplicate id=" + id -// + " for " + conflict + " and " + t); -// } -// } -// // wrap to ensure immutable and thread-safe. -// return Collections.unmodifiableMap(map); -// } + /** + * Return a map from the {@link Annotations#PREDICATE_ID} to the + * corresponding {@link StatementPatternNode}. + * + * @param op + * The join group. + * + * @return The index, which is immutable and thread-safe. + * + * @throws DuplicateBOpIdException + * if there are two or more {@link BOp}s having the same + * {@link Annotations#PREDICATE_ID}. + * @throws BadBOpIdTypeException + * if the {@link Annotations#PREDICATE_ID} is not an + * {@link Integer}. + * @throws NoBOpIdException + * if a {@link StatementPatternNode} does not have a + * {@link Annotations#PREDICATE_ID}. + */ + static private Map<Integer, StatementPatternNode> getIndex( + final JoinGroupNode op) { + if (op == null) + throw new IllegalArgumentException(); + final LinkedHashMap<Integer, StatementPatternNode> map = new LinkedHashMap<Integer, StatementPatternNode>(); + final Iterator<IGroupMemberNode> itr = op.iterator(); + while (itr.hasNext()) { + final BOp t = itr.next(); + if(!(t instanceof StatementPatternNode)) { + // Skip non-SP nodes. + continue; + } + final StatementPatternNode sp = (StatementPatternNode) t; + final Object x = t.getProperty(Annotations.PREDICATE_ID); + if (x == null) { + throw new NoBOpIdException(t.toString()); + } + if (!(x instanceof Integer)) { + throw new BadBOpIdTypeException("Must be Integer, not: " + + x.getClass() + ": " + Annotations.PREDICATE_ID); + } + final Integer id = (Integer) x; + final BOp conflict = map.put(id, sp); + if (conflict != null) { + /* + * BOp appears more than once. This is not allowed for + * pipeline operators. If you are getting this exception for + * a non-pipeline operator, you should remove the bopId. 
+ */ + throw new DuplicateBOpIdException("duplicate id=" + id + + " for " + conflict + " and " + t); + } + } + // wrap to ensure immutable and thread-safe. + return Collections.unmodifiableMap(map); + } } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java 2014-01-07 18:30:50 UTC (rev 7744) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataAndSPARQLTestCase.java 2014-01-07 18:34:06 UTC (rev 7745) @@ -83,16 +83,26 @@ public abstract class AbstractDataAndSPARQLTestCase extends AbstractASTEvaluationTestCase { + public AbstractDataAndSPARQLTestCase() { + } + + public AbstractDataAndSPARQLTestCase(final String name) { + super(name); + } + public class AbsHelper { protected final String queryStr; + /** * This is the astContainer of the last query executed. */ protected ASTContainer astContainer; - public AbsHelper(String queryStr) { - this.queryStr = queryStr; + public AbsHelper(final String queryStr) { + + this.queryStr = queryStr; + } protected AbstractTripleStore getTripleStore() { @@ -101,17 +111,34 @@ } - protected void compareTupleQueryResults(final TupleQueryResult queryResult, final TupleQueryResult expectedResult, final boolean checkOrder) - throws QueryEvaluationException { - AbstractQueryEngineTestCase.compareTupleQueryResults(getName(), - "", store, astContainer, queryResult, expectedResult, - false, checkOrder); - } + protected void compareTupleQueryResults( + final TupleQueryResult queryResult, + final TupleQueryResult expectedResult, final boolean checkOrder) + throws QueryEvaluationException { + AbstractQueryEngineTestCase.compareTupleQueryResults(getName(), "", + store, astContainer, queryResult, expectedResult, false, + checkOrder); + + } - long loadData(final InputStream is, RDFFormat format, String uri) { - final RDFParser rdfParser = RDFParserRegistry.getInstance().get(format).getParser(); + /** + * Load data from an input stream. + * + * @param is + * The stream (required). + * @param format + * The format (required). + * @param uri + * The baseURL (required). + * @return The #of triples read from the stream. + */ + long loadData(final InputStream is, final RDFFormat format, + final String uri) { + final RDFParser rdfParser = RDFParserRegistry.getInstance() + .get(format).getParser(); + rdfParser.setValueFactory(store.getValueFactory()); rdfParser.setVerifyData(true); @@ -122,7 +149,12 @@ final AddStatementHandler handler = new AddStatementHandler(); - handler.setContext(new URIImpl(uri)); + if (getTripleStore().isQuads()) { + + // Set the default context. 
+ handler.setContext(new URIImpl(uri)); + + } rdfParser.setRDFHandler(handler); @@ -170,7 +202,7 @@ public AddStatementHandler() { - buffer = new StatementBuffer<Statement>(store, 100/* capacity */); + buffer = new StatementBuffer<Statement>(store, 1000/* capacity */); } @@ -180,6 +212,7 @@ } + @Override public void handleStatement(final Statement stmt) throws RDFHandlerException { @@ -214,11 +247,4 @@ } - public AbstractDataAndSPARQLTestCase() { - } - - public AbstractDataAndSPARQLTestCase(String name) { - super(name); - } - } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java 2014-01-07 18:30:50 UTC (rev 7744) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/AbstractDataDrivenSPARQLTestCase.java 2014-01-07 18:34:06 UTC (rev 7745) @@ -71,13 +71,11 @@ import java.net.URL; import java.util.LinkedHashSet; import java.util.Set; +import java.util.zip.GZIPInputStream; +import java.util.zip.ZipInputStream; import org.apache.log4j.Logger; -import org.openrdf.model.Resource; import org.openrdf.model.Statement; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.model.impl.URIImpl; import org.openrdf.query.GraphQueryResult; import org.openrdf.query.QueryEvaluationException; import org.openrdf.query.TupleQueryResult; @@ -90,23 +88,15 @@ import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.query.resultio.TupleQueryResultParser; import org.openrdf.rio.RDFFormat; -import org.openrdf.rio.RDFHandlerException; import org.openrdf.rio.RDFParser; import org.openrdf.rio.RDFParser.DatatypeHandling; -import org.openrdf.rio.RDFParserFactory; -import org.openrdf.rio.RDFParserRegistry; import org.openrdf.rio.Rio; -import org.openrdf.rio.helpers.RDFHandlerBase; import org.openrdf.rio.helpers.StatementCollector; import com.bigdata.bop.engine.AbstractQueryEngineTestCase; -import com.bigdata.rdf.model.StatementEnum; -import com.bigdata.rdf.rio.StatementBuffer; import com.bigdata.rdf.sail.sparql.Bigdata2ASTSPARQLParser; import com.bigdata.rdf.sparql.ast.ASTContainer; -import com.bigdata.rdf.sparql.ast.AbstractASTEvaluationTestCase; import com.bigdata.rdf.sparql.ast.QueryRoot; -import com.bigdata.rdf.sparql.ast.eval.AbstractDataAndSPARQLTestCase.AbsHelper; import com.bigdata.rdf.store.AbstractTripleStore; /** @@ -231,8 +221,9 @@ final String[] dataFileURLs, final String resultFileURL, final boolean checkOrder) throws Exception { - super(getResourceAsString(queryFileURL)); + super(getResourceAsString(queryFileURL)); + if (log.isInfoEnabled()) log.info("\ntestURI:\n" + testURI); @@ -527,12 +518,64 @@ */ protected long loadData(final String resource) { - return loadData(getResourceAsStream(resource), RDFFormat.forFileName(resource), new File(resource).toURI().toString()); + if (log.isInfoEnabled()) + log.info("Loading " + resource); + final String baseURL = new File(resource).toURI().toString(); + + InputStream is = null; + try { + + is = getResourceAsStream(resource); + + final RDFFormat rdfFormat = RDFFormat.forFileName(resource); + + if (rdfFormat == null) + throw new RuntimeException("Unknown format: resource=" + + resource); + + // final RDFFormat rdfFormat = guessFormat(new File(resource), + // null/* default */); + + return loadData(is, rdfFormat, 
baseURL); + + } finally { + if (is != null) { + try { + is.close(); + } catch (IOException e) { + log.error("Could not close: resource=" + resource, e); + } + is = null; + } + } + + } } +// private static RDFFormat guessFormat(final File file, +// final RDFFormat defaultFormat) { +// +// final String n = file.getName(); +// +// RDFFormat fmt = RDFFormat.forFileName(n); +// +// if (fmt == null && n.endsWith(".zip")) { +// fmt = RDFFormat.forFileName(n.substring(0, n.length() - 4)); +// } +// +// if (fmt == null && n.endsWith(".gz")) { +// fmt = RDFFormat.forFileName(n.substring(0, n.length() - 3)); +// } +// +// if (fmt == null) // fallback +// fmt = defaultFormat; +// +// return fmt; +// +// } private static InputStream getResourceAsStream(final String resource) { @@ -595,6 +638,20 @@ if (is == null) throw new RuntimeException("Not found: " + resource); + if (resource.toLowerCase().endsWith(".gz")) { + + try { + is = new GZIPInputStream(is); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } else if (resource.toLowerCase().endsWith(".zip")) { + + is = new ZipInputStream(is); + + } + return is; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
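The IRunningQueryListener hook added to QueryEngine in this revision exists so that unit tests can grab the IRunningQuery once it halts and inspect how the query was actually executed (BOpStats, IQueryAttributes). A minimal sketch of that usage pattern follows; the QueryEngine, the query UUID, and the runQuery callback are placeholders supplied by the test harness, not part of this commit.
{{{
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;

import com.bigdata.bop.engine.IRunningQuery;
import com.bigdata.bop.engine.QueryEngine;

public class RunningQueryCaptureSketch {

    /**
     * Run some query (via the supplied callback) and return the
     * IRunningQuery captured for the given query UUID, or null if the
     * listener never saw that query.
     */
    public static IRunningQuery capture(final QueryEngine queryEngine,
            final UUID queryId, final Runnable runQuery) {

        final AtomicReference<IRunningQuery> captured =
                new AtomicReference<IRunningQuery>();

        final QueryEngine.IRunningQueryListener l =
                new QueryEngine.IRunningQueryListener() {
            @Override
            public void notify(final IRunningQuery q) {
                if (queryId.equals(q.getQueryId())) {
                    // Remember the query for post-mortem inspection.
                    captured.set(q);
                }
            }
        };

        queryEngine.addListener(l);
        try {
            // Submit and await the query under test (placeholder).
            runQuery.run();
        } finally {
            // Always unhook: the listener is fired from inside halt().
            queryEngine.removeListener(l);
        }

        return captured.get();
    }

}
}}}
As the javadoc in the diff warns, the listener is invoked on the query engine's halt path, so it should do as little work as possible and must always be removed again, which is why the remove happens in a finally block.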
From: <tho...@us...> - 2014-01-07 18:28:32
Revision: 7743 http://bigdata.svn.sourceforge.net/bigdata/?rev=7743&view=rev Author: thompsonbry Date: 2014-01-07 18:28:20 +0000 (Tue, 07 Jan 2014) Log Message: ----------- Beginnings of test suite for the RTO. This test suite does not yet run in CI. See #64 (RTO). Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-U1.rdf branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-U1.rdf.gz branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BAR.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_BSBM.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/TestRTO_LUBM.java Added: 
branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/AbstractRTOTestCase.java 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,211 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Sep 4, 2011 + */ + +package com.bigdata.rdf.sparql.ast.eval.rto; + +import java.util.Arrays; +import java.util.UUID; + +import org.apache.log4j.Logger; + +import com.bigdata.bop.BOpUtility; +import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.engine.IRunningQuery; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.engine.QueryEngine.IRunningQueryListener; +import com.bigdata.bop.fed.QueryEngineFactory; +import com.bigdata.bop.joinGraph.rto.JGraph; +import com.bigdata.bop.joinGraph.rto.JoinGraph; +import com.bigdata.bop.joinGraph.rto.Path; +import com.bigdata.bop.rdf.joinGraph.TestJoinGraphOnLubm; +import com.bigdata.journal.IBTreeManager; +import com.bigdata.rdf.sparql.ast.ASTContainer; +import com.bigdata.rdf.sparql.ast.QueryHints; +import com.bigdata.rdf.sparql.ast.eval.AbstractDataDrivenSPARQLTestCase; +import com.bigdata.rdf.sparql.ast.optimizers.IASTOptimizer; + +/** + * Data driven test suite for the Runtime Query Optimizer (RTO). + * <p> + * Note: We reduce the stochastic behavior of the algorithm by using non-random + * sampling techniques. However, the main correctness issues for the RTO are the + * handling of different kinds of join groups, not the specific join orderings + * that it produces. The join orderings depend on how the cutoff joins are + * sampled. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: TestBasicQuery.java 6440 2012-08-14 17:57:33Z thompsonbry $ + * + * @see JGraph + * @see JoinGraph + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/64">Runtime + * Query Optimization</a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/258">Integrate + * RTO into SAIL</a> + * + * TODO See the stubbed out test suite for the RTO for some examples of + * join groups that it should be handling. + * + * TODO The RTO also needs to handle FILTERs that require materialization. + * This should be the subject of a test suite. + * + * TODO The RTO should be extended (together with test coverage) to handle + * more interesting kinds of join groups (optionls, sub-selects, property + * paths, SERVICE calls, etc). + * <p> + * Note: When handling sub-groups, etc., the RTO needs to flow solutions + * into the sub-query. 
+ * + * TODO When adding an {@link IASTOptimizer} for the RTO, modify this class + * to test for the inclusion of the JoinGraphNode for the RTO. + * + * TODO Automate the larger data scale tests on these data sets as part of + * CI and provide automated reporting over time on those performance runs. + * Once this is done, there will be no more reason to keep the older + * {@link TestJoinGraphOnLubm} and related tests. + */ +public class AbstractRTOTestCase extends AbstractDataDrivenSPARQLTestCase { + + private final static Logger log = Logger.getLogger(AbstractRTOTestCase.class); + + /** + * + */ + public AbstractRTOTestCase() { + } + + /** + * @param name + */ + public AbstractRTOTestCase(String name) { + super(name); + } + + /** + * Helper class supports inspection of the terminated {@link IRunningQuery}. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + protected static class MyQueryListener implements IRunningQueryListener { + + private final UUID queryId; + private volatile IRunningQuery q; + + public MyQueryListener(final UUID queryId) { + + if(queryId == null) + throw new IllegalArgumentException(); + + this.queryId = queryId; + + } + + @Override + public void notify(final IRunningQuery q) { + + if(q.getQueryId().equals(queryId)) { + + this.q = q; + + } + + } + + public IRunningQuery getRunningQuery() { + + final IRunningQuery q = this.q; + + if (q == null) + fail("Not found."); + + return q; + + } + + } + + /** + * Helper to run the test and examine the RTO determined solution. + * + * @param expected + * The expected join ordering. + * @param helper + */ + protected void assertSameJoinOrder(final int[] expected, + final TestHelper helper) throws Exception { + + /* + * Assign a UUID to this query so we can get at its outcome. + */ + final UUID queryId = UUID.randomUUID(); + + helper.getASTContainer().setQueryHint(QueryHints.QUERYID, + queryId.toString()); + + final QueryEngine queryEngine = QueryEngineFactory + .getExistingQueryController((IBTreeManager) helper + .getTripleStore().getIndexManager()); + + // Hook up our listener and run the test. + final ASTContainer astContainer; + final MyQueryListener l = new MyQueryListener(queryId); + try { + // Register the listener. + queryEngine.addListener(l); + // Run the test. + astContainer = helper.runTest(); + } finally { + // Unregister the listener. + queryEngine.removeListener(l); + } + +// final QueryRoot optimizedAST = astContainer.getOptimizedAST(); + + final PipelineOp queryPlan = astContainer.getQueryPlan(); + + final JoinGraph joinGraph = BOpUtility.getOnly(queryPlan, + JoinGraph.class); + + assertNotNull(joinGraph); + + // The join path selected by the RTO. 
+ final Path path = joinGraph.getPath(l.getRunningQuery()); + + if (log.isInfoEnabled()) + log.info("path=" + path); + + if (!Arrays.equals(expected, path.getVertexIds())) + fail("RTO JOIN ORDER" // + + ": expected=" + Arrays.toString(expected)// + + ", actual=" + Arrays.toString(path.getVertexIds())); + + // joinGraph.getQueryPlan(q) + + } + +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,17 @@ +PREFIX : <http://test/bar#> +#SELECT ?employeeNum (COUNT(?type) AS ?total) +SELECT (COUNT(*) as ?count) +WHERE { + # Control all RTO parameters for repeatable behavior. + hint:Group hint:optimizer "Runtime". + hint:Group hint:RTO-sampleType "DENSE". + hint:Group hint:RTO-limit "100". + hint:Group hint:RTO-nedges "1". + ?order a <http://test/bar#Order> . + ?order <http://test/bar#orderItems> ?item . + ?item <http://test/bar#beverageType> "Beer" . + ?item <http://test/bar#beverageType> ?type . + ?order <http://test/bar#employee> ?employee . + ?employee <http://test/bar#employeeNum> ?employeeNum . +} +#GROUP BY ?employeeNum Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BAR-Q1.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">8039</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,26 @@ +# BSBM Q1 on pc100. + +PREFIX bsbm-inst: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> + +SELECT (COUNT(DISTINCT *) as ?count) +#SELECT DISTINCT ?product ?label +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + ?product rdfs:label ?label . + ?product a <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType7> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature33> . 
+ ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature22> . + ?product bsbm:productPropertyNumeric1 ?value1 . + FILTER (?value1 > 282) + } +ORDER BY ?label +LIMIT 10 Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q1.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">1</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,27 @@ +# BSBM Q10 on pc100. + +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> +PREFIX dc: <http://purl.org/dc/elements/1.1/> + +SELECT DISTINCT ?offer ?price +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + ?offer bsbm:product <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product27> . + ?offer bsbm:vendor ?vendor . + ?offer dc:publisher ?vendor . + ?vendor bsbm:country <http://downlode.org/rdf/iso-3166/countries#US> . + ?offer bsbm:deliveryDays ?deliveryDays . + FILTER (?deliveryDays <= 3) + ?offer bsbm:price ?price . + ?offer bsbm:validTo ?date . 
+ FILTER (?date > "2008-06-20T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime> ) +} +ORDER BY xsd:double(str(?price)) +LIMIT 10 Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q10.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,66 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="offer" /> + <variable name="price" /> + </head> + <results> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer1780</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">400.97</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer1257</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">2512.78</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer1138</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">2570.83</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer997</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">2590.78</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer233</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">2701.29</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer1636</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">9227.48</literal> + </binding> + </result> + <result> + <binding name="offer"> + <uri>http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromVendor1/Offer573</uri> + </binding> + <binding name="price"> + <literal datatype="http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/USD">9279.54</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,34 @@ +# BSBM Q2 on pc100. 
+ +PREFIX bsbm-inst: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX dc: <http://purl.org/dc/elements/1.1/> + +SELECT (COUNT(*) as ?count) +#SELECT ?label ?comment ?producer ?productFeature ?propertyTextual1 ?propertyTextual2 ?propertyTextual3 +# ?propertyNumeric1 ?propertyNumeric2 ?propertyTextual4 ?propertyTextual5 ?propertyNumeric4 +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> rdfs:label ?label . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> rdfs:comment ?comment . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:producer ?p . + ?p rdfs:label ?producer . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> dc:publisher ?p . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productFeature ?f . + ?f rdfs:label ?productFeature . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyTextual1 ?propertyTextual1 . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyTextual2 ?propertyTextual2 . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyTextual3 ?propertyTextual3 . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyNumeric1 ?propertyNumeric1 . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyNumeric2 ?propertyNumeric2 . 
+ OPTIONAL { <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyTextual4 ?propertyTextual4 } + OPTIONAL { <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyTextual5 ?propertyTextual5 } + OPTIONAL { <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer3/Product97> bsbm:productPropertyNumeric4 ?propertyNumeric4 } +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q2.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">30</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,31 @@ +# BSBM Q3 on pc100. + +PREFIX bsbm-inst: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> + +SELECT (COUNT(*) as ?count) +#SELECT ?product ?label +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + ?product rdfs:label ?label . + ?product a <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType11> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature54> . + ?product bsbm:productPropertyNumeric1 ?p1 . + FILTER ( ?p1 > 147 ) + ?product bsbm:productPropertyNumeric3 ?p3 . + FILTER (?p3 < 91 ) + OPTIONAL { + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature442> . 
+ ?product rdfs:label ?testVar } + FILTER (!bound(?testVar)) +} +ORDER BY ?label +LIMIT 10 Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q3.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">0</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,37 @@ +# BSBM Q4 on pc100. + +PREFIX bsbm-inst: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> + +SELECT DISTINCT ?product ?label ?propertyTextual +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + { + ?product rdfs:label ?label . + ?product rdf:type <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType19> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature158> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature865> . + ?product bsbm:productPropertyTextual1 ?propertyTextual . + ?product bsbm:productPropertyNumeric1 ?p1 . + FILTER ( ?p1 > 217 ) + } UNION { + ?product rdfs:label ?label . + ?product rdf:type <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType19> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature158> . + ?product bsbm:productFeature <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductFeature872> . + ?product bsbm:productPropertyTextual1 ?propertyTextual . + ?product bsbm:productPropertyNumeric2 ?p2 . 
+ FILTER ( ?p2> 124 ) + } +} +ORDER BY ?label +OFFSET 5 +LIMIT 10 \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q4.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,11 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="product" /> + <variable name="label" /> + <variable name="propertyTextual" /> + </head> + <results> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,29 @@ +# BSBM Q5 on pc100. + +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> + +SELECT (COUNT(DISTINCT *) as ?count) +#SELECT DISTINCT ?product ?productLabel +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + ?product rdfs:label ?productLabel . + FILTER (<http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product5> != ?product) + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product5> bsbm:productFeature ?prodFeature . + ?product bsbm:productFeature ?prodFeature . + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product5> bsbm:productPropertyNumeric1 ?origProperty1 . + ?product bsbm:productPropertyNumeric1 ?simProperty1 . + FILTER (?simProperty1 < (?origProperty1 + 120) && ?simProperty1 > (?origProperty1 - 120)) + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product5> bsbm:productPropertyNumeric2 ?origProperty2 . + ?product bsbm:productPropertyNumeric2 ?simProperty2 . 
+ FILTER (?simProperty2 < (?origProperty2 + 170) && ?simProperty2 > (?origProperty2 - 170)) +} +ORDER BY ?productLabel +LIMIT 5 Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q5.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">1</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,39 @@ +# BSBM Q7 on pc100. + +PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> +PREFIX rev: <http://purl.org/stuff/rev#> +PREFIX foaf: <http://xmlns.com/foaf/0.1/> +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX dc: <http://purl.org/dc/elements/1.1/> + +SELECT (COUNT(*) as ?count) +#SELECT ?productLabel ?offer ?price ?vendor ?vendorTitle ?review ?revTitle +# ?reviewer ?revName ?rating1 ?rating2 +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product7> rdfs:label ?productLabel . + OPTIONAL { + ?offer bsbm:product <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product7> . + ?offer bsbm:price ?price . + ?offer bsbm:vendor ?vendor . + ?vendor rdfs:label ?vendorTitle . + ?vendor bsbm:country <http://downlode.org/rdf/iso-3166/countries#DE> . + ?offer dc:publisher ?vendor . + ?offer bsbm:validTo ?date . + FILTER (?date > "2008-06-20T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime> ) + } + OPTIONAL { + ?review bsbm:reviewFor <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product7> . + ?review rev:reviewer ?reviewer . + ?reviewer foaf:name ?revName . + ?review dc:title ?revTitle . + OPTIONAL { ?review bsbm:rating1 ?rating1 . } + OPTIONAL { ?review bsbm:rating2 ?rating2 . 
} + } +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q7.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">6</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,31 @@ +# BSBM Q8 on pc100. + +PREFIX bsbm: <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/> +PREFIX dc: <http://purl.org/dc/elements/1.1/> +PREFIX rev: <http://purl.org/stuff/rev#> +PREFIX foaf: <http://xmlns.com/foaf/0.1/> + +SELECT (COUNT(*) as ?count) +#SELECT ?title ?text ?reviewDate ?reviewer ?reviewerName ?rating1 ?rating2 ?rating3 ?rating4 +WHERE { + + # Control all RTO parameters for repeatable behavior. + hint:Query hint:optimizer "Runtime". + hint:Query hint:RTO-sampleType "DENSE". + hint:Query hint:RTO-limit "100". + hint:Query hint:RTO-nedges "1". + + ?review bsbm:reviewFor <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/dataFromProducer1/Product43> . + ?review dc:title ?title . + ?review rev:text ?text . + FILTER langMatches( lang(?text), "EN" ) + ?review bsbm:reviewDate ?reviewDate . + ?review rev:reviewer ?reviewer . + ?reviewer foaf:name ?reviewerName . + OPTIONAL { ?review bsbm:rating1 ?rating1 . } + OPTIONAL { ?review bsbm:rating2 ?rating2 . } + OPTIONAL { ?review bsbm:rating3 ?rating3 . } + OPTIONAL { ?review bsbm:rating4 ?rating4 . 
} +} +ORDER BY DESC(?reviewDate) +LIMIT 20 Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/BSBM-Q8.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">11</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,18 @@ +# LUBM Q2. +PREFIX ub: <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#> +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +#SELECT ?x ?y ?z +SELECT (COUNT(*) as ?count) +WHERE{ + # Control all RTO parameters for repeatable behavior. + hint:Group hint:optimizer "Runtime". + hint:Group hint:RTO-sampleType "DENSE". + hint:Group hint:RTO-limit "100". + hint:Group hint:RTO-nedges "1". + ?x a ub:GraduateStudent . + ?y a ub:University . + ?z a ub:Department . + ?x ub:memberOf ?z . + ?z ub:subOrganizationOf ?y . + ?x ub:undergraduateDegreeFrom ?y +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q2.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">0</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,17 @@ +# LUBM Q8. +PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +PREFIX ub: <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#> +#SELECT ?x ?y ?z +SELECT (COUNT(*) as ?count) +WHERE{ + # Control all RTO parameters for repeatable behavior. + hint:Group hint:optimizer "Runtime". + hint:Group hint:RTO-sampleType "DENSE". 
+ hint:Group hint:RTO-limit "100". + hint:Group hint:RTO-nedges "1". + ?y a ub:Department . + ?x a ub:Student; + ub:memberOf ?y . + ?y ub:subOrganizationOf <http://www.University0.edu> . + ?x ub:emailAddress ?z . +} \ No newline at end of file Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q8.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">6463</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.rq 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,18 @@ +# LUBM Q9. +PREFIX ub: <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#> +#SELECT ?x ?y ?z +SELECT (COUNT(*) as ?count) +WHERE { + # Control all RTO parameters for repeatable behavior. + hint:Group hint:optimizer "Runtime". + hint:Group hint:RTO-sampleType "DENSE". + hint:Group hint:RTO-limit "100". + hint:Group hint:RTO-nedges "1". + ?x a ub:Student . # v0 + ?y a ub:Faculty . # v1 + ?z a ub:Course . # v2 + ?x ub:advisor ?y . # v3 + ?y ub:teacherOf ?z . # v4 + ?x ub:takesCourse ?z . 
# v5 + FILTER (?x != ?y) +} Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-Q9.srx 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,14 @@ +<?xml version="1.0"?> +<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#"> + <head> + <variable name="count" /> + </head> + <results> + <result> + <binding name="count"> + <literal datatype="http://www.w3.org/2001/XMLSchema#integer">134</literal> + </binding> + </result> + </results> +</sparql> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-U1.rdf =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-U1.rdf (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/rto/LUBM-U1.rdf 2014-01-07 18:28:20 UTC (rev 7743) @@ -0,0 +1,201803 @@ +<?xml version="1.0" encoding="UTF-8"?> +<rdf:RDF + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:bigdata="http://www.bigdata.com/rdf#"> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Dean"> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Faculty"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Professor"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> + <intersectionOf xmlns="http://www.w3.org/2002/07/owl#" rdf:nodeID="t173"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#degreeFrom"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">has a degree from</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#degreeFrom"/> + <inverseOf xmlns="http://www.w3.org/2002/07/owl#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#hasAlumnus"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">university department</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" 
rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Department"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Organization"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Director"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">director</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Director"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> + <intersectionOf xmlns="http://www.w3.org/2002/07/owl#" rdf:nodeID="t159"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#doctoralDegreeFrom"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">has a doctoral degree from</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#degreeFrom"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#doctoralDegreeFrom"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#emailAddress"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#DatatypeProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">can be reached at</label> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#emailAddress"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">Employee</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> + <intersectionOf xmlns="http://www.w3.org/2002/07/owl#" rdf:nodeID="t160"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Faculty"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label 
xmlns="http://www.w3.org/2000/01/rdf-schema#">faculty member</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Faculty"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#FullProfessor"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">full professor</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Faculty"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#FullProfessor"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Professor"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateCourse"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">Graduate Level Courses</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Course"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateCourse"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Work"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">graduate student</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#GraduateStudent"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:nodeID="t163"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#hasAlumnus"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" 
rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#University"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">has as an alumnus</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#hasAlumnus"/> + <inverseOf xmlns="http://www.w3.org/2002/07/owl#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#degreeFrom"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#headOf"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">is the head of</label> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#headOf"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#memberOf"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#worksFor"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Institute"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">institute</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Institute"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Organization"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#JournalArticle"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">journal article</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Article"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#JournalArticle"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Publication"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Lecturer"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">lecturer</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl"> + <rdf:type 
rdf:resource="http://www.w3.org/2002/07/owl#Ontology"/> + <comment xmlns="http://www.w3.org/2000/01/rdf-schema#">An university ontology for benchmark tests</comment> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">Univ-bench Ontology</label> + <versionInfo xmlns="http://www.w3.org/2002/07/owl#">univ-bench-ontology-owl, ver April 1, 2004</versionInfo> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AdministrativeStaff"> + <rdf:type rdf:resource="http://www.w3.org/2000/01/rdf-schema#Class"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#Class"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">administrative staff worker</label> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#AdministrativeStaff"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Employee"/> + <subClassOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.w3.org/2000/01/rdf-schema#Resource"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#advisor"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">is being advised by</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Professor"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#advisor"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#affiliatedOrganizationOf"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Organization"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">is affiliated with</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Organization"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#affiliatedOrganizationOf"/> +</rdf:Description> + +<rdf:Description rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#affiliateOf"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ObjectProperty"/> + <domain xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Organization"/> + <label xmlns="http://www.w3.org/2000/01/rdf-schema#">is affiliated with</label> + <range xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#Person"/> + <subPropertyOf xmlns="http://www.w3.org/2000/01/rdf-schema#" rdf:resource="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#affiliateOf"/> +</rdf:Description> + +<rdf:Description 
rdf:about="http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#age"> + <rdf:type rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"/> + <rdf:type rdf:resource="http://www.w3.org/2002/07/owl#DatatypeProperty"/> + <domain xmlns="htt... [truncated message content] |
From: <tho...@us...> - 2014-01-07 18:24:47
Revision: 7742 http://bigdata.svn.sourceforge.net/bigdata/?rev=7742&view=rev Author: thompsonbry Date: 2014-01-07 18:24:40 +0000 (Tue, 07 Jan 2014) Log Message: ----------- @Overrides and javadoc. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2014-01-07 11:05:13 UTC (rev 7741) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2014-01-07 18:24:40 UTC (rev 7742) @@ -208,6 +208,10 @@ * will be no paths identified by the optimizer and the final path length * becomes zero. * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/64">Runtime + * Query Optimization</a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/258">Integrate + * RTO into SAIL</a> * @see <a * href="http://www-db.informatik.uni-tuebingen.de/files/research/pathfinder/publications/rox-demo.pdf"> * ROX </a> @@ -286,7 +290,7 @@ * @todo unit test when the join graph has a single vertex. */ public JGraph(final IPredicate<?>[] v, final IConstraint[] constraints, - final SampleType sampleType) { + final SampleType sampleType) { if (v == null) throw new IllegalArgumentException(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2014-01-07 11:05:13 UTC (rev 7741) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2014-01-07 18:24:40 UTC (rev 7742) @@ -50,6 +50,7 @@ import com.bigdata.bop.ap.SampleIndex.SampleType; import com.bigdata.bop.controller.AbstractSubqueryOp; import com.bigdata.bop.engine.AbstractRunningQuery; +import com.bigdata.bop.engine.BOpStats; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.rdf.sparql.ast.IJoinNode; @@ -206,6 +207,9 @@ * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> + * + * TODO This could also be put on a {@link BOpStats} interface, + * which is the other way for accessing shared state. */ public interface Attributes { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java 2014-01-07 11:05:13 UTC (rev 7741) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Vertex.java 2014-01-07 18:24:40 UTC (rev 7742) @@ -83,6 +83,7 @@ } + @Override public String toString() { return "Vertex{pred=" + pred + ",sample=" + sample + "}"; @@ -92,6 +93,7 @@ /** * Equals is based on a reference test. */ + @Override public boolean equals(Object o) { return this == o; } @@ -100,6 +102,7 @@ * The hash code is just the {@link BOp.Annotations#BOP_ID} of the * associated {@link IPredicate}. 
*/ + @Override public int hashCode() { return pred.getId(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mar...@us...> - 2014-01-07 11:05:25
Revision: 7741 http://bigdata.svn.sourceforge.net/bigdata/?rev=7741&view=rev Author: martyncutcher Date: 2014-01-07 11:05:13 +0000 (Tue, 07 Jan 2014) Log Message: ----------- Change future checks to avoid deadlock if socket send blocks. It has not been possible to test this locally since failure has only been observed in CI on EC2 Modified Paths: -------------- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java Modified: branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-06 21:12:19 UTC (rev 7740) +++ branches/MGC_1_3_0/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2014-01-07 11:05:13 UTC (rev 7741) @@ -1944,8 +1944,14 @@ * also ignored. We want to continue this loop until * both Futures are done. Interrupts are not trapped, so * an interrupt will still exit the loop. + * + * It appears that it is possible for futSnd to be blocked + * and not generate an error. If we do not exit the loop + * and check the futRec future in this case then we coul loop + * continuously. This does rather beg the question of + * whether we should only be checking futRec at this stage. */ - while (!futSnd.isDone() || !futRec.isDone()) { + while (!(futSnd.isDone() || futRec.isDone())) { /* * Make sure leader's quorum token remains valid for * ALL writes. @@ -2491,7 +2497,7 @@ @Override public Void call() throws Exception { - + // wrap the messages together. final HAMessageWrapper wrappedMsg = new HAMessageWrapper( req, snd, msg); @@ -2524,8 +2530,11 @@ * also ignored. We want to continue this loop until * both Futures are done. Interrupts are not trapped, so * an interrupt will still exit the loop. + * + * TODO: check the comparative logic with this and robustReplicate + * to confirm the equivalence of checking the different futures. */ - while (!futRec.isDone() || !futRep.isDone()) { + while (!(futRec.isDone() || futRep.isDone())) { /* * The token must remain valid, even if this service * is not joined with the met quorum. If fact, @@ -2547,7 +2556,7 @@ } /* - * Note: Both futures are DONE at this point. However, + * Note: Both futures are DONE (or not - check condition above) at this point. However, * we want to check the remote Future for the downstream * service first in order to accurately report the * service that was the source of a pipeline replication This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
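The heart of this change is the loop predicate: the old form !futSnd.isDone() || !futRec.isDone() keeps spinning until both futures are done, so a send future that blocks forever pins the loop, while the new form !(futSnd.isDone() || futRec.isDone()) drops out as soon as either future completes and then inspects the futures. A small, self-contained sketch of the difference, using plain java.util.concurrent futures as stand-ins for the pipeline's send/receive futures (the timings and the demo class itself are illustrative, not code from the patch):

{{{
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FutureLoopDemo {

    public static void main(final String[] args) throws Exception {

        final ExecutorService exec = Executors.newFixedThreadPool(2);

        // Stand-in for a socket send that blocks indefinitely (the failure
        // mode observed in CI on EC2).
        final Future<Void> futSnd = exec.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Thread.sleep(Long.MAX_VALUE);
                return null;
            }
        });

        // Stand-in for the receive side, which completes quickly.
        final Future<Void> futRec = exec.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Thread.sleep(100/* ms */);
                return null;
            }
        });

        // Old predicate -- never exits while the send is stuck:
        // while (!futSnd.isDone() || !futRec.isDone()) { ... }

        // New predicate -- exits as soon as EITHER future is done, after
        // which the futures are checked (futRec first, per the patch).
        while (!(futSnd.isDone() || futRec.isDone())) {
            // Stand-in for the quorum token checks done in the real loop.
            Thread.sleep(10/* ms */);
        }

        System.out.println("loop exited: futRec.isDone()=" + futRec.isDone()
                + ", futSnd.isDone()=" + futSnd.isDone());

        futSnd.cancel(true/* mayInterruptIfRunning */);

        exec.shutdownNow();
    }

}
}}}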
From: <tho...@us...> - 2014-01-06 21:12:25
|
Revision: 7740 http://bigdata.svn.sourceforge.net/bigdata/?rev=7740&view=rev Author: thompsonbry Date: 2014-01-06 21:12:19 +0000 (Mon, 06 Jan 2014) Log Message: ----------- javadoc, @Override Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2014-01-06 19:10:46 UTC (rev 7739) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/store/DataLoader.java 2014-01-06 21:12:19 UTC (rev 7740) @@ -654,8 +654,11 @@ * Load from an input stream. * * @param is + * The input stream (required). * @param baseURL + * The base URL (required). * @param rdfFormat + * The format (required). * @return * @throws IOException */ @@ -671,11 +674,16 @@ } /** - * Load from a {@link URL}. + * Load from a {@link URL}. If in quads mode, the triples in the default + * graph will be inserted into the named graph associate with the specified + * <code>url</code>. * * @param url + * The URL (required). * @param baseURL + * The base URL (required). * @param rdfFormat + * The {@link RDFFormat} (required). * @return * @throws IOException */ @@ -898,7 +906,7 @@ if (fmt == null) // fallback fmt = rdfFormat; - + InputStream is = null; try { @@ -1007,7 +1015,8 @@ // loader.setFlush(false); // add listener to log progress. loader.addRioLoaderListener( new RioLoaderListener() { - + + @Override public void processingNotification( final RioLoaderEvent e ) { /* * This reports as statements are parsed. Depending on how @@ -1628,5 +1637,5 @@ ServiceProviderHook.forceLoad(); } - + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 19:10:55
|
Revision: 7739 http://bigdata.svn.sourceforge.net/bigdata/?rev=7739&view=rev Author: thompsonbry Date: 2014-01-06 19:10:46 +0000 (Mon, 06 Jan 2014) Log Message: ----------- Checkpoint on the RTO integration - See #64 (RTO). I have reworked the AST2BOpUtility, AST2BOpJoins, and AST2BOpFilters classes to support reuse of AST2BOpJoins#join() within the RTO and I have verified (for one query using FILTER (?x != ?y)) that the RTO will now generate a final query plan which includes conditional routing operators to materialize variables for filters. However, the sampling logic for cutoff joins has not yet been modified and will ignore any filters that require materialization (or perhaps just die on them). The old versus new integration point is configurable in AST2BOpRTO so the old behavior can be trivially restored. I have also added several static booleans into that class to allow us to gradually enable more capabilities in the RTO integration. The join graph test suite was not running. It is now hooked into CI. However, there is not much in there and at least part of it might be going away (for "fast join graphs"). The bop, AST, SPARQL evaluation test suites are all green. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/BOpIdFactory.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -1,5 +1,29 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop; +import java.util.Iterator; import java.util.LinkedHashSet; /** @@ -7,23 +31,102 @@ */ public class BOpIdFactory implements IdFactory { - private final LinkedHashSet<Integer> ids = new LinkedHashSet<Integer>(); - - private int nextId = 0; - - public void reserve(int id) { - ids.add(id); - } + /** The set of reserved bop identifiers. */ + private LinkedHashSet<Integer> ids; - public int nextId() { + private int nextId = 0; - while (ids.contains(nextId)) { + /** + * Reserve a bop id by adding it to a set of known identifiers that will not + * be issued by {@link #nextId()}. + * + * @param id + * The identifier. + */ + public void reserve(final int id) { + + synchronized (this) { + + if (ids == null) { - nextId++; - - } + // Lazily allocated. + ids = new LinkedHashSet<Integer>(); - return nextId++; - } - + ids.add(id); + + } + + } + + } + + @Override + public int nextId() { + + synchronized (this) { + + if (ids != null) { + + while (ids.contains(nextId)) { + + nextId++; + + } + + } + + return nextId++; + + } + + } + + /** + * Reserve ids used by the predicates or constraints associated with some + * join graph. + * + * @param preds + * The vertices of the join graph. + * @param constraints + * The constraints of the join graph (optional). + */ + public void reserveIds(final IPredicate<?>[] preds, + final IConstraint[] constraints) { + + if (preds == null) + throw new IllegalArgumentException(); + + final BOpIdFactory idFactory = this; + + for (IPredicate<?> p : preds) { + + idFactory.reserve(p.getId()); + + } + + if (constraints != null) { + + for (IConstraint c : constraints) { + + final Iterator<BOp> itr = BOpUtility + .preOrderIteratorWithAnnotations(c); + + while (itr.hasNext()) { + + final BOp y = itr.next(); + + final Integer anId = (Integer) y + .getProperty(BOp.Annotations.BOP_ID); + + if (anId != null) + idFactory.reserve(anId.intValue()); + + } + + } + + } + + } + } \ No newline at end of file Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/IdFactory.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop; /** @@ -5,6 +28,9 @@ */ public interface IdFactory { + /** + * Issue the next bop identifier. 
+ */ public int nextId(); } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/SimpleIdFactory.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -0,0 +1,47 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.bop; + +import java.util.concurrent.atomic.AtomicInteger; + +public class SimpleIdFactory implements IdFactory { + + /** + * Note: The ids are assigned using {@link AtomicInteger#incrementAndGet()} + * so ONE (1) is the first id that will be assigned when we pass in ZERO (0) + * as the initial state of the {@link AtomicInteger}. + */ + private final AtomicInteger nextId = new AtomicInteger(0); + + /** + * {@inheritDoc} + */ + @Override + public int nextId() { + + return nextId.incrementAndGet(); + + } + +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/PartitionedJoinGroup.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -1018,24 +1018,25 @@ /* * Reserve ids used by the join graph or its constraints. */ - { - for (IPredicate<?> p : preds) { - idFactory.reserve(p.getId()); - } - if (constraints != null) { - for (IConstraint c : constraints) { - final Iterator<BOp> itr = BOpUtility - .preOrderIteratorWithAnnotations(c); - while (itr.hasNext()) { - final BOp y = itr.next(); - final Integer anId = (Integer) y - .getProperty(BOp.Annotations.BOP_ID); - if (anId != null) - idFactory.reserve(anId.intValue()); - } - } - } - } + idFactory.reserveIds(preds, constraints); +// { +// for (IPredicate<?> p : preds) { +// idFactory.reserve(p.getId()); +// } +// if (constraints != null) { +// for (IConstraint c : constraints) { +// final Iterator<BOp> itr = BOpUtility +// .preOrderIteratorWithAnnotations(c); +// while (itr.hasNext()) { +// final BOp y = itr.next(); +// final Integer anId = (Integer) y +// .getProperty(BOp.Annotations.BOP_ID); +// if (anId != null) +// idFactory.reserve(anId.intValue()); +// } +// } +// } +// } // figure out which constraints are attached to which predicates. 
final IConstraint[][] assignedConstraints = PartitionedJoinGroup Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JoinGraph.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -29,6 +29,7 @@ import java.util.LinkedHashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; @@ -37,11 +38,11 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.BOpContext; -import com.bigdata.bop.BOpIdFactory; import com.bigdata.bop.BOpUtility; import com.bigdata.bop.IBindingSet; import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IQueryAttributes; import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; @@ -51,7 +52,12 @@ import com.bigdata.bop.engine.AbstractRunningQuery; import com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; -import com.bigdata.bop.joinGraph.PartitionedJoinGroup; +import com.bigdata.rdf.sparql.ast.IJoinNode; +import com.bigdata.rdf.sparql.ast.JoinGroupNode; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpContext; +import com.bigdata.rdf.sparql.ast.eval.AST2BOpRTO; +import com.bigdata.rdf.sparql.ast.optimizers.IASTOptimizer; +import com.bigdata.util.NT; import com.bigdata.util.concurrent.Haltable; import cutthecrap.utils.striterators.ICloseableIterator; @@ -88,10 +94,11 @@ */ public interface Annotations extends PipelineOp.Annotations { - /** - * The variables which are projected out of the join graph. - */ - String SELECTED = JoinGraph.class.getName() + ".selected"; + /** + * The variables to be projected out of the join graph (optional). When + * <code>null</code>, all variables will be projected out. + */ + String SELECTED = JoinGraph.class.getName() + ".selected"; /** * The vertices of the join graph, expressed an an {@link IPredicate}[] @@ -136,12 +143,65 @@ String DEFAULT_SAMPLE_TYPE = SampleType.RANDOM.name(); + /** + * The set of variables that are known to have already been materialized + * in the context in which the RTO was invoked. + * + * FIXME In order to support left-to-right evaluation fully, the + * {@link JGraph} needs to accept this, track it as it binds variables, + * and pass it through when doing cutoff joins to avoid pipeline + * materialization steps for variables that are already known to be + * materialized. Otherwise the RTO will assume that it needs to + * materialize everything that needs to be materialized for a FILTER and + * thus do too much work (which is basically the assumption of bottom-up + * evaluation, or if you prefer that it is executing in its own little + * world). + */ + String DONE_SET = JoinGraph.class.getName() + ".doneSet"; + +// /** +// * The query hints from the dominating AST node (if any). These query +// * hints will be passed through and made available when we compile the +// * query plan once the RTO has decided on the join ordering. While the +// * RTO is running, it needs to override many of the query hints for the +// * {@link IPredicate}s, {@link PipelineJoin}s, etc. 
in order to ensure +// * that the cutoff evaluation semantics are correctly applied while it +// * is exploring the plan state space for the join graph. +// */ +// String AST_QUERY_HINTS = JoinGraph.class.getName() + ".astQueryHints"; + + /** + * The AST {@link JoinGroupNode} for the joins and filters that we are + * running through the RTO (required). + * + * FIXME This should be set by an ASTRTOOptimizer. That class should + * rewrite the original join group, replacing some set of joins with a + * JoinGraphNode which implements {@link IJoinNode} and gets hooked into + * AST2BOpUtility#convertJoinGroup() normally rather than through + * expectional processing. This will simplify the code and adhere to the + * general {@link IASTOptimizer} pattern and avoid problems with cloning + * children out of the {@link JoinGroupNode} when we set it up to run + * the RTO. [Eventually, we will need to pass this in rather than the + * {@link IPredicate}[] in order to handle JOINs that are not SPs, e.g., + * sub-selects, etc.] + */ + String JOIN_GROUP = JoinGraph.class.getName() + ".joinGroup"; + + /** + * An {@link NT} object specifying the namespace and timestamp of the KB + * view against which the RTO is running. This is necessary in order to + * reconstruct the {@link AST2BOpContext} when it comes time to evaluate + * either a cutoff join involving filters that need materialization or + * the selected join path. + */ + String NT = JoinGraph.class.getName() + ".nt"; + } /** - * Query attribute names for the {@link JoinGraph}. The fully qualified name - * of the attribute is formed by appending the attribute name to the - * "bopId-", where <code>bopId</code> is the value returned by + * {@link IQueryAttributes} names for the {@link JoinGraph}. The fully + * qualified name of the attribute is formed by appending the attribute name + * to the "bopId-", where <code>bopId</code> is the value returned by * {@link BOp#getId()} * * @author <a href="mailto:tho...@us...">Bryan @@ -168,6 +228,10 @@ } + /* + * JoinGraph operator annotations. + */ + /** * @see Annotations#SELECTED */ @@ -223,6 +287,24 @@ } + /** + * Return the set of variables that are known to have already been + * materialized at the point in the overall query plan where the RTO is + * being executed. + * + * @see Annotations#DONE_SET + */ + @SuppressWarnings("unchecked") + public Set<IVariable<?>> getDoneSet() { + + return (Set<IVariable<?>>) getRequiredProperty(Annotations.DONE_SET); + + } + + /* + * IQueryAttributes + */ + /** * Return the computed join path. * @@ -301,15 +383,15 @@ super(args, anns); - // required property. - final IVariable<?>[] selected = (IVariable[]) getProperty(Annotations.SELECTED); + // optional property. +// final IVariable<?>[] selected = (IVariable[]) getProperty(Annotations.SELECTED); +// +// if (selected == null) +// throw new IllegalArgumentException(Annotations.SELECTED); +// +// if (selected.length == 0) +// throw new IllegalArgumentException(Annotations.SELECTED); - if (selected == null) - throw new IllegalArgumentException(Annotations.SELECTED); - - if (selected.length == 0) - throw new IllegalArgumentException(Annotations.SELECTED); - // required property. final IPredicate<?>[] vertices = (IPredicate[]) getProperty(Annotations.VERTICES); @@ -325,6 +407,18 @@ if (getNEdges() <= 0) throw new IllegalArgumentException(Annotations.NEDGES); + /* + * TODO Check DONE_SET, NT, JOIN_NODES. These annotations are required + * for the new code path. We should check for their presence. 
However, + * the old code path is used by some unit tests which have not yet been + * updated and do not supply these annotations. + */ +// // Required. +// getDoneSet(); +// +// // Required. +// getRequiredProperty(Annotations.NT); + if (!isController()) throw new IllegalArgumentException(); @@ -345,7 +439,6 @@ } - /** * Evaluation of a {@link JoinGraph}. * @@ -400,11 +493,11 @@ final Map<PathIds, EdgeSample> edgeSamples = new LinkedHashMap<PathIds, EdgeSample>(); // Find the best join path. - final Path p = g.runtimeOptimizer(context.getRunningQuery() + final Path path = g.runtimeOptimizer(context.getRunningQuery() .getQueryEngine(), getLimit(), getNEdges(), edgeSamples); // Set attribute for the join path result. - setPath(context.getRunningQuery(), p); + setPath(context.getRunningQuery(), path); // Set attribute for the join path samples. setSamples(context.getRunningQuery(), edgeSamples); @@ -413,20 +506,11 @@ final long elapsed_queryOptimizer = mark - begin; - // Factory avoids reuse of bopIds assigned to the predicates. - final BOpIdFactory idFactory = new BOpIdFactory(); - /* - * Generate the query from the join path. - * - * FIXME Update this using StaticAnalysis logic. Also, both this and - * the JGraph need to handle triples versus named graph versus - * default graph APs. Further, JGraph should handle filters that - * require conditional materialization. + * Generate the query from the selected join path. */ - final PipelineOp queryOp = PartitionedJoinGroup.getQuery(idFactory, - false/* distinct */, getSelected(), p.getPredicates(), - getConstraints()); + final PipelineOp queryOp = AST2BOpRTO.compileJoinGraph(context + .getRunningQuery().getQueryEngine(), JoinGraph.this, path); // Set attribute for the join path samples. setQueryPlan(context.getRunningQuery(), queryOp); @@ -482,8 +566,9 @@ /* * Run the query. * - * @todo pass in the source binding sets here and also when sampling the - * vertices. + * TODO Pass in the source binding sets here and also when sampling the + * vertices? Otherwise it is as if we are doing bottom-up evaluation (in + * which case the doneSet should be empty on entry). */ ICloseableIterator<IBindingSet[]> subquerySolutionItr = null; @@ -505,13 +590,9 @@ null/* mergeSolution */, null/* selectVars */, null/* constraints */, null/* stats */); -// System.out.println("nout=" + nout); - // verify no problems. runningSubquery.get(); -// System.out.println("Future Ok"); - } catch (Throwable t) { if (Haltable.isTerminationByInterrupt(t)) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/TestAll.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/TestAll.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -47,7 +47,7 @@ /** * @param arg0 */ - public TestAll(String arg0) { + public TestAll(final String arg0) { super(arg0); @@ -114,6 +114,9 @@ // high level query optimization and evaluation. suite.addTest(com.bigdata.bop.controller.TestAll.suite()); + // join graph processing (RTO, etc). + suite.addTest(com.bigdata.bop.joinGraph.TestAll.suite()); + /* * Note: This is tested later once we have gone through the core unit * tests for the services. 
Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestAll.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -45,7 +45,7 @@ /** * @param arg0 */ - public TestAll(String arg0) { + public TestAll(final String arg0) { super(arg0); @@ -64,7 +64,8 @@ suite.addTestSuite(TestJoinGraph.class); // runtime query optimizer behavior. - suite.addTestSuite(TestJGraph.class); + // FIXME This test suite is empty. Either test at the AST eval level or add tests here. +// suite.addTestSuite(TestJGraph.class); return suite; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJGraph.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -60,15 +60,15 @@ /** * @param name */ - public TestJGraph(String name) { + public TestJGraph(final String name) { super(name); } - public void test_something() { - - fail("write tests"); - - } +// public void test_something() { +// +// fail("write tests"); +// +// } // /** // * Test ability to recognize when there is a predicate without any shared Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/rto/TestJoinGraph.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -34,6 +34,7 @@ import com.bigdata.bop.Constant; import com.bigdata.bop.IConstraint; import com.bigdata.bop.IPredicate; +import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; import com.bigdata.bop.Var; import com.bigdata.bop.ap.Predicate; @@ -75,6 +76,7 @@ }; final IConstraint[] constraints = null; final JoinGraph joinGraph = new JoinGraph(new BOp[0],// + new NV(JoinGraph.Annotations.SELECTED, new IVariable[]{}),// new NV(JoinGraph.Annotations.VERTICES, vertices),// new NV(JoinGraph.Annotations.CONTROLLER, true), // new NV(JoinGraph.Annotations.EVALUATION_CONTEXT, Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -28,7 +28,6 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import com.bigdata.bop.BOp; @@ -38,6 +37,7 @@ import com.bigdata.bop.IdFactory; import com.bigdata.bop.NamedSolutionSetRefUtility; import com.bigdata.bop.PipelineOp; +import com.bigdata.bop.SimpleIdFactory; import 
com.bigdata.bop.engine.IRunningQuery; import com.bigdata.bop.engine.QueryEngine; import com.bigdata.bop.fed.QueryEngineFactory; @@ -79,7 +79,7 @@ public class AST2BOpContext implements IdFactory, IEvaluationContext { /** - * The {@link ASTContainer} + * The {@link ASTContainer} and never <code>null</code>. */ public final ASTContainer astContainer; @@ -88,7 +88,7 @@ * * @see #nextId() */ - private final AtomicInteger idFactory; + private final IdFactory idFactory; /** * The KB instance. @@ -368,8 +368,9 @@ /** * - * @param queryRoot - * The root of the query. + * @param astContainer + * The top-level {@link ASTContainer} for the query or update + * request to be evaluated (required). * @param db * The KB instance. * @@ -382,26 +383,32 @@ * {@link FunctionRegistry}. */ public AST2BOpContext(final ASTContainer astContainer, - final AbstractTripleStore db) { + final AbstractTripleStore db) { + this(astContainer, db, new SimpleIdFactory()); + + } + + // Note: Exposed to AST2BOpRTO + AST2BOpContext(final ASTContainer astContainer, + final AbstractTripleStore db, final IdFactory idFactory) { + if (astContainer == null) throw new IllegalArgumentException(); if (db == null) throw new IllegalArgumentException(); + if (idFactory == null) + throw new IllegalArgumentException(); + this.astContainer = astContainer; this.db = db; this.optimizers = new DefaultOptimizerList(); - /* - * Note: The ids are assigned using incrementAndGet() so ONE (1) is the - * first id that will be assigned when we pass in ZERO (0) as the - * initial state of the AtomicInteger. - */ - this.idFactory = new AtomicInteger(0); + this.idFactory = idFactory; this.queryEngine = QueryEngineFactory.getQueryController(db .getIndexManager()); @@ -496,7 +503,8 @@ @Override public int nextId() { - return idFactory.incrementAndGet(); + return idFactory.nextId(); +// return idFactory.incrementAndGet(); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -523,10 +523,11 @@ // remove any terms already materialized terms.removeAll(alreadyMaterialized); - if (c instanceof INeedsMaterialization && ((INeedsMaterialization) c).getRequirement() == Requirement.ALWAYS) { - - // add any new terms to the list of already materialized - alreadyMaterialized.addAll(terms); + if (c instanceof INeedsMaterialization + && ((INeedsMaterialization) c).getRequirement() == Requirement.ALWAYS) { + + // add any new terms to the list of already materialized + alreadyMaterialized.addAll(terms); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -220,7 +220,7 @@ anns.add(new NV(Predicate.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.SHARDED)); - pred = (Predicate) pred.setProperty( + pred = (Predicate<?>) pred.setProperty( 
Predicate.Annotations.REMOTE_ACCESS_PATH, false); } else { @@ -267,7 +267,7 @@ */ anns.add(new NV(Predicate.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.SHARDED)); - pred = (Predicate) pred.setProperty( + pred = (Predicate<?>) pred.setProperty( Predicate.Annotations.REMOTE_ACCESS_PATH, false); } else { anns.add(new NV(Predicate.Annotations.EVALUATION_CONTEXT, Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-06 17:19:27 UTC (rev 7738) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-06 19:10:46 UTC (rev 7739) @@ -26,6 +26,8 @@ */ package com.bigdata.rdf.sparql.ast.eval; +import java.util.Arrays; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -36,21 +38,33 @@ import com.bigdata.bop.BOp; import com.bigdata.bop.BOpEvaluationContext; +import com.bigdata.bop.BOpIdFactory; import com.bigdata.bop.IConstraint; +import com.bigdata.bop.IPredicate; import com.bigdata.bop.IValueExpression; import com.bigdata.bop.IVariable; import com.bigdata.bop.NV; import com.bigdata.bop.PipelineOp; import com.bigdata.bop.ap.Predicate; import com.bigdata.bop.ap.SampleIndex.SampleType; +import com.bigdata.bop.engine.QueryEngine; +import com.bigdata.bop.joinGraph.PartitionedJoinGroup; import com.bigdata.bop.joinGraph.rto.JGraph; import com.bigdata.bop.joinGraph.rto.JoinGraph; +import com.bigdata.bop.joinGraph.rto.Path; +import com.bigdata.bop.solutions.ProjectionOp; +import com.bigdata.journal.IIndexManager; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.constraints.INeedsMaterialization; +import com.bigdata.rdf.sparql.ast.ASTBase; +import com.bigdata.rdf.sparql.ast.ASTContainer; +import com.bigdata.rdf.sparql.ast.IBindingProducerNode; import com.bigdata.rdf.sparql.ast.IGroupMemberNode; import com.bigdata.rdf.sparql.ast.JoinGroupNode; import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.rdf.sparql.ast.StatementPatternNode; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.util.NT; /** * Integration with the Runtime Optimizer (RTO). @@ -84,6 +98,70 @@ public class AST2BOpRTO extends AST2BOpJoins { /** + * When <code>true</code>, the RTO will only accept simple joins into the + * join graph. Simple joins includes triples-mode joins and filters that do + * not have materialization requirements. Non-simple joins includes + * quads-mode joins and filters that have materialization contexts. + * + * TODO Eventually we can drop this. It is being used while we refactor the + * RTO to support more complex joins and variable materialization for + * filters. + */ + static private final boolean onlySimpleJoins = false; + + /** + * When <code>true</code>, the RTO will not accept OPTIONAL joins into the + * join graph. Optional joins can not decrease the intermediate cardinality + * since they can not eliminate solutions, but they can increase the + * intermediate cardinality by finding multiple bindings in the OPTIONAL + * join. Therefore, it makes sense to execute optional in an order that + * defers as long as possible any increase in the intermediate cardinality. + * This is very much the same as ordering required joins, but the OPTIONAL + * joins need to be handled AFTER the required joins. 
+ * + * TODO RTO OPTIONALS: Handle optional SPs in joinGraph by ordering them in + * the tail so as to minimize the cost function. Once implemented, we can + * drop this field. + */ + static private final boolean onlyRequiredJoins = true; + + /** + * When <code>true</code>, the RTO will only accept statement patterns into + * the join graph. When <code>false</code>, it will attempt to handle non-SP + * joins, e.g., sub-query, exists, property paths, etc. + * + * TODO Eventually we can drop this. + */ + static private final boolean onlySPs = true; + + /** + * When <code>true</code>, the RTO will be applied as in we were doing + * bottom-up query optimization. In this case, it WILL NOT receive any + * solutions from the upstream operators in the pipeline when it performs + * its runtime sampling and it will ignore the <code>doneSet</code> for the + * context in which it is invoked. When run in this manner, the RTO *could* + * be run before the main query is executed. The only way to facilitate this + * at this time would be to lift out the joins on which the RTO would be run + * into a named subquery and then optimize that named subquery before the + * rest of the query. + * <p> + * When <code>false</code>, the RTO solutions from upstream operators will + * flow into the RTO. + * + * TODO We could still pass in exogenous solutions for bottom up evaluation. + * This would help constraint the RTOs exploration. + * + * TODO The RTO is not operating 100% in either an left-to-right or a + * bottom-up fashion, primarily because we are not passing in either + * exogenous bindings or meaningfully using the bindings from the upstream + * operator when exploring the join graph. In fact, the RTO could accept a + * new sample from the upstream operator in each iteration drawing from + * amoung those solutions which had already been materialized by the + * upstream operator. + */ + static private final boolean bottomUp = true; + + /** * Inspect the remainder of the join group. If we can isolate a join graph * and filters, then we will push them down into an RTO JoinGroup. Since the * joins have already been ordered by the static optimizer, we can accept @@ -101,117 +179,190 @@ final JoinGroupNode joinGroup, final Set<IVariable<?>> doneSet, final AST2BOpContext ctx, final AtomicInteger start) { - if (ctx.isQuads()) { + /* + * Snapshot of the doneSet on entry. This gets passed into the RTO. + */ + final Set<IVariable<?>> doneSetIn = Collections + .unmodifiableSet(doneSet); - // FIXME RTO: The RTO does not handle quads yet. + if (onlySimpleJoins && ctx.isQuads()) { + return left; } - + + /* + * Consider the join group. See if it is complex enough to warrant + * running the RTO. + * + * TODO Can we also make a decision based on whether there is uncertain, + * e.g., a swing in stakes, about the cardinality estimates for the + * predicates in the join graph, etc.? This could give us a means to + * avoid using the RTO if the join graph is known to run quickly or the + * ordering of the joins generated by the static query optimizer is + * known to be good. + * + * TODO The static optimizer could simply annotation join groups for + * which it recognizes that it could have a bad join plan. + */ final int arity = joinGroup.arity(); - // The predicates for the RTO join group. - final Set<StatementPatternNode> sps = new LinkedHashSet<StatementPatternNode>(); + /* + * Create a JoinGroup just for the pieces that the RTO will handle. 
+ * + * FIXME These joins are CLONED right now since they also exist in the + * caller's joinGroup. Eventually, this code should be moved into an AST + * rewrite and we can then destructively move the children from the + * original JoinGroupNode into a JoinGraphNode that will be the AST side + * of the RTO. + */ + final JoinGroupNode rtoJoinGroup = new JoinGroupNode(); + rtoJoinGroup.setQueryHints(joinGroup.getQueryHints()); + +// final Set<StatementPatternNode> sps = new LinkedHashSet<StatementPatternNode>(); + // The predicates for the join graph. @SuppressWarnings("rawtypes") final Set<Predicate> preds = new LinkedHashSet<Predicate>(); + // The constraints for the join graph. final List<IConstraint> constraints = new LinkedList<IConstraint>(); - - // Examine the remaining joins, stopping at the first non-SP. - for (int i = start.get(); i < arity; i++) { - - final IGroupMemberNode child = (IGroupMemberNode) joinGroup - .get(i); - - if (child instanceof StatementPatternNode) { - // SP - final StatementPatternNode sp = (StatementPatternNode) child; - final boolean optional = sp.isOptional(); - if(optional) { + // The #of JOINs accepted into the RTO's join group. + int naccepted = 0; + { + /* + * The [doneSet] will be modified iff we actually accept JOINs into the + * RTO. Therefore, we also make a temporary copy that we will use to + * avoid side-effects if this join group is not complex enough to run + * the RTO. + */ + final Set<IVariable<?>> doneSetTmp = new LinkedHashSet<IVariable<?>>( + doneSet); + + // Examine the remaining joins, stopping at the first non-SP. + for (int i = start.get(); i < arity; i++) { + + final IGroupMemberNode child = (IGroupMemberNode) joinGroup + .get(i); + + if (child instanceof StatementPatternNode) { + // SP + StatementPatternNode sp = (StatementPatternNode) child; + final boolean optional = sp.isOptional(); + if (onlyRequiredJoins && optional) { + /* + * TODO The RTO does not order optional joins yet so we + * can not accept this join into the join graph. + */ + break; + } + + final List<IConstraint> attachedConstraints = getJoinConstraints(sp); + + @SuppressWarnings("rawtypes") + final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = + new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); + + getJoinConstraints(attachedConstraints, needsMaterialization); + + if (onlySimpleJoins && !needsMaterialization.isEmpty()) { + /* + * At least one variable requires (or might require) + * materialization. This is not currently handled by the RTO + * so we break out of the loop. + * + * TODO Handle materialization patterns within the RTO, in + * which case we need to collect up the doneSet here, but + * only conditionally since we might not actually execute the + * RTO depending on the number of SPs that we find. + */ + break; + } + + // // Add constraints to the join for that predicate. + // anns.add(new NV(JoinAnnotations.CONSTRAINTS, getJoinConstraints( + // constraints, needsMaterialization))); + + // /* + // * Pull off annotations before we clear them from the predicate. + // */ + // final Scope scope = (Scope) pred.getProperty(Annotations.SCOPE); + // + // // true iff this is a quads access path. + // final boolean quads = pred.getProperty(Annotations.QUADS, + // Annotations.DEFAULT_QUADS); + // + // // pull of the Sesame dataset before we strip the annotations. + // final DatasetNode dataset = (DatasetNode) pred + // .getProperty(Annotations.DATASET); + + // Something the RTO can handle. 
+ sp = (StatementPatternNode) sp.clone();// TODO Use destructive move. +// sp.setId(ctx.nextId()); // assign id so we can reference back later. + rtoJoinGroup.addChild(sp); // add to group. + naccepted++; /* - * TODO Handle optional SPs in joinGraph (by ordering them - * in the tail so as to minimize the cost function). + * FIXME RTO: Handle Triples vs Quads, Default vs Named + * Graph, and DataSet. This probably means pushing more + * logic down into the RTO from AST2BOpJoins. + * Path.cutoffJoin() will need to be call through to logic + * on this class that does the right thing with named graph + * joins, default graph joins, triples mode joins, remote AP + * joins, etc. This is the same code that we call through to + * when we take the selected join path from the RTO and + * compile it into a query plan to fully execute the join + * group. */ - break; - } - - final List<IConstraint> attachedConstraints = getJoinConstraints(sp); - - @SuppressWarnings("rawtypes") - final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = - new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); - - getJoinConstraints(attachedConstraints, needsMaterialization); - - if (!needsMaterialization.isEmpty()) { + final Predicate<?> pred = AST2BOpUtility.toPredicate(sp, ctx); + // final int joinId = ctx.nextId(); + // + // // annotations for this join. + // final List<NV> anns = new LinkedList<NV>(); + // + // anns.add(new NV(BOp.Annotations.BOP_ID, joinId)); + preds.add(pred); + if (attachedConstraints != null) { + // RTO will figure out where to attach these constraints. + constraints.addAll(attachedConstraints); + } + + } else { + // Non-SP. + if (onlySPs) + break; /* - * At least one variable requires (or might require) - * materialization. This is not currently handled by - * the RTO so we break out of the loop. - * - * TODO Handle materialization patterns within the RTO. + * TODO Handle non-SPs in the RTO. See convertJoinGroup() + * for how we handle non-SPs during normal query plan + * conversion. All of that would also have to be handled + * here for the RTO to allow in non-SPs. */ - break; + throw new UnsupportedOperationException(); } - -// // Add constraints to the join for that predicate. -// anns.add(new NV(JoinAnnotations.CONSTRAINTS, getJoinConstraints( -// constraints, needsMaterialization))); + + } -// /* -// * Pull off annotations before we clear them from the predicate. -// */ -// final Scope scope = (Scope) pred.getProperty(Annotations.SCOPE); -// -// // true iff this is a quads access path. -// final boolean quads = pred.getProperty(Annotations.QUADS, -// Annotations.DEFAULT_QUADS); -// -// // pull of the Sesame dataset before we strip the annotations. -// final DatasetNode dataset = (DatasetNode) pred -// .getProperty(Annotations.DATASET); + if (naccepted < 3) { - // Something the RTO can handle. - sps.add(sp); /* - * FIXME RTO: Handle Triples vs Quads, Default vs Named Graph, and - * DataSet. This probably means pushing more logic down into - * the RTO from AST2BOpJoins. + * There are not enough joins for the RTO. + * + * TODO For incremental query construction UIs, it would be + * useful to run just the RTO and to run it with even a single + * join. This will give us sample values as well as estimates + * cardinalities. If the UI has triple patterns that do not join + * (yet), then those should be grouped. */ - final Predicate<?> pred = AST2BOpUtility.toPredicate(sp, ctx); -// final int joinId = ctx.nextId(); -// -// // annotations for this join. 
-// final List<NV> anns = new LinkedList<NV>(); -// -// anns.add(new NV(BOp.Annotations.BOP_ID, joinId)); - preds.add(pred); - if (attachedConstraints != null) { - // RTO will figure out where to attach these constraints. - constraints.addAll(attachedConstraints); - } + return left; - } else { - // Non-SP. - break; } - } - - if (sps.size() < 3) { - /* - * There are not enough joins for the RTO. - * - * TODO For incremental query construction UIs, it would be useful - * to run just the RTO and to run it with even a single join. This - * will give us sample values as well as estimates cardinalities. If - * the UI has triple patterns that do not join (yet), then those - * should be grouped. + * Since we will run the RTO, we now record any variables that are + * known to be materialized in order to support the FILTERs + * associated with the join group that we feed into the RTO. */ - return left; + doneSet.addAll(doneSetTmp); } - + /* * Figure out which variables are projected out of the RTO. * @@ -221,11 +372,15 @@ final Set<IVariable<?>> selectVars = new LinkedHashSet<IVariable<?>>(); { - for (StatementPatternNode sp : sps) { + for (IGroupMemberNode child : rtoJoinGroup.getChildren()) { + if (!(child instanceof IBindingProducerNode)) + continue; + // Note: recursive only matters for complex nodes, not SPs. - ctx.sa.getDefinitelyProducedBindings(sp, selectVars, true/* recursive */); - + ctx.sa.getDefinitelyProducedBindings( + (IBindingProducerNode) child, selectVars, true/* recursive */); + } } @@ -237,12 +392,16 @@ * (unless we are going to run the RTO "bottom up") and build a hash * index. When the hash index is ready, we can execute the join group. */ + final SampleType sampleType = joinGroup.getProperty( QueryHints.RTO_SAMPLE_TYPE, QueryHints.DEFAULT_RTO_SAMPLE_TYPE); + final int limit = joinGroup.getProperty(QueryHints.RTO_LIMIT, QueryHints.DEFAULT_RTO_LIMIT); + final int nedges = joinGroup.getProperty(QueryHints.RTO_NEDGES, QueryHints.DEFAULT_RTO_NEDGES); + left = new JoinGraph(leftOrEmpty(left),// new NV(BOp.Annotations.BOP_ID, ctx.nextId()),// new NV(BOp.Annotations.EVALUATION_CONTEXT, @@ -256,16 +415,262 @@ preds.toArray(new Predicate[preds.size()])),// new NV(JoinGraph.Annotations.CONSTRAINTS, constraints .toArray(new IConstraint[constraints.size()])),// + new NV(JoinGraph.Annotations.JOIN_GROUP, rtoJoinGroup),// new NV(JoinGraph.Annotations.LIMIT, limit),// new NV(JoinGraph.Annotations.NEDGES, nedges),// - new NV(JoinGraph.Annotations.SAMPLE_TYPE, sampleType.name())// + new NV(JoinGraph.Annotations.SAMPLE_TYPE, sampleType.name()),// + new NV(JoinGraph.Annotations.DONE_SET, doneSetIn),// + new NV(JoinGraph.Annotations.NT, new NT(ctx.getNamespace(), + ctx.getTimestamp()))// ); // These joins were consumed. - start.addAndGet(sps.size()); + start.addAndGet(naccepted); return left; } - + + /** + * Compile a join graph into a query plan. + * + * @param queryEngine + * The {@link QueryEngine} on which the RTO has been executing + * and on which the returned query plan may be executed. + * @param joinGraph + * The operator that executed the RTO. + * @param path + * The join path that was selected for full execution by the RTO + * based on deep sampling of the join graph. + * + * @return The query plan to fully execute that join graph. 
+ */ + public static PipelineOp compileJoinGraph(final QueryEngine queryEngine, + final JoinGraph joinGraph, final Path path) { + + if (queryEngine == null) + throw new IllegalArgumentException(); + + if (joinGraph == null) + throw new IllegalArgumentException(); + + if (path == null) + throw new IllegalArgumentException(); + + final IVariable<?>[] selected = joinGraph.getSelected(); + + final IPredicate<?>[] predicates = path.getPredicates(); + + final IConstraint[] constraints = joinGraph.getConstraints(); + + if (onlySimpleJoins) { + + /* + * This is the old code. It does not handle variable materialization + * for filters. + */ + + // Factory avoids reuse of bopIds assigned to the predicates. + final BOpIdFactory idFactory = new BOpIdFactory(); + + return PartitionedJoinGroup.getQuery(idFactory, + false/* distinct */, selected, predicates, constraints); + + } + + /* + * TODO The RTO is ignoring the doneSet so it always runs all + * materialization steps even if some variable is known to be + * materialized on entry. + */ + final Set<IVariable<?>> doneSet = joinGraph.getDoneSet(); + + /* + * The AST JoinGroupNode for the joins and filters that we are running + * through the RTO. + */ + final JoinGroupNode rtoJoinGroup = (JoinGroupNode) joinGraph + .getRequiredProperty(JoinGraph.Annotations.JOIN_GROUP); + +// // Build an index over the bopIds in that JoinGroupNode. +// final Map<Integer, BOp> index = getIndex(rtoJoinGroup); + + // Factory avoids reuse of bopIds assigned to the predicates. + final BOpIdFactory idFactory = new BOpIdFactory(); + + /* + * Reserve ids used by the join graph or its constraints. + */ + idFactory.reserveIds(predicates, constraints); + + /* + * Figure out which constraints are attached to which predicates. + * + * TODO Can we reconcile this code with the filter assignment code in + * AST2BOpFilter? If so, then we can get rid of the + * PartitionedJoinGroup. + */ + final IConstraint[][] assignedConstraints = PartitionedJoinGroup + .getJoinGraphConstraints(predicates, constraints, + null/* knownBound */, true/* pathIsComplete */); + + // Create an execution context for the query. + final AST2BOpContext ctx = getExecutionContext(queryEngine, + // Identifies the KB instance (namespace and timestamp). + (NT) joinGraph.getRequiredProperty(JoinGraph.Annotations.NT)); + + // Start with an empty plan. + PipelineOp left = null; + + for (int i = 0; i < predicates.length; i++) { + + final Predicate<?> pred = (Predicate<?>) predicates[i]; + + final IConstraint[] attachedJoinConstraints = assignedConstraints[i]; + + final boolean optional = pred.isOptional(); + + /* + * Lookup the AST node for that predicate. + * + * Note: The predicates are assigned bopIds by the RTO starting with + * ONE (1). Therefore we substract out ONE from the predicate's id + * to find its index into the join group. + * + * TODO This assumes that the join group does not contain anything + * other than the SPs for the predicates that we are using., + * + * TODO HINTS: The Predicate's query hints should the hints for that + * specific join (aks the SP or other type of IJoinNode), not the + * hints for the JoinGroupNode or the JoinGraph operator. We could + * just pass the AST nodes themselves from the JoinGroupNode. That + * might make things easier, even if it make the query serialization + * fatter on a cluster. + */ +// final ASTBase astNode = (ASTBase) index.get(pred.getId()); + + final ASTBase astNode = (ASTBase) rtoJoinGroup.get(pred.getId() - 1); + + left = join(left, // + pred, // + optional ? 
new LinkedHashSet<IVariable<?>>(doneSet) + : doneSet, // + attachedJoinConstraints == null ? null : Arrays + .asList(attachedJoinConstraints),// + astNode.getQueryHints(),// + ctx); + + } + + if (selected != null && selected.length != 0) { + + // Drop variables that are not projected out. + left = applyQueryHints(new ProjectionOp(// + leftOrEmpty(left), // + new NV(ProjectionOp.Annotations.BOP_ID, idFactory.nextId()),// + new NV(ProjectionOp.Annotations.SELECT, selected)// + ), rtoJoinGroup, ctx); + + } + + return left; + + } + + /** + * Return an execution context that may be used to execute a cutoff join + * during sampling or the entire join path once it has been identified. + * + * @param queryEngine + * The query engine on which the RTO is executing. + * @param nt + * The namespace and timestamp of the KB view against which the + * RTO is running. + * + * @throws RuntimeException + * if there is no such KB instance. + */ + private static AST2BOpContext getExecutionContext( + final QueryEngine queryEngine, final NT nt) { + + // The index manager that can be used to resolve the KB view. + final IIndexManager indexManager = queryEngine.getFederation() == null ? queryEngine + .getIndexManager() : queryEngine.getFederation(); + + // Resolve the KB instance. + final AbstractTripleStore db = (AbstractTripleStore) indexManager + .getResourceLocator().locate(nt.getName(), nt.getTimestamp()); + + if (db == null) + throw new RuntimeException("No such KB? " + nt); + + /* + * An empty container. You can not use any of the top-level SPARQL query + * conversion routines with this container, but it is enough for the + * low-level things that we need to run the RTO. + */ + final ASTContainer astContainer = new ASTContainer(BOp.NOARGS, + BOp.NOANNS); + + return... [truncated message content] |
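Among the many pieces of r7739, the BOpIdFactory rework is the most self-contained: operator ids already assigned to the join graph's predicates (and, via reserveIds, to the annotations of its constraints) are reserved up front so that nextId() never re-issues them when the selected join path is compiled into a query plan. The sketch below is a simplified stand-in for that reserve/nextId contract only, with hypothetical ids in main(); it omits the lazy set allocation and the constraint traversal of the real class.

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class IdFactorySketch {

        /** Identifiers that must never be issued again. */
        private final Set<Integer> reserved = new LinkedHashSet<Integer>();

        private int nextId = 0;

        /** Reserve an id that is already in use (e.g. a predicate's BOP_ID). */
        public synchronized void reserve(final int id) {
            reserved.add(id);
        }

        /** Issue the next id, skipping over every reserved value. */
        public synchronized int nextId() {
            while (reserved.contains(nextId)) {
                nextId++;
            }
            return nextId++;
        }

        public static void main(final String[] args) {
            final IdFactorySketch f = new IdFactorySketch();
            // Hypothetical ids already assigned to the vertices of a join graph.
            f.reserve(0);
            f.reserve(1);
            f.reserve(3);
            System.out.println(f.nextId()); // 2
            System.out.println(f.nextId()); // 4
            System.out.println(f.nextId()); // 5
        }
    }

In the diff the same idea is extended by reserveIds(preds, constraints), which walks each constraint's annotations so that every pre-assigned BOP_ID is protected before new operators are numbered.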
From: <tho...@us...> - 2014-01-06 17:19:33
|
Revision: 7738 http://bigdata.svn.sourceforge.net/bigdata/?rev=7738&view=rev Author: thompsonbry Date: 2014-01-06 17:19:27 +0000 (Mon, 06 Jan 2014) Log Message: ----------- @Overrides. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/NT.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/NT.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/NT.java 2014-01-06 17:13:48 UTC (rev 7737) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/util/NT.java 2014-01-06 17:19:27 UTC (rev 7738) @@ -60,19 +60,21 @@ } + @Override public int hashCode() { return hashCode; } - public boolean equals(Object o) { + @Override + public boolean equals(final Object o) { return equals((NT) o); } - public boolean equals(NT o) { + public boolean equals(final NT o) { if (o == null) { @@ -99,6 +101,7 @@ } + @Override public String toString() { return "NT{name=" + name + ",timestamp=" + timestamp + "}"; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 17:13:58
|
Revision: 7737 http://bigdata.svn.sourceforge.net/bigdata/?rev=7737&view=rev Author: thompsonbry Date: 2014-01-06 17:13:48 +0000 (Mon, 06 Jan 2014) Log Message: ----------- javadoc fix. @Override and final annotations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java 2014-01-06 17:02:58 UTC (rev 7736) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/inf/BackchainTypeResourceIterator.java 2014-01-06 17:13:48 UTC (rev 7737) @@ -30,7 +30,9 @@ import java.util.Arrays; import java.util.Iterator; import java.util.NoSuchElementException; + import org.apache.log4j.Logger; + import com.bigdata.bop.IPredicate; import com.bigdata.bop.IVariableOrConstant; import com.bigdata.rdf.internal.IV; @@ -48,8 +50,10 @@ import com.bigdata.striterator.IChunkedIterator; import com.bigdata.striterator.IChunkedOrderedIterator; import com.bigdata.striterator.IKeyOrder; + import cutthecrap.utils.striterators.Filter; import cutthecrap.utils.striterators.FilterBase; +import cutthecrap.utils.striterators.ICloseable; import cutthecrap.utils.striterators.ICloseableIterator; import cutthecrap.utils.striterators.Resolver; import cutthecrap.utils.striterators.Striterator; @@ -281,7 +285,8 @@ private static final long serialVersionUID = 1L; - public boolean isValid(Object arg0) { + @Override + public boolean isValid(final Object arg0) { final SPO o = (SPO) arg0; @@ -318,9 +323,6 @@ * The source iterator. {@link #nextChunk()} will sort statements * into the {@link IKeyOrder} reported by this iterator (as long * as the {@link IKeyOrder} is non-<code>null</code>). - * @param db - * The database from which we will read the distinct subject - * identifiers (iff this is an all unbound triple pattern). * @param rdfType * The term identifier that corresponds to rdf:Type for the * database. @@ -331,7 +333,7 @@ * @see #newInstance(IChunkedOrderedIterator, IAccessPath, * AbstractTripleStore, long, long) */ - @SuppressWarnings( { "unchecked", "serial" }) + @SuppressWarnings( "rawtypes" ) private BackchainTypeResourceIterator(IChunkedOrderedIterator<ISPO> _src,// Iterator<ISPO> src,// PushbackIterator<IV> resourceIds,// @@ -359,12 +361,14 @@ } + @Override public IKeyOrder<ISPO> getKeyOrder() { return keyOrder; } + @Override public void close() { if (!open) @@ -388,6 +392,7 @@ } + @Override public boolean hasNext() { if (!open) { @@ -447,6 +452,7 @@ * statement iterator is an explicit statement for the current subject, then * we emit the explicit statement. Otherwise we emit an inferred statement. */ + @Override public ISPO next() { if (!hasNext()) { @@ -560,6 +566,7 @@ * a chunk the backchained entailments will always begin on a chunk * boundary. */ + @Override public ISPO[] nextChunk() { if (!hasNext()) @@ -648,7 +655,8 @@ } - public ISPO[] nextChunk(IKeyOrder<ISPO> keyOrder) { + @Override + public ISPO[] nextChunk(final IKeyOrder<ISPO> keyOrder) { if (keyOrder == null) throw new IllegalArgumentException(); @@ -672,6 +680,7 @@ * statement visited by {@link #next()} is "explicit" then the request is * delegated to the source iterator. 
*/ + @Override public void remove() { if (!open) @@ -719,6 +728,7 @@ } + @Override public void close() { src1.close(); @@ -730,10 +740,12 @@ /** * Note: Not implemented since not used above and this class is private. */ + @Override public T[] nextChunk() { throw new UnsupportedOperationException(); } + @Override public boolean hasNext() { return tmp1 != null || tmp2 != null || src1.hasNext() @@ -744,6 +756,7 @@ private T tmp1; private T tmp2; + @Override public T next() { if (!hasNext()) @@ -813,6 +826,7 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); @@ -881,12 +895,14 @@ } + @Override public boolean hasNext() { return buffer != null || src.hasNext(); } + @Override public E next() { if (!hasNext()) @@ -932,17 +948,19 @@ } + @Override public void remove() { throw new UnsupportedOperationException(); } + @Override public void close() { - if (src instanceof ICloseableIterator) { + if (src instanceof ICloseable) { - ((ICloseableIterator<E>) src).close(); + ((ICloseable) src).close(); } @@ -986,17 +1004,20 @@ } } + @Override public boolean hasNext() { return _src.hasNext() || (appender != null && appender.hasNext()); } + @Override public IKeyOrder<ISPO> getKeyOrder() { return _src.getKeyOrder(); } - public ISPO[] nextChunk(IKeyOrder<ISPO> keyOrder) { + @Override + public ISPO[] nextChunk(final IKeyOrder<ISPO> keyOrder) { if (_src.hasNext()) { - ISPO[] chunk = _src.nextChunk(keyOrder); + final ISPO[] chunk = _src.nextChunk(keyOrder); for (ISPO spo : chunk) { testSPO(spo); } @@ -1009,9 +1030,10 @@ return null; } + @Override public ISPO next() { if (_src.hasNext()) { - ISPO spo = _src.next(); + final ISPO spo = _src.next(); testSPO(spo); canRemove = true; return spo; @@ -1022,9 +1044,10 @@ return null; } - public ISPO[] nextChunk() { + @Override + public ISPO[] nextChunk() { if (_src.hasNext()) { - ISPO[] chunk = _src.nextChunk(); + final ISPO[] chunk = _src.nextChunk(); for (ISPO spo : chunk) { testSPO(spo); } @@ -1037,12 +1060,14 @@ return null; } + @Override public void remove() { if (canRemove) { _src.remove(); } } + @Override public void close() { _src.close(); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
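One detail worth calling out in this revision beyond the annotations: the close() path now tests the source iterator against ICloseable rather than ICloseableIterator, so any closeable delegate gets released, not just iterators of that specific subtype. A self-contained sketch of the pattern, with a locally declared ICloseable standing in for cutthecrap.utils.striterators.ICloseable so the example compiles on its own:

{{{
import java.util.Iterator;

// Illustrative sketch of the close-propagation pattern from this commit:
// a wrapper only forwards close() when the delegate advertises it.
// The ICloseable interface is declared locally here to keep the example
// self-contained; the real one lives in cutthecrap.utils.striterators.
public class PushbackIteratorSketch<E> implements Iterator<E> {

    interface ICloseable {
        void close();
    }

    private final Iterator<E> src;

    public PushbackIteratorSketch(final Iterator<E> src) {
        this.src = src;
    }

    @Override
    public boolean hasNext() {
        return src.hasNext();
    }

    @Override
    public E next() {
        return src.next();
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }

    public void close() {
        // Test for the broader ICloseable contract rather than a specific
        // iterator subtype, so any closeable delegate is released.
        if (src instanceof ICloseable) {
            ((ICloseable) src).close();
        }
    }
}
}}}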
From: <tho...@us...> - 2014-01-06 17:03:04
Revision: 7736 http://bigdata.svn.sourceforge.net/bigdata/?rev=7736&view=rev Author: thompsonbry Date: 2014-01-06 17:02:58 +0000 (Mon, 06 Jan 2014) Log Message: ----------- final annotation. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java 2014-01-06 15:04:15 UTC (rev 7735) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/solutions/DropOp.java 2014-01-06 17:02:58 UTC (rev 7736) @@ -71,7 +71,7 @@ /** * @param op */ - public DropOp(DropOp op) { + public DropOp(final DropOp op) { super(op); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 15:04:22
Revision: 7735 http://bigdata.svn.sourceforge.net/bigdata/?rev=7735&view=rev Author: thompsonbry Date: 2014-01-06 15:04:15 +0000 (Mon, 06 Jan 2014) Log Message: ----------- javadoc update indicating that these two classes are dead code and could be removed. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java 2014-01-06 14:12:44 UTC (rev 7734) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/paths/ZeroLengthPathOp.java 2014-01-06 15:04:15 UTC (rev 7735) @@ -46,7 +46,9 @@ /** * An attempt to solve the zero length path problem with its own operator. * - * @deprecated Does not work. Leads to cardinality problems. + * @deprecated Does not work. Leads to cardinality problems and can be removed. + * Zero Length Paths are integrated into the ALP node / + * ArbitraryLengthPathOp now. */ public class ZeroLengthPathOp extends PipelineOp { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java 2014-01-06 14:12:44 UTC (rev 7734) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ZeroLengthPathNode.java 2014-01-06 15:04:15 UTC (rev 7735) @@ -14,7 +14,9 @@ * A special kind of AST node that represents the SPARQL 1.1 zero length path * operator. * - * @deprecated Does not work - leads to cardinality problems. + * @deprecated Does not work - leads to cardinality problems and can be removed. + * Zero Length Paths are integrated into the ALP node / + * ArbitraryLengthPathOp now. */ public class ZeroLengthPathNode extends GroupMemberNodeBase<ZeroLengthPathNode> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
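As the updated deprecation notes say, zero-length paths are handled by the arbitrary-length path (ALP) machinery. The reason a dedicated operator is redundant: under SPARQL 1.1, a "*" (zero-or-more) property path already admits the zero-length case, binding both ends of the path to the same term. An illustrative query, embedded in a runnable Java snippet; the prefix and predicate are made-up example data, not anything from the bigdata test suites:

{{{
// Illustrative only: a SPARQL 1.1 property path whose "*" closure already
// covers the zero-length case, which is why a dedicated ZeroLengthPathOp
// is unnecessary once the ALP node handles "*" and "?" paths.
public class ZeroLengthPathExample {

    public static final String QUERY =
            "PREFIX ex: <http://example.org/>\n" +
            "SELECT ?s ?o\n" +
            "WHERE {\n" +
            // The zero-length match binds ?s and ?o to the same term even
            // when no ex:knows edge exists, so whichever operator evaluates
            // the path must emit those "identity" solutions itself.
            "  ?s ex:knows* ?o .\n" +
            "}";

    public static void main(final String[] args) {
        System.out.println(QUERY);
    }
}
}}}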
From: <tho...@us...> - 2014-01-06 14:12:51
Revision: 7734 http://bigdata.svn.sourceforge.net/bigdata/?rev=7734&view=rev Author: thompsonbry Date: 2014-01-06 14:12:44 +0000 (Mon, 06 Jan 2014) Log Message: ----------- Removed spurious fixme. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 14:12:15 UTC (rev 7733) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 14:12:44 UTC (rev 7734) @@ -75,8 +75,6 @@ /** * Convenience class for passing around the various pieces of context necessary * to construct the bop pipeline. - * - * FIXME Rolling back r7319 which broke UNION processing. */ public class AST2BOpContext implements IdFactory, IEvaluationContext { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 14:12:21
Revision: 7733 http://bigdata.svn.sourceforge.net/bigdata/?rev=7733&view=rev Author: thompsonbry Date: 2014-01-06 14:12:15 +0000 (Mon, 06 Jan 2014) Log Message: ----------- @Override. Removed spurious FIXME. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 14:07:38 UTC (rev 7732) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java 2014-01-06 14:12:15 UTC (rev 7733) @@ -495,7 +495,7 @@ } - /** FIXME Rolling back r7319 which broke UNION processing. */ + @Override public int nextId() { return idFactory.incrementAndGet(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
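The restored nextId() simply delegates to an AtomicInteger-backed id factory. A minimal sketch of that allocation pattern follows; the names are illustrative and are not the bigdata IdFactory interface itself:

{{{
import java.util.concurrent.atomic.AtomicInteger;

// Minimal sketch of the id-allocation pattern behind nextId(): a shared
// AtomicInteger hands out unique, monotonically increasing operator ids
// while a query plan is being generated.
public class IdFactorySketch {

    private final AtomicInteger idFactory = new AtomicInteger(0);

    public int nextId() {
        // Thread-safe: each call returns a distinct id even if plan
        // generation ever touches the factory from more than one thread.
        return idFactory.incrementAndGet();
    }

    public static void main(final String[] args) {
        final IdFactorySketch ids = new IdFactorySketch();
        System.out.println(ids.nextId()); // 1
        System.out.println(ids.nextId()); // 2
    }
}
}}}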
From: <tho...@us...> - 2014-01-06 14:07:47
Revision: 7732 http://bigdata.svn.sourceforge.net/bigdata/?rev=7732&view=rev Author: thompsonbry Date: 2014-01-06 14:07:38 +0000 (Mon, 06 Jan 2014) Log Message: ----------- Refactored AST2BOpUtility, AST2BOpFilters, and AST2BOpJoins to pass down the query hints as a Properties object from the dominating AST node. This causes query hints to be propagated to conditional routing operators, chunked materialization operators, etc. It also prepares the code for reuse by the RTO. I relabeled the "addMaterialization()" methods as 1, 2, and 3. This makes it significantly easier to identify the recursion patterns in the call hierarchy. I reorganized the method signatures to be more consistent in terms of where the AST2BOpContext and query hints appear in the list of arguments. Change to QueryLog to set pred==null for the summary line to get rid of unwanted details. The AST2BOpContext.queryHints field is now correctly '''ignored''' by AST2BOpBase.applyQueryHints(). The semantics of the AST2BOpContext.queryHints have already been applied to the AST nodes by the ASTQueryHintOptimizer. They do not need to be reapplied in applyQueryHints(). The test suites for the AST and SPARQL are green. I have confirmed that the atOnce and chunkSize query hints are still be propagated correctly. TODO: The only remaining issue that I am aware of for query hints is that there are a number of places where we lift out named subqueries. These all need to be reviewed and the query hints correctly brought across for both the NamedSubqueryRoot? and the NamedSubqueryInclude?. Failure to address this prevents hints such as hint:atOnce from correctly being applied to all operators in the query plan. @see #791 (Clean up query hints) @see #64 (RTO) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/engine/QueryLog.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -1108,7 +1108,7 @@ } @SuppressWarnings("rawtypes") - final IPredicate pred = (IPredicate<?>) bop + final IPredicate pred = summary ? null : (IPredicate<?>) bop .getProperty(PipelineJoin.Annotations.PREDICATE); final Integer predId = pred == null ? 
null : (Integer) pred .getProperty(BOp.Annotations.BOP_ID); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpBase.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -41,7 +41,6 @@ import com.bigdata.bop.cost.SubqueryCostReport; import com.bigdata.rdf.sparql.ast.ASTBase; import com.bigdata.rdf.sparql.ast.StatementPatternNode; -import com.bigdata.rdf.sparql.ast.optimizers.ASTQueryHintOptimizer; /** * Base class provides support for triples, sids, and quads mode joins which @@ -164,36 +163,37 @@ } - /** - * Apply any query hints to the operator as annotations of that operator. - * <p> - * Note: This method is responsible for transferring query hints from - * {@link ASTBase#getQueryHints()} onto a generated {@link PipelineOp}. - * - * @param op - * The operator. - * @param queryHints - * The query hints (from {@link ASTBase#getQueryHints()}). - * - * @return A copy of that operator to which the query hints (if any) have - * been applied. If there are no query hints then the original - * operator is returned. - * - * @deprecated by - * {@link #applyQueryHints(PipelineOp, ASTBase, AST2BOpContext)} - * which allows by global and AST node specific query hints to - * be applied. - * - * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/791" > - * Clean up query hints </a> - */ - protected static PipelineOp applyQueryHints(PipelineOp op, - final Properties queryHints) { +// /** +// * Apply any query hints to the operator as annotations of that operator. +// * <p> +// * Note: This method is responsible for transferring query hints from +// * {@link ASTBase#getQueryHints()} onto a generated {@link PipelineOp}. +// * +// * @param op +// * The operator. +// * @param queryHints +// * The query hints (from {@link ASTBase#getQueryHints()}). +// * +// * @return A copy of that operator to which the query hints (if any) have +// * been applied. If there are no query hints then the original +// * operator is returned. +// * +// * @deprecated by +// * {@link #applyQueryHints(PipelineOp, ASTBase, AST2BOpContext)} +// * which allows by global and AST node specific query hints to +// * be applied. +// * +// * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/791" > +// * Clean up query hints </a> +// */ +// @Deprecated +// protected static PipelineOp applyQueryHints(PipelineOp op, +// final Properties queryHints) { +// +// return _applyQueryHints(op, queryHints); +// +// } - return _applyQueryHints(op, queryHints); - - } - /** * Apply any query hints to the operator as annotations of that operator. * <p> @@ -203,11 +203,12 @@ * @param op * The pipeline operator generated from some AST node. * @param node - * The AST node from which the pipeline operator was generated. - * The query hints (from {@link ASTBase#getQueryHints()}) will be - * applied to that pipeline operator. + * The AST node from which the pipeline operator was generated + * (required). The query hints (from + * {@link ASTBase#getQueryHints()}) will be applied to that + * pipeline operator. * @param ctx - * The evaluation context (required). Global query hints declared + * The evaluation context (ignored). 
Global query hints declared * here will be applied to the generated pipeline operator. * Global hints are applied <strong>first</strong> so they can be * override by AST node specific hints. @@ -218,24 +219,22 @@ * * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/791" > * Clean up query hints </a> - * - * FIXME HINTS: {@link AST2BOpContext#queryHints} is applied by - * {@link ASTQueryHintOptimizer} to annotate the AST. Therefore its - * impact is <strong>already</strong> accounted for in the - * <code>node</code>'s query hints. It really DOES NOT need to be - * passed in here. What is important are the query hints as applied to - * the AST nodes, not the global defaults. */ protected static PipelineOp applyQueryHints(PipelineOp op, final ASTBase node, final AST2BOpContext ctx) { - // Apply global query hints from ASTContext. - op = _applyQueryHints(op, ctx.queryHints); + /* + * Note: The global query hints are transferred onto the AST nodes by + * the ASTQueryHintOptimizer and the registered IQueryHint classes. They + * do NOT need to be reapplied here. + */ +// // Apply global query hints from ASTContext. +// op = _applyQueryHints(op, ctx.queryHints); - if (node != null) { +// if (node != null) { // Apply ASTBase node specific query hints. op = _applyQueryHints(op, node.getQueryHints()); - } +// } return op; @@ -256,9 +255,10 @@ * The pipeline operator generated from some AST node (required). * @param nodeQueryHints * The query hints for the AST node from which the pipeline - * operator was generated (optional). + * operator was generated or its dominating operator context + * since not all operators have query hints applied (required). * @param ctx - * The evaluation context (required). Global query hints declared + * The evaluation context (ignored). Global query hints declared * here will be applied to the generated pipeline operator. * Global hints are applied <strong>first</strong> so they can be * override by AST node specific hints. @@ -273,13 +273,18 @@ protected static PipelineOp applyQueryHints(PipelineOp op, final Properties nodeQueryHints, final AST2BOpContext ctx) { - // Apply global query hints from ASTContext. - op = _applyQueryHints(op, ctx.queryHints); + /* + * Note: The global query hints are transferred onto the AST nodes by + * the ASTQueryHintOptimizer and the registered IQueryHint classes. They + * do NOT need to be reapplied here. + */ +// // Apply global query hints from ASTContext. +// op = _applyQueryHints(op, ctx.queryHints); - if (nodeQueryHints != null) { +// if (nodeQueryHints != null) { // Apply ASTBase node specific query hints. op = _applyQueryHints(op, nodeQueryHints); - } +// } return op; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpFilters.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -70,7 +70,8 @@ import com.bigdata.rdf.sparql.ast.StaticAnalysis; /** - * Class handles the materialization pattern for filters. + * Class handles the materialization pattern for filters by adding a series of + * materialization steps to materialize terms needed downstream. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ @@ -85,86 +86,130 @@ protected AST2BOpFilters() { } + /* + * Note: There was only one caller left for this method and it was from + * within this same class, so I rewrote the calling context very slightly + * and removed this method. The documentation on the general approach has + * been moved to the delegate method (immediate below). bbt. + */ +// /** +// * Adds a series of materialization steps to materialize terms needed +// * downstream. +// * +// * To materialize the variable ?term, the pipeline looks as follows: +// * +// * <pre> +// * left +// * -> +// * ConditionalRoutingOp1 (condition=!IsMaterialized(?term), alt=right) +// * -> +// * ConditionalRoutingOp2 (condition=IsInline(?term), alt=PipelineJoin) +// * -> +// * InlineMaterializeOp (predicate=LexPredicate(?term), sink=right) +// * -> +// * PipelineJoin (predicate=LexPredicate(?term)) +// * -> +// * right +// * </pre> +// * +// * @param context +// * @param left +// * the left (upstream) operator that immediately proceeds the +// * materialization steps +// * @param right +// * the right (downstream) operator that immediately follows the +// * materialization steps +// * @param c +// * the constraint to run on the IsMaterialized op to see if the +// * materialization pipeline can be bypassed (bypass if true and +// * no {@link NotMaterializedException} is thrown). +// * @param varsToMaterialize +// * the terms to materialize +// * @param queryHints +// * the query hints +// * @return the final bop added to the pipeline by this method +// * +// * @see AST2BOpUtility#addMaterializationSteps(PipelineOp, int, +// * IValueExpression, Collection, AST2BOpContext) +// */ +// @SuppressWarnings({ "rawtypes", "unchecked" }) +// protected static PipelineOp addMaterializationSteps( +// final AST2BOpContext context,// +// PipelineOp left, // +// final int right,// +// final IConstraint c,// +// final Collection<IVariable<IV>> varsToMaterialize,// +//// final AtomicInteger idFactory, +// final Properties queryHints) { +// +//// final AST2BOpContext context = new AST2BOpContext( +//// null/* astContainer */, idFactory, db, queryEngine, queryHints); +// +// final IValueExpression<IV> ve = (IValueExpression) c.get(0); +// +// return addMaterializationSteps(left, right, ve, varsToMaterialize, +// context); +// +// } + /** - * Adds a series of materialization steps to materialize terms needed - * downstream. + * If the value expression that needs the materialized variables can run + * without a {@link NotMaterializedException} then just route to the + * <i>rightId</i> (around the rest of the materialization pipeline steps). + * This happens in the case of a value expression that only "sometimes" + * needs materialized values, but not always (i.e. materialization + * requirement depends on the data flowing through). A good example of this + * is {@link CompareBOp}, which can sometimes work on internal values and + * sometimes can't. 
* - * To materialize the variable ?term, the pipeline looks as follows: + * To materialize the variable <code>?term</code>, the pipeline looks as + * follows: * * <pre> * left * -> - * ConditionalRoutingOp1 (condition=!IsMaterialized(?term), alt=right) + * ConditionalRoutingOp1 (condition=!IsMaterialized(?term), alt=rightId) * -> * ConditionalRoutingOp2 (condition=IsInline(?term), alt=PipelineJoin) * -> - * InlineMaterializeOp (predicate=LexPredicate(?term), sink=right) + * InlineMaterializeOp (predicate=LexPredicate(?term), sink=rightId) * -> * PipelineJoin (predicate=LexPredicate(?term)) * -> - * right + * rightId * </pre> * - * @param db - * the database - * @param queryEngine - * the query engine * @param left - * the left (upstream) operator that immediately proceeds the - * materialization steps - * @param right - * the right (downstream) operator that immediately follows the - * materialization steps - * @param c - * the constraint to run on the IsMaterialized op to see if the - * materialization pipeline can be bypassed (bypass if true and - * no {@link NotMaterializedException} is thrown). + * The left (upstream) operator that immediately proceeds the + * materialization steps. + * @param rightId + * The id for the right (downstream) operator that immediately + * follows the materialization steps. This needs to be + * pre-reserved by the caller. + * @param ve + * The {@link IValueExpression} for the {@link SPARQLConstraint}. * @param varsToMaterialize - * the terms to materialize + * The variables to be materialize. * @param queryHints - * the query hints - * @return the final bop added to the pipeline by this method + * The query hints to be applied from the dominating operator + * context. + * @param ctx + * The evaluation context. * - * @see AST2BOpUtility#addMaterializationSteps(PipelineOp, int, - * IValueExpression, Collection, AST2BOpContext) - */ - @SuppressWarnings({ "rawtypes", "unchecked" }) - public static PipelineOp addMaterializationSteps( - final AST2BOpContext context, -// final AbstractTripleStore db, -// final QueryEngine queryEngine, - PipelineOp left, final int right, - final IConstraint c, - final Collection<IVariable<IV>> varsToMaterialize, -// final AtomicInteger idFactory, - final Properties queryHints) { - -// final AST2BOpContext context = new AST2BOpContext( -// null/* astContainer */, idFactory, db, queryEngine, queryHints); - - final IValueExpression<IV> ve = (IValueExpression) c.get(0); - - return addMaterializationSteps(left, right, ve, varsToMaterialize, - context); - - } - - /** - * If the value expression that needs the materialized variables can run - * without a {@link NotMaterializedException} then just route to the - * <i>rightId</i> (around the rest of the materialization pipeline steps). - * This happens in the case of a value expression that only "sometimes" - * needs materialized values, but not always (i.e. materialization - * requirement depends on the data flowing through). A good example of this - * is {@link CompareBOp}, which can sometimes work on internal values and - * sometimes can't. 
+ * @return The final bop added to the pipeline by this method * * @see TryBeforeMaterializationConstraint + * @see AST2BOpUtility#addMaterializationSteps1(PipelineOp, int, + * IValueExpression, Collection, AST2BOpContext) */ @SuppressWarnings("rawtypes") - protected static PipelineOp addMaterializationSteps(PipelineOp left, - final int rightId, final IValueExpression<IV> ve, - final Collection<IVariable<IV>> vars, final AST2BOpContext ctx) { + protected static PipelineOp addMaterializationSteps1(// + PipelineOp left,// + final int rightId, // + final IValueExpression<IV> ve,// + final Collection<IVariable<IV>> vars, // + final Properties queryHints, + final AST2BOpContext ctx) { /* * If the constraint "c" can run without a NotMaterializedException then @@ -177,9 +222,9 @@ new NV(BOp.Annotations.BOP_ID, ctx.nextId()),// new NV(ConditionalRoutingOp.Annotations.CONDITION, c2),// new NV(PipelineOp.Annotations.ALT_SINK_REF, rightId)// - ), ctx.queryHints); + ), queryHints, ctx); - return addMaterializationSteps(left, rightId, vars, ctx); + return addMaterializationSteps2(left, rightId, vars, queryHints, ctx); } @@ -200,35 +245,42 @@ * <pre> * left * -> - * ConditionalRoutingOp1 (condition=!IsMaterialized(?term), alt=right) + * ConditionalRoutingOp1 (condition=!IsMaterialized(?term), alt=rightId) * -> * ConditionalRoutingOp2 (condition=IsInline(?term), alt=PipelineJoin) * -> - * InlineMaterializeOp (predicate=LexPredicate(?term), sink=right) + * InlineMaterializeOp (predicate=LexPredicate(?term), sink=rightId) * -> * PipelineJoin (predicate=LexPredicate(?term)) * -> - * right + * rightId * </pre> * * @param left - * the left (upstream) operator that immediately proceeds the - * materialization steps + * The left (upstream) operator that immediately proceeds the + * materialization steps. * @param rightId - * the id of the right (downstream) operator that immediately - * follows the materialization steps + * The id of the right (downstream) operator that immediately + * follows the materialization steps. * @param vars - * the terms to materialize + * The terms to materialize + * @param queryHints + * The query hints from the dominating AST node. + * @param ctx + * The evaluation context. * - * @return the final bop added to the pipeline by this method + * @return The final bop added to the pipeline by this method * * @see TryBeforeMaterializationConstraint * - * TODO make [vars] a Set. + * TODO make [vars] a Set. */ @SuppressWarnings("rawtypes") - protected static PipelineOp addMaterializationSteps(PipelineOp left, - final int rightId, final Collection<IVariable<IV>> vars, + protected static PipelineOp addMaterializationSteps2(// + PipelineOp left,// + final int rightId, // + final Collection<IVariable<IV>> vars,// + final Properties queryHints, // final AST2BOpContext ctx) { final int nvars = vars.size(); @@ -257,8 +309,7 @@ new NV(ChunkedMaterializationOp.Annotations.TIMESTAMP, timestamp), // new NV(PipelineOp.Annotations.SHARED_STATE, !ctx.isCluster()),// live stats, but not on the cluster. new NV(BOp.Annotations.BOP_ID, ctx.nextId())// - ), (Properties) null/*nodeQueryHints*/, ctx // FIXME HINTS: Pass in the caller's AST node query hints. 
- ); + ), queryHints, ctx); // vars.toArray(new IVariable[nvars]), ns, timestamp) // .setProperty(BOp.Annotations.BOP_ID, ctx.nextId()); } @@ -309,7 +360,7 @@ c1),// new NV(PipelineOp.Annotations.SINK_REF, condId2),// new NV(PipelineOp.Annotations.ALT_SINK_REF, endId)// - ), ctx.queryHints); + ), queryHints, ctx); if (log.isDebugEnabled()) { log.debug("adding 1st conditional routing op: " + condOp1); @@ -327,7 +378,7 @@ inlineMaterializeId), // new NV(PipelineOp.Annotations.ALT_SINK_REF, lexJoinId)// - ), ctx.queryHints); + ), queryHints, ctx); if (log.isDebugEnabled()) { log.debug("adding 2nd conditional routing op: " + condOp2); @@ -361,7 +412,7 @@ new NV(InlineMaterializeOp.Annotations.PREDICATE, lexPred.clone()),// new NV(PipelineOp.Annotations.SINK_REF, endId)// - ), ctx.queryHints); + ), queryHints, ctx); if (log.isDebugEnabled()) { log.debug("adding inline materialization op: " @@ -393,7 +444,7 @@ final PipelineOp lexJoinOp = applyQueryHints(// new PipelineJoin(leftOrEmpty(inlineMaterializeOp), // anns.toArray(new NV[anns.size()])), - ctx.queryHints); + queryHints, ctx); // final PipelineOp lexJoinOp = newJoin(inlineMaterializeOp, anns, // ctx.queryHints); @@ -444,20 +495,19 @@ * @param needsMaterialization * A map of constraints and their variable materialization * requirements. - * @param context * @param queryHints + * Query hints from the dominating AST node. + * @param ctx + * The evaluation context. */ @SuppressWarnings("rawtypes") - protected static PipelineOp addMaterializationSteps( - final AST2BOpContext ctx, - PipelineOp left, - final Set<IVariable<?>> doneSet, + protected static PipelineOp addMaterializationSteps3(// + PipelineOp left,// + final Set<IVariable<?>> doneSet,// final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization, -// final AbstractTripleStore db,// -// final QueryEngine queryEngine,// -// final AtomicInteger idFactory,// -// final BOpContextBase context,// - final Properties queryHints) { + final Properties queryHints,// + final AST2BOpContext ctx// + ) { if (!needsMaterialization.isEmpty()) { @@ -486,22 +536,35 @@ if (!terms.isEmpty()) { // Add materialization steps for remaining variables. - left = addMaterializationSteps( - ctx, -// db, queryEngine, - left, - condId, c, terms, - // idFactory, - queryHints - ); + @SuppressWarnings("unchecked") + final IValueExpression<IV> ve = (IValueExpression) c.get(0); + + left = addMaterializationSteps1(// + left, // + condId, // right + ve, // value expression + terms,// varsToMaterialize, + queryHints,// + ctx); + +// left = addMaterializationSteps(// +// ctx,// +// left,// +// condId,// rightId +// c, // eval c.get(0) +// terms, // varsToMaterialize +// // idFactory, +// queryHints// +// ); + } left = applyQueryHints(// new ConditionalRoutingOp(leftOrEmpty(left),// new NV(BOp.Annotations.BOP_ID, condId),// new NV(ConditionalRoutingOp.Annotations.CONDITION,c)// - ), queryHints); + ), queryHints, ctx); } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpJoins.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -97,7 +97,6 @@ * named-graph and default graph join patterns whether on a single machine * or on a cluster. 
* - * @param ctx * @param left * @param pred * The predicate describing the statement pattern. @@ -107,16 +106,16 @@ * Constraints on that join (optional). * @param queryHints * Query hints associated with that {@link StatementPatternNode}. - * @return + * @param ctx The evaluation context. */ @SuppressWarnings("rawtypes") public static PipelineOp join(// - final AST2BOpContext ctx,// PipelineOp left,// Predicate pred,// final Set<IVariable<?>> doneSet,// variables known to be materialized. final Collection<IConstraint> constraints,// - final Properties queryHints// + final Properties queryHints,// + final AST2BOpContext ctx// ) { final int joinId = ctx.nextId(); @@ -189,8 +188,8 @@ * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, - needsMaterialization, queryHints); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + queryHints, ctx); return left; @@ -233,8 +232,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE, pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, null/* summary */); + return newJoin(left, anns, false/* defaultGraphFilter */, + null/* summary */, queryHints, ctx); } @@ -284,8 +283,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, null/* summary */); + return newJoin(left, anns, false/* defaultGraphFilter */, + null/* summary */, queryHints, ctx); } @@ -297,8 +296,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE, pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, null/* summary */); + return newJoin(left, anns, false/* defaultGraphFilter */, + null/* summary */, queryHints, ctx); } /* @@ -325,8 +324,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, false/* defaultGraphFilter */, summary, + queryHints, ctx); } @@ -349,8 +348,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, false/* defaultGraphFilter */, summary, + queryHints, ctx); } @@ -425,8 +424,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, false/* defaultGraphFilter */, summary, + queryHints, ctx); } else { @@ -456,8 +455,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, false/* defaultGraphFilter */, summary, + queryHints, ctx); } @@ -495,8 +494,9 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE, pred)); - return newJoin(ctx, left, anns, queryHints, true/* defaultGraphFilter */, - summary); + return newJoin(left, anns, true/* defaultGraphFilter */, summary, + queryHints, ctx); + } if (summary != null && summary.nknown == 0) { @@ -513,8 +513,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, false/* defaultGraphFilter */, summary, + queryHints, ctx); } @@ -542,8 +542,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE, pred)); - return newJoin(ctx, left, anns, queryHints, - false/* defaultGraphFilter */, summary); + return newJoin(left, anns, 
false/* defaultGraphFilter */, summary, + queryHints, ctx); } @@ -754,8 +754,8 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE, pred)); - return newJoin(ctx, left, anns, queryHints, - true/* defaultGraphFilter */, summary); + return newJoin(left, anns, true/* defaultGraphFilter */, summary, + queryHints, ctx); } else { @@ -842,9 +842,9 @@ anns.add(new NV(PipelineJoin.Annotations.PREDICATE,pred)); - return newJoin(ctx, left, anns, queryHints, - true/* defaultGraphFilter */, summary); - + return newJoin(left, anns, true/* defaultGraphFilter */, summary, + queryHints, ctx); + } } @@ -930,7 +930,6 @@ * * @param left * @param anns - * @param queryHints * @param defaultGraphFilter * <code>true</code> iff a DISTINCT filter must be imposed on the * SPOs. This is never done for a named graph query. It is @@ -939,6 +938,10 @@ * need to bother. * @param summary * The {@link DataSetSummary} (when available). + * @param queryHints + * The query hints from the dominating operator context. + * @param ctx + * The evaluation context. * @return * * @see Annotations#HASH_JOIN @@ -946,11 +949,13 @@ * @see Annotations#ESTIMATED_CARDINALITY */ @SuppressWarnings({ "rawtypes", "unchecked" }) - static private PipelineOp newJoin(final AST2BOpContext ctx, - PipelineOp left, final List<NV> anns, - final Properties queryHints, - final boolean defaultGraphFilter, - final DataSetSummary summary) { + static private PipelineOp newJoin(// + PipelineOp left, // + final List<NV> anns,// + final boolean defaultGraphFilter,// + final DataSetSummary summary,// + final Properties queryHints, // + final AST2BOpContext ctx) { final Map<String, Object> map = NV.asMap(anns.toArray(new NV[anns .size()])); @@ -1073,7 +1078,7 @@ } - left = applyQueryHints(left, queryHints); + left = applyQueryHints(left, queryHints, ctx); return left; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpRTO.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -103,7 +103,7 @@ if (ctx.isQuads()) { - // FIXME The RTO does not handle quads yet. + // FIXME RTO: The RTO does not handle quads yet. return left; } @@ -173,7 +173,7 @@ // Something the RTO can handle. sps.add(sp); /* - * FIXME Handle Triples vs Quads, Default vs Named Graph, and + * FIXME RTO: Handle Triples vs Quads, Default vs Named Graph, and * DataSet. This probably means pushing more logic down into * the RTO from AST2BOpJoins. 
*/ @@ -231,7 +231,7 @@ } /* - * FIXME When running the RTO as anything other than the top-level join + * FIXME RTO: When running the RTO as anything other than the top-level join * group in the query plan and for the *FIRST* joins in the query plan, * we need to flow in any solutions that are already in the pipeline * (unless we are going to run the RTO "bottom up") and build a hash Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2014-01-06 12:52:53 UTC (rev 7731) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2014-01-06 14:07:38 UTC (rev 7732) @@ -244,7 +244,7 @@ * output. */ - left = addStartOp(ctx); + left = addStartOp(optimizedQuery, ctx); } @@ -363,7 +363,7 @@ if (left == null) { - left = addStartOpOnCluster(ctx); + left = addStartOpOnCluster(queryBase, ctx); } @@ -443,8 +443,8 @@ for (AssignmentNode assignmentNode : projection .getAssignmentProjections()) { - left = addAssignment(left, assignmentNode, doneSet, ctx, - true/* projection */); + left = addAssignment(left, assignmentNode, doneSet, + projection.getQueryHints(), ctx, true/* projection */); } @@ -482,7 +482,7 @@ preserveOrder = true; - left = addOrderBy(left, orderBy, ctx); + left = addOrderBy(left, queryBase, orderBy, ctx); } else { @@ -504,7 +504,7 @@ if (orderBy != null && !orderBy.isEmpty()) { - left = addOrderBy(left, orderBy, ctx); + left = addOrderBy(left, queryBase, orderBy, ctx); } @@ -591,7 +591,7 @@ if(queryBase.hasSlice()) { - left = addSlice(left, queryBase.getSlice(), ctx); + left = addSlice(left, queryBase, queryBase.getSlice(), ctx); } @@ -839,6 +839,9 @@ final ServiceNode serviceNode, final Set<IVariable<?>> doneSet, final AST2BOpContext ctx) { + // The query hints are taken from the SERVICE node. + final Properties queryHints = serviceNode.getQueryHints(); + @SuppressWarnings("rawtypes") final Map<IConstraint, Set<IVariable<IV>>> needsMaterialization = new LinkedHashMap<IConstraint, Set<IVariable<IV>>>(); @@ -994,7 +997,8 @@ if (!vars.isEmpty()) { // Add the materialization step. - left = addMaterializationSteps(left, rightId, (Collection) vars, ctx); + left = addMaterializationSteps2(left, rightId, + (Collection) vars, queryHints, ctx); // These variables have now been materialized. doneSet.addAll(vars); @@ -1046,15 +1050,15 @@ anns.put(JoinAnnotations.CONSTRAINTS, joinConstraints); left = applyQueryHints(new ServiceCallJoin(leftOrEmpty(left), anns), - serviceNode.getQueryHints()); + queryHints, ctx); /* * For each filter which requires materialization steps, add the * materializations steps to the pipeline and then add the filter to the * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, needsMaterialization, - serviceNode.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + queryHints, ctx); return left; @@ -1455,8 +1459,8 @@ * materializations steps to the pipeline and then add the filter to the * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, needsMaterialization, - nsi.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + nsi.getQueryHints(), ctx); return left; @@ -1575,8 +1579,8 @@ * materializations steps to the pipeline and then add the filter to the * pipeline. 
*/ - left = addMaterializationSteps(ctx, left, doneSet, - needsMaterialization, nsi.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + nsi.getQueryHints(), ctx); return left; @@ -1762,8 +1766,8 @@ * materializations steps to the pipeline and then add the filter to the * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, needsMaterialization, - subqueryRoot.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + subqueryRoot.getQueryHints(), ctx); return left; @@ -2018,8 +2022,8 @@ * materializations steps to the pipeline and then add the filter to the * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, needsMaterialization, - subqueryRoot.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + subqueryRoot.getQueryHints(), ctx); return left; @@ -2220,9 +2224,10 @@ } - left = applyQueryHints(new Tee(leftOrEmpty(left), - NV.asMap(anns.toArray(new NV[anns.size()]))), ctx.queryHints); - + left = applyQueryHints( + new Tee(leftOrEmpty(left), NV.asMap(anns + .toArray(new NV[anns.size()]))), unionNode, ctx); + } /* @@ -2286,7 +2291,7 @@ new NV(Predicate.Annotations.BOP_ID, downstreamId),// new NV(BOp.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.CONTROLLER)// - ), ctx.queryHints); + ), unionNode, ctx); // Add in anything which was known materialized for all child groups. doneSet.addAll(doneSetsIntersection); @@ -2302,7 +2307,7 @@ final ArbitraryLengthPathNode alpNode, final Set<IVariable<?>> doneSet, final AST2BOpContext ctx) { - final JoinGroupNode subgroup = (JoinGroupNode) alpNode.subgroup(); + final JoinGroupNode subgroup = (JoinGroupNode) alpNode.subgroup(); // Convert the child join group. final PipelineOp subquery = convertJoinGroup(null, @@ -2324,10 +2329,10 @@ * We need to drop the internal variables bound by the subquery (in the * case where the subquery is a nested path). */ - final Set<IVariable<?>> varsToDrop = new LinkedHashSet<IVariable<?>>(); - ctx.sa.getDefinitelyProducedBindings(subgroup, varsToDrop, true); - varsToDrop.remove(tVarLeft); - varsToDrop.remove(tVarRight); + final Set<IVariable<?>> varsToDrop = new LinkedHashSet<IVariable<?>>(); + ctx.sa.getDefinitelyProducedBindings(subgroup, varsToDrop, true); + varsToDrop.remove(tVarLeft); + varsToDrop.remove(tVarRight); left = applyQueryHints(new ArbitraryLengthPathOp(leftOrEmpty(left),// new NV(ArbitraryLengthPathOp.Annotations.SUBQUERY, subquery), @@ -2342,7 +2347,7 @@ new NV(Predicate.Annotations.BOP_ID, ctx.nextId()),// new NV(BOp.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.CONTROLLER)// - ), ctx.queryHints); + ), alpNode, ctx); return left; @@ -2367,7 +2372,7 @@ new NV(Predicate.Annotations.BOP_ID, ctx.nextId()),// new NV(BOp.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.CONTROLLER)// - ), ctx.queryHints); + ), zlpNode, ctx); return left; @@ -2549,11 +2554,11 @@ */ final Predicate<?> pred = toPredicate(sp, ctx); final boolean optional = sp.isOptional(); - left = join(ctx, left, pred, + left = join(left, pred, optional ? 
new LinkedHashSet<IVariable<?>>(doneSet) : doneSet, getJoinConstraints(sp), - sp.getQueryHints()); + sp.getQueryHints(), ctx); continue; } else if (child instanceof ArbitraryLengthPathNode) { final ArbitraryLengthPathNode alpNode = (ArbitraryLengthPathNode) child; @@ -2641,12 +2646,12 @@ continue; } // FILTER - left = addConditional(left, filter, doneSet, ctx); + left = addConditional(left, joinGroup, filter, doneSet, ctx); continue; } else if (child instanceof AssignmentNode) { // LET / BIND left = addAssignment(left, (AssignmentNode) child, doneSet, - ctx, false/* projection */); + joinGroup.getQueryHints(), ctx, false/* projection */); continue; } else { throw new UnsupportedOperationException("child: " + child); @@ -2656,8 +2661,8 @@ if (!dropVars.isEmpty()) { final IVariable<?>[] a = dropVars.toArray(new IVariable[dropVars .size()]); - left = applyQueryHints(new DropOp(leftOrEmpty(left), new NV(BOp.Annotations.BOP_ID, - ctx.nextId()), // + left = applyQueryHints(new DropOp(leftOrEmpty(left), // + new NV(BOp.Annotations.BOP_ID, ctx.nextId()), // new NV(DropOp.Annotations.DROP_VARS, a)// ), joinGroup, ctx); } @@ -3060,17 +3065,21 @@ } /** - * Conditionally add a {@link StartOp} iff the query will rin on a cluster. - * - * @param ctx - * - * @return The {@link StartOp} iff this query will run on a cluster and - * otherwise <code>null</code>. - * - * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/478"> - * Cluster does not map input solution(s) across shards</a> - */ - private static final PipelineOp addStartOpOnCluster(final AST2BOpContext ctx) { + * Conditionally add a {@link StartOp} iff the query will rin on a cluster. + * + * @param queryBase + * The {@link QueryBase} for which a {@link StartOp} might be + * required. + * @param ctx + * + * @return The {@link StartOp} iff this query will run on a cluster and + * otherwise <code>null</code>. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/478"> + * Cluster does not map input solution(s) across shards</a> + */ + private static final PipelineOp addStartOpOnCluster( + final QueryBase queryBase, final AST2BOpContext ctx) { if (ctx.isCluster()) { @@ -3086,7 +3095,7 @@ * @see https://sourceforge.net/apps/trac/bigdata/ticket/478 */ - return addStartOp(ctx); + return addStartOp(queryBase, ctx); } @@ -3106,7 +3115,8 @@ * * @return The {@link StartOp}. */ - private static final PipelineOp addStartOp(final AST2BOpContext ctx) { + private static final PipelineOp addStartOp(final QueryBase queryBase, + final AST2BOpContext ctx) { final PipelineOp start = applyQueryHints( new StartOp(BOp.NOARGS, NV.asMap(new NV[] {// @@ -3114,7 +3124,7 @@ .nextId()), new NV(SliceOp.Annotations.EVALUATION_CONTEXT, BOpEvaluationContext.CONTROLLER), })), - ctx.queryHints); + queryBase, ctx); return start; @@ -3125,15 +3135,23 @@ * * @param left * @param assignmentNode + * The {@link AssignmentNode} (LET() or BIND()). * @param doneSet + * @param queryHints + * The query hints from the AST node that dominates the + * assignment and from which we will take any query hints. E.g., + * a PROJECTION or a JOIN GROUP. 
* @param ctx * @param projection * @return */ @SuppressWarnings({ "unchecked", "rawtypes" }) private static final PipelineOp addAssignment(PipelineOp left, - final AssignmentNode assignmentNode, - final Set<IVariable<?>> doneSet, final AST2BOpContext ctx, +// final ASTBase dominatingASTNode,// + final AssignmentNode assignmentNode,// + final Set<IVariable<?>> doneSet, // + final Properties queryHints,// + final AST2BOpContext ctx,// final boolean projection) { final IValueExpression ve = assignmentNode.getValueExpression(); @@ -3167,7 +3185,8 @@ */ if (vars.size() > 0) { - left = addMaterializationSteps(left, bopId, ve, vars, ctx); + left = addMaterializationSteps1(left, bopId, ve, vars, + queryHints, ctx); if(req.getRequirement()==Requirement.ALWAYS) { @@ -3186,9 +3205,9 @@ left = applyQueryHints(// new ConditionalRoutingOp(leftOrEmpty(left), // - new NV(BOp.Annotations.BOP_ID, bopId), // - new NV(ConditionalRoutingOp.Annotations.CONDITION, c)// - ), ctx.queryHints); + new NV(BOp.Annotations.BOP_ID, bopId), // + new NV(ConditionalRoutingOp.Annotations.CONDITION, c)// + ), queryHints, ctx); return left; @@ -3199,6 +3218,8 @@ * pipeline. * * @param left + * @param joinGroup + * The parent join group. * @param filter * The filter. * @param doneSet @@ -3208,9 +3229,12 @@ * @return */ @SuppressWarnings("rawtypes") - private static final PipelineOp addConditional(PipelineOp left, - final FilterNode filter, - final Set<IVariable<?>> doneSet, final AST2BOpContext ctx) { + private static final PipelineOp addConditional(// + PipelineOp left,// + final JoinGroupNode joinGroup,// + final FilterNode filter,// + final Set<IVariable<?>> doneSet, // + final AST2BOpContext ctx) { @SuppressWarnings("unchecked") final IValueExpression<IV> ve = (IValueExpression<IV>) filter @@ -3240,7 +3264,8 @@ if (!vars.isEmpty()) { // Add materialization steps for those variables. - left = addMaterializationSteps(left, bopId, ve, vars, ctx); + left = addMaterializationSteps1(left, bopId, ve, vars, + joinGroup.getQueryHints(), ctx); if (req.getRequirement() == Requirement.ALWAYS) { @@ -3261,7 +3286,7 @@ left = applyQueryHints(new ConditionalRoutingOp(leftOrEmpty(left),// new NV(BOp.Annotations.BOP_ID, bopId), // new NV(ConditionalRoutingOp.Annotations.CONDITION, c)// - ), ctx.queryHints); + ), joinGroup, ctx); return left; @@ -3277,6 +3302,9 @@ * keep it, we could use a {@link JVMSolutionSetHashJoinOp}. Just push * the data into the hash index and then just that operator to join it * into the pipeline. Then we could ditch the {@link DataSetJoin}. + * <p> + * See {@link JoinGroupNode#getInFilters()} for how and where this is + * currently disabled. */ @SuppressWarnings("rawtypes") private static final PipelineOp addKnownInConditional(PipelineOp left, @@ -3518,8 +3546,8 @@ * materializations steps to the pipeline and then add the filter to the * pipeline. */ - left = addMaterializationSteps(ctx, left, doneSet, needsMaterialization, - subgroup.getQueryHints()); + left = addMaterializationSteps3(left, doneSet, needsMaterialization, + subgroup.getQueryHints(), ctx); return left; @@ -3663,10 +3691,11 @@ } /** - * Add an aggregation operator. It will handle the grouping (if any), - * optional having filter, and projected select expressions. A generalized - * aggregation operator will be used unless the aggregation corresponds to - * some special case. + * Add an aggregation operator. It will handle the <code>GROUP BY</code> (if + * any), the optional <code>HAVING</code> filter, and projected + * <code>SELECT</code> expressions. 
A generalized aggregation operator will + * be used unless the aggregation corresponds to some special case, e.g., + * pipelined aggregation. * * @param left * The previous operator in the pipeline. @@ -3701,6 +3730,9 @@ final IGroupByRewriteState groupByRewrite = new GroupByRewriter( groupByState); + // The query hints are taken from the PROJECTION. + final Properties queryHints = projection.getQueryHints(); + final int bopId = ctx.nextId(); final GroupByOp op; @@ -3784,7 +3816,7 @@ } - left = addMaterializationSteps(left, bopId, vars, ctx); + left = addMaterializationSteps2(left, bopId, vars, queryHints, ctx); if (!groupByState.isAnyDistinct() && !groupByState.isSelectDependency() && !groupByState.isNestedAggregates()) { @@ -3847,7 +3879,7 @@ } - left = applyQueryHints(op, ctx.queryHints); + left = applyQueryHints(op, queryHints, ctx); return left; @@ -3858,8 +3890,12 @@ */ @SuppressWarnings({ "unchecked", "rawtypes" }) private static final PipelineOp addOrderBy(PipelineOp left, - final OrderByNode orderBy, final AST2BOpContext ctx) { + final QueryBase queryBase, final OrderByNode orderBy, + final AST2BOpContext ctx) { + // The query hints are taken from the QueryBase + final Properties queryHints = queryBase.getQueryHints(); + final Set<IVariable<IV>> vars = new LinkedHashSet<IVariable<IV>>(); final ISortOrder<IV>[] sortOrders = new ISortOrder[orderBy.size()]; @@ -3899,7 +3935,7 @@ final int sortId = ctx.nextId(); - left = addMaterializationSteps(left, sortId, vars, ctx); + left = addMaterializationSteps2(left, sortId, vars, queryHints, ctx); left = applyQueryHints( new MemorySortOp( @@ -3919,7 +3955,7 @@ // new NV(MemorySortOp.Annotations.SHARED_STATE, // true),// new NV(MemorySortOp.Annotations.LAST_PASS, true),// - })), ctx.queryHints); + })), queryHints, ctx); return left; @@ -3929,7 +3965,8 @@ * Impose an OFFSET and/or LIMIT on a query. */ private static final PipelineOp addSlice(PipelineOp left, - final SliceNode slice, final AST2BOpContext ctx) { + final QueryBase queryBase, final SliceNode slice, + final AST2BOpContext ctx) { final int bopId = ctx.nextId(); @@ -3942,7 +3979,7 @@ new NV(SliceOp.Annotations.PIPELINED, true),// new NV(SliceOp.Annotations.MAX_PARALLEL, 1),// new NV(MemorySortOp.Annotations.SHARED_STATE, true)// - ), slice, ctx); + ), queryBase, ctx); return left; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 12:52:59
Revision: 7731 http://bigdata.svn.sourceforge.net/bigdata/?rev=7731&view=rev Author: thompsonbry Date: 2014-01-06 12:52:53 +0000 (Mon, 06 Jan 2014) Log Message: ----------- @Overrides Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java 2014-01-06 12:01:53 UTC (rev 7730) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/OrderByNode.java 2014-01-06 12:52:53 UTC (rev 7731) @@ -93,6 +93,7 @@ } + @Override @SuppressWarnings({ "unchecked", "rawtypes" }) public Iterator<OrderByExpr> iterator() { @@ -100,6 +101,7 @@ } + @Override public String toString(final int indent) { final StringBuilder sb = new StringBuilder(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-01-06 12:02:00
Revision: 7730 http://bigdata.svn.sourceforge.net/bigdata/?rev=7730&view=rev Author: thompsonbry Date: 2014-01-06 12:01:53 +0000 (Mon, 06 Jan 2014) Log Message: ----------- @Override annotations. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java 2014-01-05 19:23:06 UTC (rev 7729) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultLeafCoder.java 2014-01-06 12:01:53 UTC (rev 7730) @@ -72,6 +72,7 @@ private IRabaCoder keysCoder; private IRabaCoder valsCoder; + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -89,6 +90,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { out.write(VERSION0); @@ -100,6 +102,7 @@ } /** Yes. */ + @Override final public boolean isLeafDataCoder() { return true; @@ -107,12 +110,14 @@ } /** No. */ + @Override public boolean isNodeDataCoder() { return false; } + @Override public String toString() { return super.toString() + "{keysCoder=" + keysCoder + ", valsCoder=" @@ -149,12 +154,14 @@ } + @Override public ILeafData decode(final AbstractFixedByteArrayBuffer data) { return new ReadOnlyLeafData(data, keysCoder, valsCoder); } + @Override public ILeafData encodeLive(final ILeafData leaf, final DataOutputBuffer buf) { if (leaf == null) @@ -469,6 +476,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final ILeafData leaf, final DataOutputBuffer buf) { @@ -562,6 +570,7 @@ */ private final int O_rawRecords; + @Override public final AbstractFixedByteArrayBuffer data() { return b; @@ -931,6 +940,7 @@ /** * Always returns <code>true</code>. */ + @Override final public boolean isLeaf() { return true; @@ -940,6 +950,7 @@ /** * Yes. */ + @Override final public boolean isReadOnly() { return true; @@ -949,6 +960,7 @@ /** * Yes. */ + @Override final public boolean isCoded() { return true; @@ -958,6 +970,7 @@ /** * {@inheritDoc}. This field is cached. */ + @Override final public int getKeyCount() { return nkeys; @@ -976,24 +989,28 @@ /** * For a leaf, the #of values is always the #of keys. 
*/ + @Override final public int getValueCount() { return nkeys; } + @Override final public boolean hasVersionTimestamps() { return (flags & FLAG_VERSION_TIMESTAMPS) != 0; } + @Override final public boolean hasDeleteMarkers() { return (flags & FLAG_DELETE_MARKERS) != 0; } + @Override final public boolean hasRawRecords() { return (flags & FLAG_RAW_RECORDS) != 0; @@ -1006,6 +1023,7 @@ // // } + @Override public long getMinimumVersionTimestamp() { if (!hasVersionTimestamps()) @@ -1016,6 +1034,7 @@ } + @Override public long getMaximumVersionTimestamp() { if (!hasVersionTimestamps()) @@ -1025,6 +1044,7 @@ } + @Override final public long getVersionTimestamp(final int index) { if (!hasVersionTimestamps()) @@ -1070,6 +1090,7 @@ } + @Override final public boolean getDeleteMarker(final int index) { if (!hasDeleteMarkers()) @@ -1079,6 +1100,7 @@ } + @Override final public long getRawRecord(final int index) { if (!hasRawRecords()) @@ -1238,18 +1260,21 @@ // // } + @Override final public IRaba getKeys() { return keys; } + @Override final public IRaba getValues() { return vals; } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); @@ -1272,6 +1297,7 @@ * Return <code>true</code> if the leaf encodes the address or the prior and * next leaves. */ + @Override public final boolean isDoubleLinked() { return b.getByte(0) == LINKED_LEAF; @@ -1325,6 +1351,7 @@ // // } + @Override public final long getPriorAddr() { if(!isDoubleLinked()) @@ -1334,6 +1361,7 @@ } + @Override public final long getNextAddr() { if(!isDoubleLinked()) Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-01-05 19:23:06 UTC (rev 7729) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java 2014-01-06 12:01:53 UTC (rev 7730) @@ -74,6 +74,7 @@ private IRabaCoder keysCoder; + @Override public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { @@ -89,6 +90,7 @@ } + @Override public void writeExternal(final ObjectOutput out) throws IOException { out.write(VERSION0); @@ -98,6 +100,7 @@ } /** No. */ + @Override final public boolean isLeafDataCoder() { return false; @@ -105,12 +108,14 @@ } /** Yes. */ + @Override public boolean isNodeDataCoder() { return true; } + @Override public String toString() { return super.toString() + "{keysCoder=" + keysCoder + "}"; @@ -138,13 +143,14 @@ } + @Override public INodeData decode(final AbstractFixedByteArrayBuffer data) { return new ReadOnlyNodeData(data, keysCoder); } - + @Override public INodeData encodeLive(final INodeData node, final DataOutputBuffer buf) { if (node == null) @@ -349,6 +355,7 @@ } + @Override public AbstractFixedByteArrayBuffer encode(final INodeData node, final DataOutputBuffer buf) { @@ -630,12 +637,14 @@ } + @Override final public boolean hasVersionTimestamps() { return ((flags & FLAG_VERSION_TIMESTAMPS) != 0); } + @Override final public long getMinimumVersionTimestamp() { if(!hasVersionTimestamps()) @@ -648,6 +657,7 @@ } + @Override final public long getMaximumVersionTimestamp() { if(!hasVersionTimestamps()) @@ -663,6 +673,7 @@ /** * Always returns <code>false</code>. */ + @Override final public boolean isLeaf() { return false; @@ -672,6 +683,7 @@ /** * Yes. */ + @Override final public boolean isReadOnly() { return true; @@ -681,6 +693,7 @@ /** * Yes. 
*/ + @Override final public boolean isCoded() { return true; @@ -690,6 +703,7 @@ /** * {@inheritDoc}. This field is cached. */ + @Override final public int getKeyCount() { return nkeys; @@ -699,6 +713,7 @@ /** * {@inheritDoc}. This field is cached. */ + @Override final public int getChildCount() { return nkeys + 1; @@ -708,6 +723,7 @@ /** * {@inheritDoc}. This field is cached. */ + @Override final public long getSpannedTupleCount() { return nentries; @@ -732,6 +748,7 @@ } + @Override final public long getChildAddr(final int index) { assert assertChildIndex(index); @@ -740,6 +757,7 @@ } + @Override final public long getChildEntryCount(final int index) { assert assertChildIndex(index); @@ -793,12 +811,14 @@ } + @Override final public IRaba getKeys() { return keys; } + @Override public String toString() { final StringBuilder sb = new StringBuilder(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
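[Editorial note on the r7730 change above: the @Override annotations are purely a compile-time safety measure. As a minimal, hypothetical sketch -- the interface and class names below are illustrative only and do not come from the bigdata sources -- the annotation turns a silent signature mismatch into a compiler error rather than an accidental new method:]

// Hypothetical coder interface, loosely modeled on the pattern in the diff above.
interface DataCoder {
    boolean isLeafDataCoder();
}

class MyCoder implements DataCoder {

    // Correct: the compiler verifies this really overrides the interface method.
    @Override
    public boolean isLeafDataCoder() {
        return true;
    }

    // Without @Override, a typo such as "isLeafDatacoder()" would silently
    // declare a brand-new method instead of overriding anything. With the
    // annotation, the same typo fails to compile:
    //
    //   @Override
    //   public boolean isLeafDatacoder() { ... }  // error: method does not override
}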
From: <tho...@us...> - 2014-01-05 19:23:14
|
Revision: 7729 http://bigdata.svn.sourceforge.net/bigdata/?rev=7729&view=rev Author: thompsonbry Date: 2014-01-05 19:23:06 +0000 (Sun, 05 Jan 2014) Log Message: ----------- Increased parallelism in the RTO. The cutoff joins for the initial edges in round zero are now executed in parallel. See #64. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2014-01-05 17:35:43 UTC (rev 7728) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/JGraph.java 2014-01-05 19:23:06 UTC (rev 7729) @@ -720,30 +720,30 @@ } - /** - * Resample the initial vertices for the specified join paths and then - * resample the cutoff join for each given join path in path order. - * - * @param queryEngine - * The query engine. - * @param limitIn - * The original limit. - * @param round - * The round number in [1:n]. - * @param a - * The set of paths from the previous round. For the first round, - * this is formed from the initial set of edges to consider. - * @param edgeSamples - * A map used to associate join path segments (expressed as an - * ordered array of bopIds) with {@link EdgeSample}s to avoid - * redundant effort. - * - * @return The number of join paths which are experiencing cardinality - * estimate underflow. - * - * @throws Exception - */ - public int resamplePaths(final QueryEngine queryEngine, int limitIn, + /** + * Resample the initial vertices for the specified join paths and then + * resample the cutoff join for each given join path in path order. + * + * @param queryEngine + * The query engine. + * @param limitIn + * The original limit. + * @param round + * The round number in [1:n]. + * @param a + * The set of paths from the previous round. For the first round, + * this is formed from the initial set of edges to consider. + * @param edgeSamples + * A map used to associate join path segments (expressed as an + * ordered array of bopIds) with {@link EdgeSample}s to avoid + * redundant effort. + * + * @return The number of join paths which are experiencing cardinality + * estimate underflow. + * + * @throws Exception + */ + protected int resamplePaths(final QueryEngine queryEngine, int limitIn, final int round, final Path[] a, final Map<PathIds, EdgeSample> edgeSamples) throws Exception { @@ -799,43 +799,90 @@ } - // re-sample vertices. + // re-sample vertices (this is done in parallel). sampleVertices(queryEngine, vertexLimit); - -// for (Map.Entry<Vertex, AtomicInteger> e : vertexLimit.entrySet()) { -// -//// final Vertex v = x.vertices[0]; -//// final int limit = vertexLimit.get(v).intValue(); -// -// final Vertex v = e.getKey(); -// -// final int limit = e.getValue().get(); -// -// v.sample(queryEngine, limit, sampleType); -// -// } } /* - * Re-sample the cutoff join for each edge in each of the existing - * paths using the newly re-sampled vertices. + * Re-sample the cutoff join for each edge in each of the existing paths + * using the newly re-sampled vertices. * * Note: The only way to increase the accuracy of our estimates for - * edges as we extend the join paths is to re-sample each edge in - * the join path in path order. 
+ * edges as we extend the join paths is to re-sample each edge in the + * join path in path order. * - * Note: An edge must be sampled for each distinct join path prefix - * in which it appears within each round. However, it is common for - * surviving paths to share a join path prefix, so do not re-sample - * a given path prefix more than once per round. + * Note: An edge must be sampled for each distinct join path prefix in + * which it appears within each round. However, it is common for + * surviving paths to share a join path prefix, so do not re-sample a + * given path prefix more than once per round. + * + * FIXME PARALLELIZE: Parallelize the re-sampling for the active paths. + * This step is responsible for deepening the samples on the non-pruned + * paths. There is a data race that can occur since the [edgeSamples] + * map can contain samples for the same sequence of edges in different + * paths. This is because two paths can shared a common prefix sequence + * of edges, e.g., [2, 4, 6, 7] and [2, 4, 6, 9] share the path prefix + * [2, 4, 6]. Therefore both inspection and update of the [edgeSamples] + * map MUST be synchronized. This code is single threaded since that + * synchronization mechanism has not yet been put into place. The + * obvious way to handle this is to use a memoization pattern for the + * [ids] key for the [edgeSamples] map. This will ensure that the + * threads that need to resample a given edge will coordinate with the + * first such thread doing the resampling and the other thread(s) + * blocking until the resampled edge is available. */ if (log.isDebugEnabled()) log.debug("Re-sampling in-use path segments."); + // #of paths with cardinality estimate underflow. int nunderflow = 0; + final List<Callable<Boolean>> tasks = new LinkedList<Callable<Boolean>>(); for (Path x : a) { + tasks.add(new ResamplePathTask(queryEngine, x, limitIn, edgeSamples)); + + } // next Path [x]. + + // Execute in the caller's thread. + for (Callable<Boolean> task : tasks) { + + if (task.call()) { + + nunderflow++; + + } + + } + + return nunderflow; + + } + + /** + * Resample the edges along a join path. Edges are resampled based on the + * desired cutoff limit and only as necessary. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private class ResamplePathTask implements Callable<Boolean> { + + private final QueryEngine queryEngine; + private final Path x; + private final int limitIn; + private final Map<PathIds, EdgeSample> edgeSamples; + + public ResamplePathTask(final QueryEngine queryEngine, final Path x, + final int limitIn, final Map<PathIds, EdgeSample> edgeSamples) { + this.queryEngine = queryEngine; + this.x = x; + this.limitIn = limitIn; + this.edgeSamples = edgeSamples; + } + + @Override + public Boolean call() throws Exception { /* * Get the new sample limit for the path. * @@ -845,9 +892,9 @@ * round of expansion, which means that we are reading more data * than we really need to read. */ - final int limit = x.getNewLimit(limitIn); + final int limit = x.getNewLimit(limitIn); - // The cutoff join sample of the one step shorter path segment. + // The cutoff join sample of the one step shorter path segment. EdgeSample priorEdgeSample = null; for (int segmentLength = 2; segmentLength <= x.vertices.length; segmentLength++) { @@ -893,7 +940,7 @@ queryEngine, limit,// x.getPathSegment(2),// 1st edge. C,// constraints - V.length == 2,// pathIsComplete + V.length == 2,// pathIsComplete x.vertices[0].sample// source sample. 
); @@ -955,19 +1002,19 @@ // Save the result on the path. x.edgeSample = priorEdgeSample; - - if (x.edgeSample.estimateEnum == EstimateEnum.Underflow) { - if (log.isDebugEnabled()) - log.debug("Cardinality underflow: " + x); - nunderflow++; + + final boolean underflow = x.edgeSample.estimateEnum == EstimateEnum.Underflow; + if (underflow) { + if (log.isDebugEnabled()) + log.debug("Cardinality underflow: " + x); } - } // next Path [x]. - - return nunderflow; - + // Done. + return underflow; + } + } - + /** * Do one breadth first expansion. In each breadth first expansion we extend * each of the active join paths by one vertex for each remaining vertex @@ -1269,30 +1316,36 @@ return null; } - /** - * Obtain a sample and estimated cardinality (fast range count) for each - * vertex. - * - * @param queryEngine - * The query engine. - * @param limit - * The sample size. - * - * TODO Only sample vertices with an index. - * - * TODO Consider other cases where we can avoid sampling a vertex - * or an initial edge. - * <p> - * Be careful about rejecting high cardinality vertices here as - * they can lead to good solutions (see the "bar" data set - * example). - * <p> - * BSBM Q5 provides a counter example where (unless we translate - * it into a key-range constraint on an index) some vertices do - * not share a variable directly and hence will materialize the - * full cross product before filtering which is *really* - * expensive. - */ + /** + * Obtain a sample and estimated cardinality (fast range count) for each + * vertex. + * + * @param queryEngine + * The query engine. + * @param limit + * The sample size. + * + * TODO Only sample vertices with an index. + * + * TODO Consider other cases where we can avoid sampling a vertex + * or an initial edge. + * <p> + * Be careful about rejecting high cardinality vertices here as + * they can lead to good solutions (see the "bar" data set + * example). + * <p> + * BSBM Q5 provides a counter example where (unless we translate + * it into a key-range constraint on an index) some vertices do + * not share a variable directly and hence will materialize the + * full cross product before filtering which is *really* + * expensive. + * + * FIXME We need attach any access path filters that are required + * for named graphs or scale-out for the RTO to function in those + * environments. We DO NOT need to attach SPARQL FILTERs here - + * those get applied when we evaluate the cutoff joins from one + * vertex to another. + */ public void sampleAllVertices(final QueryEngine queryEngine, final int limit) { final Map<Vertex, AtomicInteger> vertexLimit = new LinkedHashMap<Vertex, AtomicInteger>(); @@ -1436,7 +1489,7 @@ private Path[] estimateInitialEdgeWeights(final QueryEngine queryEngine, final int limit) throws Exception { - final List<Path> paths = new LinkedList<Path>(); + final List<Callable<Path>> tasks = new LinkedList<Callable<Path>>(); /* * Examine all unordered vertex pairs (v1,v2) once. If any vertex has @@ -1519,30 +1572,86 @@ } - // The path segment - final IPredicate<?>[] preds = new IPredicate[] { v.pred, vp.pred }; + tasks.add(new CutoffJoinTask(queryEngine, limit, v, vp, + pathIsComplete)); - // cutoff join of the edge (v,vp) - final EdgeSample edgeSample = Path.cutoffJoin(queryEngine,// - limit, // sample limit - preds, // ordered path segment. - C, // constraints - pathIsComplete,// - v.sample // sourceSample - ); + } // next other vertex. + + } // next vertex - final Path p = new Path(v, vp, edgeSample); + /* + * Now sample those paths in parallel. 
+ */ - paths.add(p); + final List<Path> paths = new LinkedList<Path>(); - } // next other vertex. +// // Sample the paths in the caller's thread. +// for(Callable<Path> task : tasks) { +// +// paths.add(task.call()); +// +// } - } // next vertex - + // Sample the paths in parallel. + final List<Future<Path>> futures = queryEngine.getIndexManager() + .getExecutorService().invokeAll(tasks); + + // Check future, collecting new paths from each task. + for (Future<Path> f : futures) { + + paths.add(f.get()); + + } + return paths.toArray(new Path[paths.size()]); } + + /** + * Cutoff sample an initial join path consisting of two vertices. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private class CutoffJoinTask implements Callable<Path> { + private final QueryEngine queryEngine; + private final int limit; + private final Vertex v; + private final Vertex vp; + private final boolean pathIsComplete; + + public CutoffJoinTask(final QueryEngine queryEngine, final int limit, + final Vertex v, final Vertex vp, final boolean pathIsComplete) { + this.queryEngine = queryEngine; + this.limit = limit; + this.v = v; + this.vp = vp; + this.pathIsComplete = pathIsComplete; + } + + @Override + public Path call() throws Exception { + + // The path segment + final IPredicate<?>[] preds = new IPredicate[] { v.pred, vp.pred }; + + // cutoff join of the edge (v,vp) + final EdgeSample edgeSample = Path.cutoffJoin(queryEngine,// + limit, // sample limit + preds, // ordered path segment. + C, // constraints + pathIsComplete,// + v.sample // sourceSample + ); + + final Path p = new Path(v, vp, edgeSample); + + return p; + + } + + } + /** * Prune paths which are dominated by other paths. Paths are extended in * each round. Paths from previous rounds are always pruned. Of the new @@ -1607,10 +1716,10 @@ final Path Pj = a[j]; if (Pj.edgeSample == null) throw new RuntimeException("Not sampled: " + Pj); - if (pruned.contains(Pj)) { - // already pruned. + if (pruned.contains(Pj)) { + // already pruned. continue; - } + } final boolean isPiSuperSet = Pi.isUnorderedVariant(Pj); if (!isPiSuperSet) { // Can not directly compare these join paths. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java 2014-01-05 17:35:43 UTC (rev 7728) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/rto/Path.java 2014-01-05 19:23:06 UTC (rev 7729) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2011. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.bop.joinGraph.rto; import java.util.Arrays; @@ -847,7 +870,7 @@ final List<IBindingSet> result = new LinkedList<IBindingSet>(); try { - int nresults = 0; + int nresults = 0; try { IBindingSet bset = null; // Figure out the #of source samples consumed. @@ -862,7 +885,7 @@ } } } finally { - // ensure terminated regardless. + // ensure terminated regardless. runningQuery.cancel(true/* mayInterruptIfRunning */); } } finally { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
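[Editorial note on the r7729 change above: the FIXME added to resamplePaths() describes the synchronization that is still missing before the ResamplePathTask instances can actually run in parallel -- two paths sharing a prefix must not resample the same edge twice, and a late thread must block until the first thread's sample is available. One way to realize the memoization pattern that comment calls for is a ConcurrentHashMap of FutureTasks keyed by the path-prefix ids. The sketch below is an illustration under stated assumptions, not the project's implementation: the generic key and value stand in for the real PathIds and EdgeSample classes, and the Callable is assumed to wrap the actual Path.cutoffJoin() invocation.]

import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

/**
 * Memoizes the cutoff-join sample for each distinct path prefix so that
 * concurrent ResamplePathTask-style workers neither duplicate work nor
 * race on the shared map. The first caller for a given key runs the
 * sampling; later callers block on the same Future until it completes.
 */
class EdgeSampleMemoizer<K, V> {

    private final ConcurrentMap<K, Future<V>> cache =
            new ConcurrentHashMap<K, Future<V>>();

    public V getOrCompute(final K key, final Callable<V> sampler)
            throws InterruptedException, ExecutionException {

        Future<V> f = cache.get(key);

        if (f == null) {

            final FutureTask<V> ft = new FutureTask<V>(sampler);

            // Atomically install our task unless another thread beat us to it.
            f = cache.putIfAbsent(key, ft);

            if (f == null) {
                // We won the race: run the sampling in this thread.
                f = ft;
                ft.run();
            }
        }

        // Either our own result or the result computed by another thread.
        return f.get();
    }
}

[In resamplePaths() the key would presumably be the ordered array of bopIds for the edge (the PathIds object already used to key the [edgeSamples] map), and the Callable would wrap the cutoff-join sampling for that prefix. With that coordination in place, the per-path tasks could be submitted to the executor service via invokeAll(), as the initial-edge sampling in estimateInitialEdgeWeights() already does, instead of being called sequentially in the caller's thread.]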