From: <tho...@us...> - 2013-08-09 16:01:24
Revision: 7276
http://bigdata.svn.sourceforge.net/bigdata/?rev=7276&view=rev
Author: thompsonbry
Date: 2013-08-09 16:01:16 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
The leader variable was not set by the time we tested the quorum token and HAReadyToken. This made it impossible to report the error back to the quorum leader. The leader UUID is now passed as part of the gather protocol message; the follower discovers the leader synchronously when it receives that message and then passes the leader proxy into the gather task. This ensures that we can report the error back to the service that initiated the gather.

@see #720

Modified Paths:
--------------
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java	2013-08-09 15:37:16 UTC (rev 7275)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/HAGatherReleaseTimeRequest.java	2013-08-09 16:01:16 UTC (rev 7276)
@@ -23,6 +23,8 @@
  */
 package com.bigdata.ha.msg;
 
+import java.util.UUID;
+
 public class HAGatherReleaseTimeRequest implements
         IHAGatherReleaseTimeRequest {
 
@@ -33,17 +35,19 @@
     private final long token;
     private final long timestampOnLeader;
+    private final UUID leaderId;
 
     public HAGatherReleaseTimeRequest(final long token,
-            final long timestampOnLeader) {
+            final long timestampOnLeader, final UUID leaderId) {
         this.token = token;
         this.timestampOnLeader = timestampOnLeader;
+        this.leaderId = leaderId;
     }
 
     @Override
     public String toString() {
         return super.toString() + "{token=" + token + ",timestampOnLeader="
-                + timestampOnLeader + "}";
+                + timestampOnLeader + ", leaderId=" + leaderId + "}";
     }
 
     @Override
@@ -55,5 +59,10 @@
     public long getTimestampOnLeader() {
         return timestampOnLeader;
     }
+
+    @Override
+    public UUID getLeaderId() {
+        return leaderId;
+    }
 
 }

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java	2013-08-09 15:37:16 UTC (rev 7275)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/msg/IHAGatherReleaseTimeRequest.java	2013-08-09 16:01:16 UTC (rev 7276)
@@ -23,6 +23,8 @@
  */
 package com.bigdata.ha.msg;
 
+import java.util.UUID;
+
 /**
  * Message used to request information about the earliest commit point that is
  * pinned on a follower. This is used by the leader to make a decision about the
@@ -48,5 +50,11 @@
      * where the clocks are not synchronized on the services.
      */
     public long getTimestampOnLeader();
+
+    /**
+     * The UUID of the leader. This may be used to discover the service that
+     * is (or was) the leader even if the token has been invalidated.
+     */
+    public UUID getLeaderId();
 
 }

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 15:37:16 UTC (rev 7275)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 16:01:16 UTC (rev 7276)
@@ -7691,10 +7691,28 @@
 
             }
 
+            /*
+             * Lookup the leader using its UUID.
+             *
+             * Note: We do not use the token to find the leader. If the token
+             * is invalid, then we will handle that once we are in the
+             * GatherTask.
+             *
+             * Note: We do this early and pass it into the GatherTask. We can
+             * not send back an RMI response unless we know the leader's
+             * proxy.
+             */
+
+            final UUID leaderId = req.getLeaderId();
+
+            final HAGlue leader = getQuorum().getClient().getService(leaderId);
+
+            if (leader == null)
+                throw new RuntimeException(
+                        "Could not discover the quorum leader.");
+
             final Callable<IHANotifyReleaseTimeResponse> task = ((AbstractHATransactionService) AbstractJournal.this
                     .getLocalTransactionManager()
                     .getTransactionService())
-                    .newGatherMinimumVisibleCommitTimeTask(req);
+                    .newGatherMinimumVisibleCommitTimeTask(leader, req);
 
             final FutureTask<IHANotifyReleaseTimeResponse> ft = new FutureTask<IHANotifyReleaseTimeResponse>(task);

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 15:37:16 UTC (rev 7275)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 16:01:16 UTC (rev 7276)
@@ -598,7 +598,7 @@
             try {
 
                 final IHAGatherReleaseTimeRequest msg = new HAGatherReleaseTimeRequest(
-                        token, timestampOnLeader);
+                        token, timestampOnLeader, leaderId);
 
                 // Do not send message to self (leader is at index 0).
                 for (int i = 1; i < joinedServiceIds.length; i++) {
@@ -1376,9 +1376,9 @@
         @Override
         public Callable<IHANotifyReleaseTimeResponse> newGatherMinimumVisibleCommitTimeTask(
-                final IHAGatherReleaseTimeRequest req) {
+                final HAGlue leader, final IHAGatherReleaseTimeRequest req) {
 
-            return new GatherTask(req);
+            return new GatherTask(leader, req);
 
         }
@@ -1415,13 +1415,19 @@
          */
        private class GatherTask implements Callable<IHANotifyReleaseTimeResponse> {
 
+            private final HAGlue leader;
            private final IHAGatherReleaseTimeRequest req;
 
-            public GatherTask(final IHAGatherReleaseTimeRequest req) {
+            public GatherTask(final HAGlue leader,
+                    final IHAGatherReleaseTimeRequest req) {
 
+                if (leader == null)
+                    throw new IllegalArgumentException();
+
                if (req == null)
                    throw new IllegalArgumentException();
 
+                this.leader = leader;
+
                this.req = req;
 
            }
@@ -1445,7 +1451,6 @@
              */
             long now = 0L;
             UUID serviceId = null;
-            HAGlue leader = null;
 
             boolean didNotifyLeader = false;
@@ -1461,8 +1466,10 @@
                  * this case.
                  */
 
+                // Verify quorum valid for token (implies leader valid)
                 getQuorum().assertQuorum(token);
 
+                // Verify this service is HAReady for token.
                 assertHAReady(token);
 
                 /*
@@ -1481,12 +1488,13 @@
                  */
                 now = newConsensusProtocolTimestamp();
 
-                /*
-                 * If the token is invalid, making it impossible for us to
-                 * discover and message the leader, then then leader will
-                 * reset() the CyclicBarrier.
-                 */
-                leader = quorumService.getLeader(token);
+                // The leader is obtained by its serviceId above.
+//                /*
+//                 * If the token is invalid, making it impossible for us to
+//                 * discover and message the leader, then the leader will
+//                 * reset() the CyclicBarrier.
+//                 */
+//                leader = quorumService.getLeader(token);
 
                 /*
                  * Note: At this point we have everything we need to form up
@@ -1588,7 +1596,7 @@
 
                 log.error(t, t);
 
-                if (!didNotifyLeader && leader != null) {
+                if (!didNotifyLeader) {
 
                     /*
                      * Send mock response to the leader so it does not block

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java	2013-08-09 15:37:16 UTC (rev 7275)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/AbstractHATransactionService.java	2013-08-09 16:01:16 UTC (rev 7276)
@@ -34,6 +34,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import com.bigdata.ha.HAGlue;
 import com.bigdata.ha.HATXSGlue;
 import com.bigdata.ha.msg.IHAGatherReleaseTimeRequest;
 import com.bigdata.ha.msg.IHANotifyReleaseTimeResponse;
@@ -66,7 +67,7 @@
      *      Native thread leak in HAJournalServer process </a>
      */
     abstract public Callable<IHANotifyReleaseTimeResponse> newGatherMinimumVisibleCommitTimeTask(
-            final IHAGatherReleaseTimeRequest req);
+            final HAGlue leader, final IHAGatherReleaseTimeRequest req);
 
     /**
      * Coordinate the update of the <i>releaseTime</i> on each service that is
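The core of r7276 is a pattern worth isolating: carry the leader's UUID inside the protocol message and resolve the leader proxy *before* the task starts, so a failure can always be reported back even if the quorum token is later invalidated. The following is a minimal, self-contained sketch of that pattern, assuming stand-in types; LeaderProxy, GatherDispatch, and notifyGatherError are hypothetical names, not the bigdata API (in the actual commit the proxy comes from getQuorum().getClient().getService(leaderId)).

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for the discovered remote proxy of the leader.
interface LeaderProxy {
    void notifyGatherError(UUID follower, String cause); // assumed RMI method
}

public class GatherDispatch {

    // Assumed registry mapping service UUIDs to discovered proxies.
    private final Map<UUID, LeaderProxy> services = new ConcurrentHashMap<>();

    Callable<Void> newGatherTask(final UUID leaderId, final UUID selfId) {
        // Resolve the leader proxy before the task runs: if discovery fails
        // we fail fast, and if it succeeds the task can always report back,
        // even if the quorum token is invalidated while the task is running.
        final LeaderProxy leader = services.get(leaderId);
        if (leader == null)
            throw new RuntimeException("Could not discover the quorum leader.");
        return () -> {
            try {
                // ... gather the minimum visible commit time here ...
                return null;
            } catch (Exception e) {
                // The proxy is already in hand, so the error always reaches
                // the service that initiated the gather.
                leader.notifyGatherError(selfId, e.toString());
                throw e;
            }
        };
    }
}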
From: <tho...@us...> - 2013-08-09 15:37:23
Revision: 7275
http://bigdata.svn.sourceforge.net/bigdata/?rev=7275&view=rev
Author: thompsonbry
Date: 2013-08-09 15:37:16 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Added more logging around the gather protocol. See #720.

Modified Paths:
--------------
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 15:00:32 UTC (rev 7274)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 15:37:16 UTC (rev 7275)
@@ -7701,7 +7701,13 @@
             // Save reference to the gather Future.
             gatherFuture.set(ft);
 
-            // Fire and forget. The Future is checked by prepare2Phase.
+            /*
+             * Fire and forget. The Future is checked by prepare2Phase.
+             *
+             * Note: This design pattern was used due to a DGC thread leak
+             * issue. The gather protocol should be robust even though the
+             * Future is not checked (or awaited) here.
+             */
             getExecutorService().execute(ft);
 
             return;

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 15:00:32 UTC (rev 7274)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 15:37:16 UTC (rev 7275)
@@ -1600,6 +1600,9 @@
                     final IHANotifyReleaseTimeRequest resp = new HANotifyReleaseTimeRequest(
                             serviceId, 0L/* pinnedCommitTime */,
                             1L/* pinnedCommitCounter */, now/* timestamp */);
 
+                    log.warn("Sending mock response for gather protocol: cause="
+                            + t);
+
                     // Will block until barrier breaks on leader.
                     leader.notifyEarliestCommitTime(resp);
 
                 } catch (Throwable t2) {
                     log.error(t2, t2);
@@ -1650,7 +1653,7 @@
             final BarrierState barrierState = barrierRef.get();
 
             if (barrierState == null) {
-                
+
                 /*
                  * If the BarrierState reference has been cleared then it is
                  * not possible for us to count down at the barrier for this
                  * message
@@ -1664,6 +1667,9 @@
 
             try {
 
+                if (haLog.isInfoEnabled())
+                    haLog.info("resp=" + req);
+
                 getQuorum().assertLeader(barrierState.token);
 
                 // ServiceId of the follower (NPE if req is null).
@@ -1697,8 +1703,18 @@
              * the catch{} block above.
              */
 
-            // follower blocks on Thread on the leader here.
-            barrierState.barrier.await();
+            try {
+                if (haLog.isInfoEnabled()) {
+                    haLog.info("Awaiting barrier: #responses="
+                            + barrierState.responses.size() + ", #parties="
+                            + barrierState.barrier.getParties()
+                            + ", #joinedUUIDs="
+                            + barrierState.joinedServiceIds.length);
+                }
+            } finally {
+                // follower blocks on Thread on the leader here.
+                barrierState.barrier.await();
+            }
 
         }
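The last hunk above logs barrier state immediately before the blocking await() on the leader, so a hung gather leaves evidence in the log. Below is a minimal, self-contained sketch of that pattern using java.util.concurrent.CyclicBarrier; GatherBarrier and onFollowerResponse are illustrative stand-ins, not the bigdata API.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.logging.Logger;

// Sketch of the leader-side gather barrier: each follower response counts
// down the barrier, and the barrier state is logged just before await().
public class GatherBarrier {

    private static final Logger log = Logger.getLogger("gather");

    private final CyclicBarrier barrier;

    public GatherBarrier(final int parties) {
        // One party per joined service, including the leader itself.
        this.barrier = new CyclicBarrier(parties);
    }

    /** Invoked on the leader for each follower response. */
    public void onFollowerResponse(final Object resp)
            throws InterruptedException, BrokenBarrierException {
        // Log before blocking so a stuck barrier is diagnosable.
        log.info("Awaiting barrier: resp=" + resp + ", #parties="
                + barrier.getParties() + ", #waiting="
                + barrier.getNumberWaiting());
        barrier.await(); // blocks until every party has responded
    }
}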
From: <tho...@us...> - 2013-08-09 15:00:40
Revision: 7274
http://bigdata.svn.sourceforge.net/bigdata/?rev=7274&view=rev
Author: thompsonbry
Date: 2013-08-09 15:00:32 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
We have analyzed a failure mode for startABCSimultaneous. There were three services (A, B, C). B and C met in a quorum with B as the leader. A joined the quorum after the KB create but before the atomic decision point for joined versus non-joined services. However, A was not "HAReady" at the time that the PREPARE message was delivered. A therefore voted NO (it threw out an exception complaining about the storeUUID, which was incorrect because it did not yet have the root blocks from the quorum leader). The test failed because this error condition was not automatically corrected. That occurred because an exception thrown out of the ErrorTask did not cause the ErrorTask to be re-run.

- done. Will the gather task fail in unpleasant ways if the service isJoined but is-not-HAReady? [We have added some guards to verify that the service is HAReady.]

- done. Why did A enter the ErrorTask? [Because it failed to PREPARE and threw an exception back to B (the leader). The guards that we added will provide a clearer message regarding the cause in the future.]

- done. Why didn't A recover from the ErrorTask? [The ErrorTask was not re-run after it threw out an exception. This has been fixed as described above.]

- done. How did A enter the Error task - we do not see anything logged. [Nothing was logged on A, but the event was logged on B when the PREPARE message threw out an exception. We have added a StackInfoReport to enterErrorState() such that the entry point will always be reported.]

With the changes described above, I am still observing a failure mode for testStartABCSimultaneous().

@see https://sourceforge.net/apps/trac/bigdata/ticket/720

Modified Paths:
--------------
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 14:39:41 UTC (rev 7273)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java	2013-08-09 15:00:32 UTC (rev 7274)
@@ -5770,7 +5770,7 @@
 
             } else {
 
-                throw new AssertionError("VOID setToken");
+                throw new AssertionError("VOID setToken");// FIXME HA-ABC
 
             }
@@ -5928,6 +5928,25 @@
 
         return haStatus;
 
     }
+
+    /**
+     * Assert that the {@link #getHAReady()} token has the specified value.
+     *
+     * @param token
+     *            The specified value.
+     */
+    final public void assertHAReady(final long token) throws QuorumException {
+
+        if (quorum == null)
+            return;
+
+        if (token != haReadyToken) {
+
+            throw new QuorumException(HAStatusEnum.NotReady.toString());
+
+        }
+
+    }
 
     /**
      * Install identical root blocks on the journal. This is used for a few
@@ -6586,6 +6605,8 @@
         // Do not prepare if the token is wrong.
         quorum.assertQuorum(prepareToken);
 
+        assertHAReady(prepareToken);
+
         // Save off a reference to the prepare request.
         prepareRequest.set(prepareMessage);
@@ -6771,10 +6792,11 @@
 
             if (rootBlock == null)
                 throw new IllegalStateException();
-
-            // Validate the new root block against the current root block.
-            validateNewRootBlock(/*isJoined,*/ isLeader, AbstractJournal.this._rootBlock, rootBlock);
-
+
+            // Validate new root block against current root block.
+            validateNewRootBlock(/* isJoined, */isLeader,
+                    AbstractJournal.this._rootBlock, rootBlock);
+
             if (haLog.isInfoEnabled())
                 haLog.info("validated=" + rootBlock);
@@ -7058,6 +7080,9 @@
             // Verify that the same quorum is still met.
             quorum.assertQuorum(prepareToken);
 
+            // Verify HA ready for that token.
+            assertHAReady(prepareToken);
+
             if (!isLeader) {// && isJoined) {
 
                 /*

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 14:39:41 UTC (rev 7273)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/Journal.java	2013-08-09 15:00:32 UTC (rev 7274)
@@ -1460,8 +1460,11 @@
                  * invalid. The leader will reset() the CylicBarrier for
                  * this case.
                  */
+
                 getQuorum().assertQuorum(token);
 
+                assertHAReady(token);
+
                 /*
                  * If the quorumService is null because this service is
                  * shutting down then the leader will notice the

Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2013-08-09 14:39:41 UTC (rev 7273)
+++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2013-08-09 15:00:32 UTC (rev 7274)
@@ -1101,28 +1101,27 @@
 
                 } else {
 
-                    log.error(t, t);
-
                     /*
                      * Unhandled error.
                      */
 
+                    log.error(t, t);
+
                     /*
                      * Sleep for a moment to avoid tight error handling
                      * loops that can generate huge log files.
                      */
 
-                    Thread.sleep(250/*ms*/);
-
-                    if (runState != RunStateEnum.Error) {
+                    Thread.sleep(250/* ms */);
 
-                        /*
-                         * Transition to the Error task (but do not allow
-                         * the error task to interrupt itself).
-                         */
+                    /*
+                     * Transition to the Error task.
+                     *
+                     * Note: The ErrorTask can not interrupt itself even if
+                     * it was the current task since the current task has
+                     * been terminated by the thrown exception!
+                     */
 
-                        enterErrorState();// enterRunState(new ErrorTask());
-
-                    }
+                    enterErrorState();
 
                     // Done.
                     return null;
@@ -1243,6 +1242,8 @@
 
         @Override
         public void enterErrorState() {
 
+            log.warn(new StackInfoReport());
+
             enterRunState(new ErrorTask());
 
         }
@@ -1592,7 +1593,7 @@
 
             }
 
             public Void call() throws Exception {
-                enterRunState(new ErrorTask());
+                enterErrorState();
                 return null;
             }
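The assertHAReady(token) guard added above rejects protocol messages (PREPARE, gather) that arrive before this service has obtained the root blocks from the quorum leader, turning the confusing storeUUID complaint into a clear NotReady error. A minimal sketch of that guard pattern follows; the names (HAReadyGuard, haReadyToken, NOT_READY) are illustrative stand-ins, not the bigdata API.

// Sketch of the HAReady guard: a volatile token published when the service
// becomes HA-ready, and an assertion that rejects messages carrying a token
// the service is not yet ready for.
public class HAReadyGuard {

    private static final long NOT_READY = -1L;

    // Published once the service has the root blocks from the quorum leader.
    private volatile long haReadyToken = NOT_READY;

    public void setHAReady(final long token) {
        this.haReadyToken = token;
    }

    /** Throws if this service is not HA-ready for the given quorum token. */
    public void assertHAReady(final long token) {
        if (token == NOT_READY || token != haReadyToken)
            throw new IllegalStateException("NotReady");
    }
}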
From: <tho...@us...> - 2013-08-09 14:39:48
Revision: 7273
http://bigdata.svn.sourceforge.net/bigdata/?rev=7273&view=rev
Author: thompsonbry
Date: 2013-08-09 14:39:41 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Added some more backoff retry latency values.

Modified Paths:
--------------
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java	2013-08-09 13:09:14 UTC (rev 7272)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/pipeline/HASendService.java	2013-08-09 14:39:41 UTC (rev 7273)
@@ -308,7 +308,7 @@
         * A series of timeouts used when we need to re-open the
         * {@link SocketChannel}.
         */
-        private final static long[] retryMillis = new long[] { 1, 5, 10, 50, 100 };
+        private final static long[] retryMillis = new long[] { 1, 5, 10, 50, 100, 250, 500 };
 
        /**
        * (Re-)open the {@link SocketChannel} if it is closed and this service is
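retryMillis drives an escalating retry schedule when the SocketChannel must be re-opened: quick retries first, longer backoff later, and r7273 simply extends the tail with 250 and 500 ms. Below is a small self-contained sketch of that table-driven retry loop, assuming hypothetical names (RetryTable, ChannelOpener, openWithRetry); the open() body is a placeholder.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Sketch of a fixed-table escalating retry: walk the schedule, sleeping the
// given interval after each failure, and rethrow the last error when the
// schedule is exhausted.
public class RetryTable {

    private static final long[] RETRY_MILLIS = { 1, 5, 10, 50, 100, 250, 500 };

    interface ChannelOpener {
        void open() throws IOException; // stand-in for re-opening the channel
    }

    static void openWithRetry(final ChannelOpener opener)
            throws IOException, InterruptedException {
        IOException lastError = null;
        for (final long millis : RETRY_MILLIS) {
            try {
                opener.open();
                return; // success
            } catch (IOException ex) {
                lastError = ex;
                TimeUnit.MILLISECONDS.sleep(millis); // back off, then retry
            }
        }
        throw lastError; // exhausted the retry schedule
    }
}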
From: <tho...@us...> - 2013-08-09 13:09:28
Revision: 7272
http://bigdata.svn.sourceforge.net/bigdata/?rev=7272&view=rev
Author: thompsonbry
Date: 2013-08-09 13:09:14 +0000 (Fri, 09 Aug 2013)

Log Message:
-----------
Merge of branches/READ_CACHE @ r7271.

{{{
merge --depth=infinity https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/READ_CACHE /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
C    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/service/jini/master/ResourceBufferTask.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/service/jini/master/MappedTaskMaster.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/disco
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/disco
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/attr
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/attr
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/util/config
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-jini/src/java/com/bigdata/util/config
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/lubm
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/lubm
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/uniprot/src
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/uniprot/src
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/uniprot
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/uniprot
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/btc/src/resources
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/btc/src/resources
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/btc
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf/btc
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-perf
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/src/resources/bin/config
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/src/resources/bin/config
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/lib/jetty
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/lib/jetty
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/bop/joinGraph
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/bop/joinGraph
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/bop/util
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/bop/util
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/jsr166
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/jsr166
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractKeyRangeMasterTestCase.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskIdleTimeout.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithErrors.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractMasterTestCase.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestFileSystemScanner.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/service/TestRangeQuery.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBuffer.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBufferWithChunks.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/util/httpd
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/test/com/bigdata/util/httpd
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/aggregate
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/aggregate
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/util
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/util
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/joinGraph
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/bop/joinGraph
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/jsr166
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/jsr166
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/htree/raba
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/htree/raba
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/osgi
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/osgi
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-compatibility
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-compatibility
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket693.txt
C    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket693.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/bench
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/bench
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/bench
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.srx
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.ttl
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.rq
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-02.rq
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestOptionals.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/internal
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/internal
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/relation
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/test/com/bigdata/rdf/relation
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/changesets
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/error
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/error
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathNode.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTEmptyGroupOptimizer.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java
A    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/internal
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/internal
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/relation
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/relation
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/util
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/java/com/bigdata/rdf/util
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/samples
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/bigdata-rdf/src/samples
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/LEGAL
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/LEGAL
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/lib
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/lib
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test/it/unimi/dsi
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test/it/unimi/dsi
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test/it/unimi
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test/it/unimi
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/test
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java/it/unimi
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java/it/unimi
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java/it
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java/it
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src/java
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils/src
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils
U    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH/dsi-utils
--- Merging r7214 through r7271 into /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
C    /Users/bryan/Documents/workspace/READ_CACHE_CLEAN_FOR_PATCH
Merge complete.
===== File Statistics: =====
        Added: 11
      Updated: 51
==== Property Statistics: =====
      Updated: 49
==== Conflict Statistics: =====
   File conflicts: 2
 Property conflicts: 1
}}}

The conflicts were resolved as follows:

- AbstractHAJournalServerTestCase: the changes to support the setting of HTTP headers on the ConnectOptions class were accepted. The current version of the class was used otherwise.

- BigdataSPARQLUpdateTest2: resolved using the incoming version. This was a change backported from branches/READ_CACHE into the 1.2.x maintenance branch.

- A properties conflict existed on the top-level directory of the project in SVN for the "ant-build" property. I have discarded that update. Hopefully we will pick up the update when we merge READ_CACHE2 back to READ_CACHE.

@see https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA)

Revision Links:
--------------
http://bigdata.svn.sourceforge.net/bigdata/?rev=7271&view=rev
http://bigdata.svn.sourceforge.net/bigdata/?rev=7214&view=rev

Modified Paths:
--------------
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBuffer.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBufferWithChunks.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/TestRangeQuery.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractKeyRangeMasterTestCase.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractMasterTestCase.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestFileSystemScanner.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskIdleTimeout.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithErrors.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/service/jini/master/MappedTaskMaster.java
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/service/jini/master/ResourceBufferTask.java
    branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTEmptyGroupOptimizer.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestOptionals.java
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java

Added Paths:
-----------
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.rq
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.srx
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.ttl
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-02.rq
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket693.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket693.txt
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java

Property Changed:
----------------
    branches/READ_CACHE2/
    branches/READ_CACHE2/bigdata/lib/jetty/
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/aggregate/
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/joinGraph/
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/util/
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/htree/raba/
    branches/READ_CACHE2/bigdata/src/java/com/bigdata/jsr166/
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/bop/joinGraph/
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/bop/util/
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/jsr166/
    branches/READ_CACHE2/bigdata/src/test/com/bigdata/util/httpd/
    branches/READ_CACHE2/bigdata-compatibility/
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/attr/
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/disco/
    branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/util/config/
    branches/READ_CACHE2/bigdata-perf/
    branches/READ_CACHE2/bigdata-perf/btc/
    branches/READ_CACHE2/bigdata-perf/btc/src/resources/
    branches/READ_CACHE2/bigdata-perf/lubm/
    branches/READ_CACHE2/bigdata-perf/uniprot/
    branches/READ_CACHE2/bigdata-perf/uniprot/src/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/error/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/internal/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/relation/
    branches/READ_CACHE2/bigdata-rdf/src/java/com/bigdata/rdf/util/
    branches/READ_CACHE2/bigdata-rdf/src/samples/
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/internal/
    branches/READ_CACHE2/bigdata-rdf/src/test/com/bigdata/rdf/relation/
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
    branches/READ_CACHE2/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/
    branches/READ_CACHE2/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/
    branches/READ_CACHE2/dsi-utils/
    branches/READ_CACHE2/dsi-utils/LEGAL/
    branches/READ_CACHE2/dsi-utils/lib/
    branches/READ_CACHE2/dsi-utils/src/
    branches/READ_CACHE2/dsi-utils/src/java/
    branches/READ_CACHE2/dsi-utils/src/java/it/
    branches/READ_CACHE2/dsi-utils/src/java/it/unimi/
    branches/READ_CACHE2/dsi-utils/src/test/
    branches/READ_CACHE2/dsi-utils/src/test/it/unimi/
    branches/READ_CACHE2/dsi-utils/src/test/it/unimi/dsi/
    branches/READ_CACHE2/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
    branches/READ_CACHE2/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
    branches/READ_CACHE2/osgi/
    branches/READ_CACHE2/src/resources/bin/config/

Property changes on: branches/READ_CACHE2
___________________________________________________________________
Modified: svn:mergeinfo
   - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7213
/branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/INT64_BRANCH:4486-4522
/branches/JOURNAL_HA_BRANCH:2596-4066
/branches/LARGE_LITERALS_REFACTOR:4175-4387
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/RWSTORE_1_1_0_DEBUG:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3392-3437,3656-4061
   + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785
/branches/BIGDATA_RELEASE_1_2_0:6766-7270
/branches/BTREE_BUFFER_BRANCH:2004-2045
/branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782
/branches/INT64_BRANCH:4486-4522
/branches/JOURNAL_HA_BRANCH:2596-4066
/branches/LARGE_LITERALS_REFACTOR:4175-4387
/branches/LEXICON_REFACTOR_BRANCH:2633-3304
/branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE:7215-7271
/branches/RWSTORE_1_1_0_DEBUG:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836
/branches/bugfix-btm:2594-3237
/branches/dev-btm:2574-2730
/branches/fko:3150-3194
/trunk:3392-3437,3656-4061

Property changes on: branches/READ_CACHE2/bigdata/lib/jetty
___________________________________________________________________
Modified: svn:mergeinfo
   - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7213
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836
   + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7270
/branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/lib/jetty:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836

Property changes on: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/aggregate
___________________________________________________________________
Modified: svn:mergeinfo
   - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7213
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836
   + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
/branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7270
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
/branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate:7215-7271
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935
/branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836

Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java
===================================================================
--- branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java	2013-08-09 12:34:38 UTC (rev 7271)
+++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java	2013-08-09 13:09:14 UTC (rev 7272)
@@ -63,7 +63,6 @@
 import com.bigdata.rwstore.sector.IMemoryManager;
 import com.bigdata.service.IBigdataFederation;
 import com.bigdata.striterator.ICloseableIterator;
-import com.bigdata.util.InnerCause;
 import com.bigdata.util.concurrent.Memoizer;
 import com.sun.jini.thread.Executor;
@@ -806,6 +805,11 @@
          * is invoked from within the running task in order to remove
          * the latency for that RMI from the thread which submits tasks
          * to consume chunks.
+         *
+         * FIXME This is a protocol that should be optimized to provide
+         * better throughput for scale-out. E.g., a single socket on
+         * which we transmit and receive notice about operator
+         * start/stop metadata using some non-blocking service.
          */
 
//        final boolean lastPassRequested = ((PipelineOp) (t.bop))
@@ -1292,7 +1296,7 @@
                 halt(new Exception("task=" + toString() + ", cause=" + t, t));
 
                 if (getCause() != null) {
                     // Abnormal termination - wrap and rethrow.
-                    
+                    // TODO Why is this line empty? (I think that it is handled by the ChunkTaskWrapper.)
                 }
 
                 // otherwise ignore exception (normal completion).
             } finally {
@@ -1304,6 +1308,19 @@
                  * it is closed.
                  */
                 context.getSource().close();
+                /**
+                 * Ensure that the task is cancelled.
+                 *
+                 * Note: This does not appear to be necessary. I am observing
+                 * the interrupt of the operator evaluation task regardless.
+                 *
+                 * @see https://sourceforge.net/apps/trac/bigdata/ticket/707
+                 *      (BlockingBuffer.close() does not unblock threads)
+                 * @see https://sourceforge.net/apps/trac/bigdata/ticket/716
+                 *      (Verify that IRunningQuery instances (and nested
+                 *      queries) are correctly cancelled when interrupted)
+                 */
+//                ft.cancel(true/*mayInterruptIfRunning*/);
 
             }
 
             // Done.
return null; Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-08-09 12:34:38 UTC (rev 7271) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-08-09 13:09:14 UTC (rev 7272) @@ -886,7 +886,11 @@ shutdown = true; - // stop the query engine. + /* + * Stop the QueryEngineTask: this is the task that accepts chunks that + * are available for evaluation and assigns them to the + * AbstractRunningQuery. + */ final Future<?> f = engineFuture.get(); if (f != null) { if (log.isInfoEnabled()) @@ -894,7 +898,7 @@ f.cancel(true/* mayInterruptIfRunning */); } - // stop the service on which we ran the query engine. + // stop the service on which we ran the QueryEngineTask. final ExecutorService s = engineService.get(); if (s != null) { if (log.isInfoEnabled()) @@ -1425,8 +1429,8 @@ * a safety check against UUID collisions which might be non-random. */ throw new RuntimeException("Query exists with that UUID: uuid=" - + runningQuery.getQueryId()); - + + runningQuery.getQueryId()); + } // final String tag = query.getProperty(QueryHints.TAG, @@ -1521,7 +1525,34 @@ } - // Query was newly registered. + /* + * Query was newly registered. + */ + try { + + // Verify QueryEngine is running. + assertRunning(); + + } catch (IllegalStateException ex) { + + /** + * The query engine either is not initialized or was shutdown + * concurrent with adding the new query to the running query + * table. We yank the query out of the running query table in + * order to have no net effect and then throw out the exception + * indicating that the QueryEngine has been shutdown. 
+ * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/705"> + * Race condition in QueryEngine.putIfAbsent() </a> + */ + + runningQueries.remove(queryId, runningQuery); + + throw ex; + + } + return runningQuery; } finally { Property changes on: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-08-09 12:34:38 UTC (rev 7271) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-08-09 13:09:14 UTC (rev 7272) @@ -265,7 +265,8 @@ final IBindingSet[] chunk = sitr.next(); - processChunk(chunk); + for (IBindingSet bs : chunk) + processChunk(new IBindingSet[] { bs }); } Property changes on: branches/READ_CACHE2/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java 2013-08-09 12:34:38 UTC (rev 7271) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java 2013-08-09 13:09:14 UTC (rev 7272) @@ -44,11 +44,11 @@ private volatile boolean didStart = false; - public FutureTaskMon(Callable<T> callable) { + public FutureTaskMon(final Callable<T> callable) { super(callable); } - public FutureTaskMon(Runnable runnable, T result) { + public FutureTaskMon(final Runnable runnable, final T result) { super(runnable, result); } @@ -76,14 +76,13 @@ final boolean ret = super.cancel(mayInterruptIfRunning); - if (didStart && mayInterruptIfRunning && ret && log.isDebugEnabled()) { - try { - throw new RuntimeException("cancel call trace"); - } catch (RuntimeException re) { - log.debug("May interrupt running task", re); - } - } + if (didStart && mayInterruptIfRunning && ret && log.isDebugEnabled()) { + log.debug("May have interrupted running task", + new RuntimeException("Stack trace of cancel() invocation")); + + } + return ret; } Property changes on: branches/READ_CACHE2/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 Property changes on: branches/READ_CACHE2/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166:7215-7271 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2013-08-09 12:34:38 UTC (rev 7271) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2013-08-09 13:09:14 UTC (rev 7272) @@ -31,7 +31,7 @@ import java.util.Iterator; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.RejectedExecutionException; import org.apache.log4j.Logger; @@ -1182,14 +1182,21 @@ final BlockingBuffer<R[]> buffer = new BlockingBuffer<R[]>( chunkOfChunksCapacity); - final ExecutorService executorService = indexManager - .getExecutorService(); + /** + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ - final Future<Void> future = executorService - .submit(new ChunkConsumerTask<R>(this, src, buffer)); + // Wrap computation as FutureTask. + final FutureTask<Void> ft = new FutureTask<Void>( + new ChunkConsumerTask<R>(this, src, buffer)); - buffer.setFuture(future); + // Set Future on BlockingBuffer *before* starting computation. + buffer.setFuture(ft); + // Start computation. + indexManager.getExecutorService().submit(ft); + return new ChunkConsumerIterator<R>(buffer.iterator(), keyOrder); } Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-09 12:34:38 UTC (rev 7271) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-09 13:09:14 UTC (rev 7272) @@ -1541,7 +1541,17 @@ log.info("Interrupted:... [truncated message content] |
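Two of the changes merged above implement concurrency patterns that are easy to get wrong, so brief sketches may help. First, the AccessPath/BlockingBuffer change (trac 707): the producer is wrapped as a FutureTask and the Future is handed to the buffer *before* the task is submitted, so there is no window in which closing the consumer cannot reach (and cancel) the producer. A minimal JDK-only sketch of that ordering, using a hypothetical Handoff class rather than the actual com.bigdata.relation.accesspath.BlockingBuffer:

{{{
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

/** Stand-in for a blocking buffer whose consumer owns the producer's Future. */
class Handoff<T> {
    private final BlockingQueue<T> queue = new ArrayBlockingQueue<T>(100);
    private volatile Future<?> producerFuture;
    void setFuture(final Future<?> f) {
        if (f == null) throw new IllegalArgumentException();
        if (producerFuture != null) throw new IllegalStateException();
        producerFuture = f;
    }
    void put(final T t) throws InterruptedException { queue.put(t); }
    T take() throws InterruptedException { return queue.take(); }
    /** Closing the consumer side cancels (interrupts) the producer. */
    void close() {
        final Future<?> f = producerFuture;
        if (f != null) f.cancel(true/* mayInterruptIfRunning */);
    }
}

public class SetFutureBeforeSubmit {
    public static void main(final String[] args) throws Exception {
        final ExecutorService exec = Executors.newSingleThreadExecutor();
        final Handoff<Integer> buffer = new Handoff<Integer>();
        // Wrap the producer as a FutureTask rather than submit()ing it directly.
        final FutureTask<Void> ft = new FutureTask<Void>(new Callable<Void>() {
            public Void call() throws Exception {
                for (int i = 0;; i++)
                    buffer.put(i); // blocks when full; exits via interrupt
            }
        });
        // Set the Future on the buffer *before* starting the computation.
        buffer.setFuture(ft);
        exec.submit(ft); // start the producer only now
        System.out.println(buffer.take()); // consume one element
        buffer.close();  // unblocks the producer via ft.cancel(true)
        exec.shutdown();
    }
}
}}}

If submit() ran first and setFuture() second (the old order), a consumer that closed the buffer in between would find no Future to cancel and the producer could block forever on a full buffer.

Second, the QueryEngine change (trac 705): putIfAbsent() can race with shutdown(), so after a query is newly registered the engine re-checks its run state and, on failure, removes exactly the entry it just added before rethrowing. A sketch of that register-recheck-compensate pattern, again with hypothetical names (Registry, not the bigdata QueryEngine):

{{{
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class Registry<V> {
    private final ConcurrentMap<UUID, V> running = new ConcurrentHashMap<UUID, V>();
    private volatile boolean shutdown = false;

    void shutdown() { shutdown = true; /* ... then cancel/drain [running] */ }

    private void assertRunning() {
        if (shutdown) throw new IllegalStateException("Not running");
    }

    V register(final UUID queryId, final V query) {
        assertRunning(); // cheap first check
        if (running.putIfAbsent(queryId, query) != null)
            throw new RuntimeException("Already registered: " + queryId);
        try {
            // Re-check: shutdown() may have drained the table between the
            // first check and putIfAbsent(), leaving our entry stranded.
            assertRunning();
        } catch (final IllegalStateException ex) {
            // Compensate for no net effect: remove only our own mapping.
            running.remove(queryId, query);
            throw ex;
        }
        return query;
    }
}
}}}

The two-argument remove(queryId, query) matters here: it removes the entry only if it is still mapped to the value this thread added, so it cannot clobber a concurrent registration under the same UUID.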
From: <tho...@us...> - 2013-08-09 12:34:51
Revision: 7271 http://bigdata.svn.sourceforge.net/bigdata/?rev=7271&view=rev Author: thompsonbry Date: 2013-08-09 12:34:38 +0000 (Fri, 09 Aug 2013) Log Message: ----------- Merge changes in the 1.2.x maintenance branch into the READ_CACHE branch. This catches up the HA branch with the ongoing development and maintenance of the 1.2.x release. @ r7270. Merging r7213 through r7270 from branches/BIGDATA_RELEASE_1_2_0 into branches/BIGDATA_READ_CACHE {{{ merge --depth=infinity https://bigdata.svn.sourceforge.net/svnroot/bigdata/branches/BIGDATA_RELEASE_1_2_0 /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE --- Merging r7213 through r7270 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/jsk-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/outrigger-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/fiddler-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/phoenix-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/norm-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/group-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/mahalo-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/reggie-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/browser-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/sdm-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-dl/mercury-dl.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib-ext/jsk-policy.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/preferredlistgen.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/outrigger.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/sharedvm.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/phoenix.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/phoenix-init.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/norm.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/browser.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/computehttpmdcodebase.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/mercury.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/envcheck.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jini-ext.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/checkser.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/destroy.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jsk-lib.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/group.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/classserver.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/classdep.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/tools.jar C 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/mahalo.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/phoenix-group.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/checkconfigurationfile.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jsk-debug-policy.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/fiddler.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jsk-platform.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/computedigest.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jarwrapper.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jini-core.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/sun-util.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/extra.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/outrigger-snaplogstore.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/start.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/serviceui.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/jsk-resources.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/lib/jini/lib/reggie.jar C /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/service/jini/master/ResourceBufferTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-jini/src/java/com/bigdata/service/jini/master/MappedTaskMaster.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractKeyRangeMasterTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskIdleTimeout.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithErrors.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractMasterTestCase.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestFileSystemScanner.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/service/TestRangeQuery.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBuffer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBufferWithChunks.java U 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket693.txt U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket693.java --- Merging r7213 through r7270 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java --- Merging r7213 through r7270 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java --- Merging r7213 through r7270 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java U 
/Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java --- Merging r7213 through r7270 into /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.srx A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.ttl A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.rq A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-02.rq U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestOptionals.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathNode.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTEmptyGroupOptimizer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java A /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java U /Users/bryan/Documents/workspace/BIGDATA_READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java Merge complete. ===== File Statistics: ===== Added: 11 Updated: 52 ==== Conflict Statistics: ===== File conflicts: 47 }}} All conflicts were resolved. Out of 47 conflicts, all but one were JAR conflicts. 
All of the JAR conflicts are edits that were backported to the 1.2.x maintenance branch from the READ_CACHE branch. The same jars are currently in both branches. To simplify things, I have accepted the existing jars in the READ_CACHE branch. In addition, there were some conflicts in AbstractHAJournalServerTestCase.java that arose from the recent changes in the 1.2.x branch to allow the caller to set the various HTTP header fields for the RemoteRepository. The HA test suite reuses the ConnectOpts class and parallel edits were made in AbstractHAJournalServerTestCase.java to support this. See https://sourceforge.net/apps/trac/bigdata/ticket/530 (Journal HA) Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7213&view=rev http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java branches/READ_CACHE/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java branches/READ_CACHE/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java branches/READ_CACHE/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBuffer.java branches/READ_CACHE/bigdata/src/test/com/bigdata/relation/accesspath/TestBlockingBufferWithChunks.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/TestRangeQuery.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractKeyRangeMasterTestCase.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/AbstractMasterTestCase.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestFileSystemScanner.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTask.java 
branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskIdleTimeout.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithErrors.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java branches/READ_CACHE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/service/jini/master/MappedTaskMaster.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/service/jini/master/ResourceBufferTask.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ArbitraryLengthPathNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/IReorderableNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/JoinGroupNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/PropertyPathNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StatementPatternNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/UnionNode.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/DGExpander.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTComplexOptionalOptimizer.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTEmptyGroupOptimizer.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTStaticJoinOptimizer.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/store/TripleStoreUtility.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestOptionals.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithoutSids.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java Added Paths: ----------- branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/StaticOptimizer.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.rq branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.srx branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-01.ttl 
branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/non-matching-optional-02.rq branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestTicket693.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/ticket693.txt branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java Property Changed: ---------------- branches/READ_CACHE/ branches/READ_CACHE/bigdata/lib/jetty/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba/ branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/test/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/util/httpd/ branches/READ_CACHE/bigdata-compatibility/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/attr/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/disco/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/util/config/ branches/READ_CACHE/bigdata-perf/ branches/READ_CACHE/bigdata-perf/btc/ branches/READ_CACHE/bigdata-perf/btc/src/resources/ branches/READ_CACHE/bigdata-perf/lubm/ branches/READ_CACHE/bigdata-perf/uniprot/ branches/READ_CACHE/bigdata-perf/uniprot/src/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/READ_CACHE/bigdata-rdf/src/samples/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/dsi-utils/ branches/READ_CACHE/dsi-utils/LEGAL/ branches/READ_CACHE/dsi-utils/lib/ branches/READ_CACHE/dsi-utils/src/ branches/READ_CACHE/dsi-utils/src/java/ branches/READ_CACHE/dsi-utils/src/java/it/ branches/READ_CACHE/dsi-utils/src/java/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/dsi/ branches/READ_CACHE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/osgi/ branches/READ_CACHE/src/resources/bin/config/ Property changes on: branches/READ_CACHE ___________________________________________________________________ Modified: svn:mergeinfo 
- /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7213 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7270 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/READ_CACHE/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7213 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7270 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7270 
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -63,7 +63,6 @@ import com.bigdata.rwstore.sector.IMemoryManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.ICloseableIterator; -import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.Memoizer; import com.sun.jini.thread.Executor; @@ -806,6 +805,11 @@ * is invoked from within the running task in order to remove * the latency for that RMI from the thread which submits tasks * to consume chunks. + * + * FIXME This is a protocol that should be optimized to provide + * better throughput for scale-out. E.g., a single socket on + * which we transmit and receive notice about operator + * start/stop metadata using some non-blocking service. */ // final boolean lastPassRequested = ((PipelineOp) (t.bop)) @@ -1292,7 +1296,7 @@ halt(new Exception("task=" + toString() + ", cause=" + t, t)); if (getCause() != null) { // Abnormal termination - wrap and rethrow. - + // TODO Why is this line empty? (I think that it is handled by the ChunkTaskWrapper.) } // otherwise ignore exception (normal completion). } finally { @@ -1304,6 +1308,19 @@ * it is closed. */ context.getSource().close(); + /** + * Ensure that the task is cancelled. + * + * Note: This does not appear to be necessary. I am observing + * the interrupt of the operator evaluation task regardless. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/707 + * (BlockingBuffer.close() does not unblock threads) + * @see https://sourceforge.net/apps/trac/bigdata/ticket/716 + * (Verify that IRunningQuery instances (and nested + * queries) are correctly cancelled when interrupted) + */ +// ft.cancel(true/*mayInterruptIfRunning*/); } // Done. return null; Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -886,7 +886,11 @@ shutdown = true; - // stop the query engine. + /* + * Stop the QueryEngineTask: this is the task that accepts chunks that + * are available for evaluation and assigns them to the + * AbstractRunningQuery. + */ final Future<?> f = engineFuture.get(); if (f != null) { if (log.isInfoEnabled()) @@ -894,7 +898,7 @@ f.cancel(true/* mayInterruptIfRunning */); } - // stop the service on which we ran the query engine. + // stop the service on which we ran the QueryEngineTask. 
final ExecutorService s = engineService.get(); if (s != null) { if (log.isInfoEnabled()) @@ -1425,8 +1429,8 @@ * a safety check against UUID collisions which might be non-random. */ throw new RuntimeException("Query exists with that UUID: uuid=" - + runningQuery.getQueryId()); - + + runningQuery.getQueryId()); + } // final String tag = query.getProperty(QueryHints.TAG, @@ -1521,7 +1525,34 @@ } - // Query was newly registered. + /* + * Query was newly registered. + */ + try { + + // Verify QueryEngine is running. + assertRunning(); + + } catch (IllegalStateException ex) { + + /** + * The query engine either is not initialized or was shutdown + * concurrent with adding the new query to the running query + * table. We yank the query out of the running query table in + * order to have no net effect and then throw out the exception + * indicating that the QueryEngine has been shutdown. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/705"> + * Race condition in QueryEngine.putIfAbsent() </a> + */ + + runningQueries.remove(queryId, runningQuery); + + throw ex; + + } + return runningQuery; } finally { Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/paths/ArbitraryLengthPathOp.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -265,7 +265,8 @@ final IBindingSet[] chunk = sitr.next(); - processChunk(chunk); + for (IBindingSet bs : chunk) + processChunk(new IBindingSet[] { bs }); } Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7213 
/branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/concurrent/FutureTaskMon.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -44,11 +44,11 @@ private volatile boolean didStart = false; - public FutureTaskMon(Callable<T> callable) { + public FutureTaskMon(final Callable<T> callable) { super(callable); } - public FutureTaskMon(Runnable runnable, T result) { + public FutureTaskMon(final Runnable runnable, final T result) { super(runnable, result); } @@ -76,14 +76,13 @@ final boolean ret = super.cancel(mayInterruptIfRunning); - if (didStart && mayInterruptIfRunning && ret && log.isDebugEnabled()) { - try { - throw new RuntimeException("cancel call trace"); - } catch (RuntimeException re) { - log.debug("May interrupt running task", re); - } - } + if (didStart && mayInterruptIfRunning && ret && log.isDebugEnabled()) { + log.debug("May have interrupted running task", + new RuntimeException("Stack trace of cancel() invocation")); + + } + return ret; } Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 
/branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7213 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7270 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/jsr166:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/jsr166:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/AccessPath.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -31,7 +31,7 @@ import java.util.Iterator; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.RejectedExecutionException; import org.apache.log4j.Logger; @@ -1182,14 +1182,21 @@ final BlockingBuffer<R[]> buffer = new BlockingBuffer<R[]>( chunkOfChunksCapacity); - final ExecutorService executorService = indexManager - .getExecutorService(); + /** + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ - final Future<Void> future = executorService - .submit(new ChunkConsumerTask<R>(this, src, buffer)); + // Wrap computation as FutureTask. + final FutureTask<Void> ft = new FutureTask<Void>( + new ChunkConsumerTask<R>(this, src, buffer)); - buffer.setFuture(future); + // Set Future on BlockingBuffer *before* starting computation. + buffer.setFuture(ft); + // Start computation. 
+ indexManager.getExecutorService().submit(ft); + return new ChunkConsumerIterator<R>(buffer.iterator(), keyOrder); } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -1541,7 +1541,17 @@ log.info("Interrupted: " + this, ex); else if (log.isInfoEnabled()) log.info("Interrupted: " + this); - + /** + * Note: Propagating the interrupt appears to be necessary here + * in order to have timely termination of nested subqueries. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/707 + * (BlockingBuffer.close() does not unblock threads) + * @see https://sourceforge.net/apps/trac/bigdata/ticket/716 + * (Verify that IRunningQuery instances (and nested + * queries) are correctly cancelled when interrupted) + */ + Thread.currentThread().interrupt(); return false; } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/IBlockingBuffer.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -29,6 +29,7 @@ package com.bigdata.relation.accesspath; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import com.bigdata.relation.IMutableRelation; import com.bigdata.relation.IRelation; @@ -97,7 +98,12 @@ /** * Set the {@link Future} for the source processing writing on the - * {@link IBlockingBuffer}. + * {@link IBlockingBuffer} (the producer). + * <p> + * Note: You should always wrap the task as a {@link FutureTask} and set the + * {@link Future} on the {@link IBlockingBuffer} before you start the + * consumer. This ensures that the producer will be cancelled if the + * consumer is interrupted. * * @param future * The {@link Future}. @@ -106,7 +112,10 @@ * if the argument is <code>null</code>. * @throws IllegalStateException * if the future has already been set. - * + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + * * @todo There should be a generic type for this. */ public void setFuture(Future future); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/accesspath/MultiSourceSequentialCloseableIterator.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -131,19 +131,29 @@ // current is known to be [null]. lock.lock(); try { - /* Close iterator which has been consumed. + /** + * Close iterator which has been consumed. 
* + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/706" + * > MultiSourceSequentialCloseableIterator.nextSource() can + * throw NPE </a> */ - if (log.isInfoEnabled()) - log.info("Closing source: " + current); - current.close(); + ICloseableIterator<E> t = this.current; + { + if (t != null) { + if (log.isInfoEnabled()) + log.info("Closing source: " + t); + t.close(); + } + } // remove the head of the queue (non-blocking) - while ((current = sources.poll()) != null) { - if (current.hasNext()) { - return current; + while ((t = current = sources.poll()) != null) { + if (t.hasNext()) { + return t; } else { // Note: should already be closed since exhausted. - current.close(); + t.close(); } } // no more sources with data, close while holding lock. Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/relation/rule/eval/ProgramTask.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -355,13 +355,25 @@ * @todo if the #of results is small and they are available with * little latency then return the results inline using a fully * buffered iterator. + * + * Note: hack pattern to ensure Future is cancelled if we exit by + * any code path before the future has been set on the BlockingBuffer. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> */ + try { + // run the task. + future = queryTask.submit(); - // run the task. - future = queryTask.submit(); - - // set the future on the BlockingBuffer. - buffer.setFuture(future); + // set the future on the BlockingBuffer. + buffer.setFuture(future); + } finally { + if (future != null && buffer.getFuture() == null) { + // Future exists but not set on BlockingBuffer. 
+ future.cancel(true/* mayInterruptIfRunning */); + } + } if (log.isDebugEnabled()) log.debug("Returning iterator reading on async query task"); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/AbstractScaleOutClientIndexView.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -33,7 +33,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -75,16 +75,15 @@ import com.bigdata.mdi.PartitionLocator; import com.bigdata.relation.accesspath.BlockingBuffer; import com.bigdata.resources.StaleLocatorException; +import com.bigdata.service.AbstractClient; import com.bigdata.service.AbstractScaleOutFederation; +import com.bigdata.service.IBigdataClient; import com.bigdata.service.IBigdataClient.Options; -import com.bigdata.service.AbstractClient; -import com.bigdata.service.IBigdataClient; import com.bigdata.service.IBigdataFederation; import com.bigdata.service.IDataService; import com.bigdata.service.IMetadataService; import com.bigdata.service.Split; import com.bigdata.service.ndx.pipeline.IDuplicateRemover; -import com.bigdata.service.ndx.pipeline.IndexAsyncWriteStats; import com.bigdata.service.ndx.pipeline.IndexWriteTask; import cutthecrap.utils.striterators.IFilter; @@ -1272,11 +1271,21 @@ writeBuffer// ); - final Future<? extends IndexAsyncWriteStats> future = fed - .getExecutorService().submit(task); + /** + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ - writeBuffer.setFuture(future); + // Wrap computation as FutureTask. + @SuppressWarnings({ "unchecked", "rawtypes" }) + final FutureTask<?> ft = new FutureTask(task); + // Set Future on BlockingBuffer + writeBuffer.setFuture(ft); + + // Submit computation for evaluation. 
+ fed.getExecutorService().submit(ft); + return task.getBuffer(); } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/ClientIndexView.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -36,6 +36,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -91,7 +92,6 @@ import com.bigdata.service.IMetadataService; import com.bigdata.service.Split; import com.bigdata.service.ndx.pipeline.IDuplicateRemover; -import com.bigdata.service.ndx.pipeline.IndexAsyncWriteStats; import com.bigdata.service.ndx.pipeline.IndexWriteTask; import com.bigdata.striterator.ICloseableIterator; import com.bigdata.util.InnerCause; @@ -833,8 +833,20 @@ ts, isReadConsistentTx, fromKey, toKey, capacity, flags, filter, queryBuffer); - queryBuffer.setFuture(fed.getExecutorService().submit(task)); + /** + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ + // Wrap computation as FutureTask. + final FutureTask<Void> ft = new FutureTask<Void>(task); + + // Set Future on BlockingBuffer. + queryBuffer.setFuture(ft); + + // Submit computation for evaluation. + fed.getExecutorService().submit(ft); + return new UnchunkedTupleIterator(queryBuffer.iterator()); } @@ -2228,11 +2240,21 @@ writeBuffer// ); - final Future<? extends IndexAsyncWriteStats> future = fed - .getExecutorService().submit(task); + /** + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ - writeBuffer.setFuture(future); + // Wrap computation as FutureTask. + @SuppressWarnings({ "unchecked", "rawtypes" }) + final FutureTask<?> ft = new FutureTask(task); + // Set Future on BlockingBuffer. + writeBuffer.setFuture(ft); + + // Submit computation for evaluation. + fed.getExecutorService().submit(ft); + return task.getBuffer(); } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractMasterTask.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -35,6 +35,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; @@ -896,11 +897,28 @@ // if (oldval == null) { - // assign a worker thread to the sink. - final Future<? extends AbstractSubtaskStats> future = submitSubtask(sink); + /** + * Start subtask. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ + { + + // Wrap the computation as a FutureTask. 
+ @SuppressWarnings({ "unchecked", "rawtypes" }) + final FutureTask<? extends AbstractSubtaskStats> ft = new FutureTask( + sink); + + // Set Future on the BlockingBuffer. + out.setFuture(ft); + + // Assign a worker thread to the sink. + submitSubtask(ft); + + } - out.setFuture(future); - stats.subtaskStartCount.incrementAndGet(); // } else { @@ -940,15 +958,26 @@ */ abstract protected S newSubtask(L locator, BlockingBuffer<E[]> out); +// /** +// * Submit the subtask to an {@link Executor}. +// * +// * @param subtask +// * The subtask. +// * +// * @return The {@link Future}. +// */ +// abstract protected Future<? extends AbstractSubtaskStats> submitSubtask(S subtask); /** * Submit the subtask to an {@link Executor}. * * @param subtask - * The subtask. - * - * @return The {@link Future}. + * The {@link FutureTask} used to execute thee subtask. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> */ - abstract protected Future<? extends AbstractSubtaskStats> submitSubtask(S subtask); + abstract protected void submitSubtask( + FutureTask<? extends AbstractSubtaskStats> subtask); /** * Drains any {@link Future}s from {@link #finishedSubtaskQueue} which are done Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java 2013-08-09 12:05:53 UTC (rev 7270) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/service/ndx/pipeline/IndexWriteTask.java 2013-08-09 12:34:38 UTC (rev 7271) @@ -30,7 +30,7 @@ import java.util.LinkedList; import java.util.concurrent.Callable; -import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; @@ -288,13 +288,12 @@ } - @SuppressWarnings("unchecked") @Override - protected Future<HS> submitSubtask(final S subtask) { + protected void submitSubtask( + final FutureTask<? extends AbstractSubtaskStats> subtask) { - return (Future<HS>) ndx.getFederation().getExecutorService().submit( - subtask); - + ndx.getFederation().getExecutorService().submit(subtask); + } /** Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/service/proxy/Clien... [truncated message content] |
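The recurring edit in the truncated commit above deserves a note: wherever a producer task feeds a BlockingBuffer, the task is now wrapped as a FutureTask, the Future is set on the buffer, and only then is the task submitted. A minimal self-contained sketch of that ordering (ResultBuffer is a hypothetical stand-in for bigdata's BlockingBuffer, not part of the commit):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.FutureTask;
    import java.util.concurrent.atomic.AtomicReference;

    public class SetFutureBeforeStart {

        // Stand-in for BlockingBuffer: publishes the producer's Future to the consumer.
        static class ResultBuffer<E> {
            private final AtomicReference<Future<E>> future = new AtomicReference<Future<E>>();
            void setFuture(final Future<E> f) {
                if (!future.compareAndSet(null, f))
                    throw new IllegalStateException("Future already set");
            }
            Future<E> getFuture() {
                return future.get();
            }
        }

        public static void main(final String[] args) throws Exception {
            final ExecutorService exec = Executors.newSingleThreadExecutor();
            final ResultBuffer<Long> buffer = new ResultBuffer<Long>();
            // 1. Wrap the computation as a FutureTask; nothing is running yet.
            final FutureTask<Long> ft = new FutureTask<Long>(new Callable<Long>() {
                public Long call() {
                    return 42L; // the producer's work
                }
            });
            // 2. Publish the Future *before* execution starts, so a consumer that
            //    fails or is interrupted early can always cancel the producer.
            buffer.setFuture(ft);
            // 3. Only now start the computation.
            exec.submit(ft);
            System.out.println(buffer.getFuture().get());
            exec.shutdown();
        }
    }

With the old order (submit first, then setFuture) there was a window in which the producer was already running but the consumer had no Future to cancel, which is the thread-unblocking problem tracked as ticket 707.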
From: <tho...@us...> - 2013-08-09 12:05:59
Revision: 7270 http://bigdata.svn.sourceforge.net/bigdata/?rev=7270&view=rev Author: thompsonbry Date: 2013-08-09 12:05:53 +0000 (Fri, 09 Aug 2013) Log Message: ----------- enabled ZK messages (as received) in the HA test suite logs. Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2013-08-09 12:02:55 UTC (rev 7269) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2013-08-09 12:05:53 UTC (rev 7270) @@ -10,7 +10,7 @@ log4j.logger.com.bigdata.journal.jini.ha.HAJournalServer=ALL #log4j.logger.com.bigdata.service.jini.lookup=ALL #log4j.logger.com.bigdata.quorum=INFO -#log4j.logger.com.bigdata.quorum.zk=INFO +log4j.logger.com.bigdata.quorum.zk=INFO log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties 2013-08-09 12:02:55 UTC (rev 7269) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties 2013-08-09 12:05:53 UTC (rev 7270) @@ -10,7 +10,7 @@ log4j.logger.com.bigdata.journal.jini.ha.HAJournalServer=ALL #log4j.logger.com.bigdata.service.jini.lookup=ALL #log4j.logger.com.bigdata.quorum=INFO -#log4j.logger.com.bigdata.quorum.zk=INFO +log4j.logger.com.bigdata.quorum.zk=INFO #log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties 2013-08-09 12:02:55 UTC (rev 7269) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties 2013-08-09 12:05:53 UTC (rev 7270) @@ -10,7 +10,7 @@ log4j.logger.com.bigdata.journal.jini.ha.HAJournalServer=ALL #log4j.logger.com.bigdata.service.jini.lookup=ALL #log4j.logger.com.bigdata.quorum=INFO -#log4j.logger.com.bigdata.quorum.zk=INFO +log4j.logger.com.bigdata.quorum.zk=INFO #log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
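For anyone reproducing this outside the test templates: the "log4j.logger.com.bigdata.quorum.zk=INFO" line is equivalent to setting the level programmatically with the log4j 1.2 API. A minimal sketch (the message text is illustrative only):

    import org.apache.log4j.BasicConfigurator;
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class ZkQuorumLogLevel {
        public static void main(final String[] args) {
            BasicConfigurator.configure(); // console appender for the demo
            // Same effect as "log4j.logger.com.bigdata.quorum.zk=INFO" in the templates.
            final Logger log = Logger.getLogger("com.bigdata.quorum.zk");
            log.setLevel(Level.INFO);
            if (log.isInfoEnabled())
                log.info("ZK event as received ..."); // illustrative message only
        }
    }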
From: <tho...@us...> - 2013-08-09 12:03:04
Revision: 7269 http://bigdata.svn.sourceforge.net/bigdata/?rev=7269&view=rev Author: thompsonbry Date: 2013-08-09 12:02:55 +0000 (Fri, 09 Aug 2013) Log Message: ----------- Reconciling edit to test case showing how to use ORDER BY with INSERT INTO made to both the 1.2.x and READ_CACHE branches prior to merging changes from the 1.2.x branch into the READ_CACHE branch. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-08 19:38:24 UTC (rev 7268) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-09 12:02:55 UTC (rev 7269) @@ -467,16 +467,28 @@ final StringBuilder sb = new StringBuilder(); + /* + * FIXME test variants w/ and w/o embedded sub-select and verify the + * *order* is preserved when using the embedded subselect w/ its + * order by. Also, verify that we translate this by lifting out the + * sub-select since the top-level query is empty at thast point. + * + * Also, document this on the wiki. The sub-select is necessary because + * SPARQL does not allow solution modifiers on the top-level WHERE clause + * for INSERT/DELETE+WHERE. + */ sb.append("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"); sb.append("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"); sb.append("PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"); sb.append("INSERT INTO %namedSet1\n"); sb.append("SELECT ?x ?name\n"); + sb.append("WHERE { SELECT ?x ?name\n"); sb.append("WHERE {\n"); sb.append(" ?x rdf:type foaf:Person .\n"); sb.append(" ?x rdfs:label ?name .\n"); sb.append("}\n"); -// sb.append("ORDER BY ?name"); + sb.append("ORDER BY ?name\n"); + sb.append("}"); con.prepareUpdate(QueryLanguage.SPARQL, sb.toString()).execute(); Modified: branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-08 19:38:24 UTC (rev 7268) +++ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-08-09 12:02:55 UTC (rev 7269) @@ -475,7 +475,7 @@ * * Also, document this on the wiki. The sub-select is necessary because * SPARQL does not allow solution modifiers on the top-level WHERE clause - * for INSERT/DELETE+WHERE. + * for INSERT/DELETE+WHERE. */ sb.append("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"); sb.append("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
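Assembled, the update that the patched test now issues reads as below, shown as a single Java string for clarity (a hypothetical constant; the content is exactly what the test's StringBuilder produces, and INSERT INTO %namedSet1 is the bigdata extension for named solution sets). The inner SELECT carries the ORDER BY because, as the new comment documents, SPARQL does not allow solution modifiers on the top-level WHERE clause for INSERT/DELETE+WHERE:

    final String update =
          "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n"
        + "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n"
        + "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"
        + "INSERT INTO %namedSet1\n"
        + "SELECT ?x ?name\n"
        + "WHERE { SELECT ?x ?name\n"
        + "WHERE {\n"
        + "  ?x rdf:type foaf:Person .\n"
        + "  ?x rdfs:label ?name .\n"
        + "}\n"
        + "ORDER BY ?name\n"
        + "}";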
From: <tho...@us...> - 2013-08-08 19:38:33
Revision: 7268 http://bigdata.svn.sourceforge.net/bigdata/?rev=7268&view=rev Author: thompsonbry Date: 2013-08-08 19:38:24 +0000 (Thu, 08 Aug 2013) Log Message: ----------- - AbstractHA3JournalServerTestCase: Modified the HA test suite to create the service directory for each test in a file named by the test class and the test method. This allows us to inspect the logs for tests that only fail when run as part of the total HA3 or HA test suite. - ServiceConfiguration: javadoc corrections. Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java 2013-08-08 16:07:26 UTC (rev 7267) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/jini/start/config/ServiceConfiguration.java 2013-08-08 19:38:24 UTC (rev 7268) @@ -615,9 +615,11 @@ /** * Hook for overriding the service directory. The default returns the - * {@link ServiceConfiguration#serviceDir}. It SHOULD be overriden to - * return the actual directory in which the specific service instance - * will be started. + * {@link ServiceConfiguration#serviceDir}, which is <code>public</code> + * and accessed in a variety of places. Therefore, in order to override + * the returned value, you should specify an override such that + * {@link ServiceConfiguration#getServiceDir(String, Configuration)} + * will return the desired value. * * @see ServiceConfiguration#serviceDir * @see ServiceConfiguration#getServiceDir(String, Configuration) Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-08-08 16:07:26 UTC (rev 7267) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2013-08-08 19:38:24 UTC (rev 7268) @@ -257,12 +257,12 @@ */ { - final File testDir = new File(TGT_PATH); - + final File testDir = getTestDir(); + if (testDir.exists()) { recursiveDelete(testDir); - + } } @@ -532,11 +532,33 @@ */ /** + * The effective name for this test as used to name the directories in which + * we store things. + */ + protected String getEffectiveTestFileName() { + + return effectiveTestFileName; + + } + + /** + * The effective name for this test as used to name the directories in which + * we store things. + * + * TODO If there are method name collisions across the different test + * classes then the test suite name can be added to this. Also, if there are + * file naming problems, then this value can be munged before it is + * returned. + */ + private final String effectiveTestFileName = getClass().getSimpleName() + + "." + getName(); + + /** * The directory that is the parent of each {@link HAJournalServer}'s * individual service directory. 
*/ protected File getTestDir() { - return new File(FEDNAME + "/CI-HAJournal-1"); + return new File(TGT_PATH, getEffectiveTestFileName()); } protected File getServiceDirA() { @@ -1329,7 +1351,7 @@ final String configFile = SRC_PATH + sourceConfigFileName; - final File serviceDir = new File(TGT_PATH, name); + final File serviceDir = new File(getTestDir(), name); final String installedConfigFileName = "HAJournal.config"; @@ -1381,20 +1403,28 @@ */ // Overrides for this test. - final String[] overrides = getOverrides(); + final String[] testOverrides = getOverrides(); + // Add override for the serviceDir. + final String[] overrides = ConfigMath.concat( + new String[] { "bigdata.serviceDir=new java.io.File(\"" + serviceDir + "\")" }, + testOverrides); + // Config file + overrides from perspective of this JVM. final String[] ourArgs = ConfigMath.concat(new String[] { configFile }, overrides); // Config file + overrides from perspective of the child process. - final String[] childArgs = ConfigMath.concat( - new String[] { installedConfigFileName }, overrides); + final String[] childArgs = ConfigMath.concat(new String[] { + installedConfigFileName, // as installed. + "bigdata.serviceDir=new java.io.File(\".\")" // relative to the serviceDir! + }, testOverrides // plus anything from the test case. + ); final Configuration config = ConfigurationProvider.getInstance(ourArgs); final ServiceConfiguration serviceConfig = new HAJournalServerConfiguration( - name, config, serviceId, serviceDir, childArgs); + name, config, serviceId, /*serviceDir,*/ childArgs); final AbstractServiceStarter<?> serviceStarter = serviceConfig .newServiceStarter(serviceListener); @@ -1595,12 +1625,12 @@ private final String serviceName; private final UUID serviceId; - private final File serviceDir; +// private final File serviceDir; private final String[] args; public HAJournalServerConfiguration(final String serviceName, final Configuration config, final UUID serviceId, - final File serviceDir, final String[] args) + /*final File serviceDirIsIgnored, */ final String[] args) throws ConfigurationException { // Note: ignored! args[] is used instead. @@ -1628,7 +1658,7 @@ this.serviceId = serviceId; - this.serviceDir = serviceDir; +// this.serviceDir = serviceDir; this.args = args; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
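The naming rule for the per-test directories is simple enough to state on its own; a sketch mirroring getEffectiveTestFileName() (the base path below is illustrative, not the value of TGT_PATH):

    import java.io.File;

    public class PerTestDir {
        // Naming rule from the commit: TestClassSimpleName.testMethodName
        static File effectiveTestDir(final File base, final Class<?> testClass,
                final String testMethod) {
            return new File(base, testClass.getSimpleName() + "." + testMethod);
        }

        public static void main(final String[] args) {
            final File d = effectiveTestDir(new File("target/test"), // illustrative base
                    PerTestDir.class, "testStartAB");
            System.out.println(d.getPath()); // target/test/PerTestDir.testStartAB
        }
    }

Because each test method gets its own directory, the service logs for a failed run survive the next test's setup, which is the point of the change.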
From: <tho...@us...> - 2013-08-08 16:07:49
Revision: 7267 http://bigdata.svn.sourceforge.net/bigdata/?rev=7267&view=rev Author: thompsonbry Date: 2013-08-08 16:07:26 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Sync w/ Martyn. - Added while(true) look to ErrorTask.doRun() to handle cases where a SERVICE_LEAVE causes a QUORUM_BREAK. - QuorumTokenTransitions.toString() is now implemented. - Moved the bounceLeader and bounceFollower tests into the overrides test suite and filed a new ticket for those tests (#718) See #695 Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -5781,7 +5781,7 @@ } if (haLog.isInfoEnabled()) - haLog.info(transitionState.showState()); + haLog.info(transitionState.toString()); if (isLeader || isFollower) { Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/AbstractQuorum.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -54,6 +54,7 @@ import com.bigdata.ha.HAPipelineGlue; import com.bigdata.ha.QuorumService; import com.bigdata.util.InnerCause; +import com.bigdata.util.StackInfoReport; import com.bigdata.util.concurrent.DaemonThreadFactory; import com.bigdata.util.concurrent.ThreadGuard; import com.bigdata.util.concurrent.ThreadGuard.Guard; @@ -1693,9 +1694,9 @@ private void conditionalWithdrawVoteImpl() throws InterruptedException { - if (log.isDebugEnabled()) - log.debug("Check context", new RuntimeException()); - + if (log.isDebugEnabled()) + log.debug(new StackInfoReport()); + final Long lastCommitTime = getCastVote(serviceId); if (lastCommitTime != null) { doWithdrawVote(); Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -36,18 +36,33 @@ final long currentQuorumToken; final long newQuorumToken; - final long currentHaReady; + final long currentHAReadyToken; public final boolean didBreak; public final boolean didMeet; public final boolean didJoinMetQuorum; public final boolean didLeaveMetQuorum; - final boolean wasMet; - final boolean isMet; - final boolean isJoined; - final boolean wasJoined; - + final private boolean wasMet; + final private boolean isMet; + final private boolean 
isJoined; + final private boolean wasJoined; + + @Override + public final String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append(getClass()); + sb.append("{oldQuorumToken=" + currentQuorumToken); + sb.append(",newQuorumToken=" + newQuorumToken); + sb.append(",oldHAReadyToken=" + currentHAReadyToken); + sb.append(",didBreak=" + didBreak); + sb.append(",didMeet=" + didMeet); + sb.append(",didJoinMetQuorum=" + didJoinMetQuorum); + sb.append(",didLeaveMetQuorum=" + didLeaveMetQuorum); + sb.append("}"); + return sb.toString(); + } + public QuorumTokenTransitions(final long currentQuorumToken, final long newQuorumToken, final QuorumService<HAGlue> service, final long haReady) { @@ -61,7 +76,7 @@ final long newQuorumToken, final boolean joined, final long haReady) { - this.currentHaReady = haReady; + this.currentHAReadyToken = haReady; this.currentQuorumToken = currentQuorumToken; this.newQuorumToken = newQuorumToken; @@ -72,7 +87,7 @@ // Both quorum token and haReadyToken agree with newValue. final boolean noTokenChange = currentQuorumToken == newQuorumToken - && currentQuorumToken == currentHaReady; + && currentQuorumToken == currentHAReadyToken; /* * TODO: more understanding required as to the effect of this clause @@ -140,7 +155,7 @@ // TODO Document rationale for each assertion. private void checkStates() { - if (wasJoined && wasMet && currentHaReady > currentQuorumToken) { + if (wasJoined && wasMet && currentHAReadyToken > currentQuorumToken) { throw new AssertionError("haReady greater than current token"); @@ -220,8 +235,5 @@ return isMet && wasJoined && !isJoined; } - public final String showState() { - return ""; // TODO showState(). - } } Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -1670,109 +1670,119 @@ } - /** - * FIXME The error task may need to validate that it does not need - * to re-execute and may transition to SeekConsensus. The specific - * problem is when doServiceLeave() leads to a QUORUM_BREAK event. - * <p> - * The relevant tests are testAB_BounceFollower, - * (testAB_BounceLeader, testAB_RestartLeader. - */ -// if (journal.getQuorumToken() == journal.getQuorum().token()) { -// -// haLog.warn("Will not reenter active run state: " -// + runStateTask.runState + ", currentToken: " -// + journal.getQuorumToken() + ", newToken: " -// + journal.getQuorum().token()); @Override public Void doRun() throws Exception { -// /* -// * Discard the current write set. -// * -// * Note: This is going to call through to discardWriteSet(). -// * That method will close out the current HALog and discard the -// * last live write message. -// * -// * FIXME the setQuorumToken() after the serviceLeave() will also -// * cause doLocalAbort() to be called, so we probably do NOT want -// * to call it here. -// */ - journal.doLocalAbort(); + while (true) { - /* - * Note: Bouncing the ZK connection here appears to cause - * problems within the test suite. We have not tracked down why - * yet. - */ -// server.haGlueService.bounceZookeeperConnection(); - - /* - * Do synchronous service leave. 
- */ + log.warn("Will do error handler."); - log.warn("Will do SERVICE LEAVE"); - - getActor().serviceLeave(); - - /* - * Set token. Journal will notice that it is no longer - * "HA Ready" - * - * Note: We update the haReadyToken and haStatus regardless of - * whether the quorum token has changed in case this service is - * no longer joined with a met quorum. - * - * Note: AbstractJournal.setQuorumToken() will detect case where - * it transitions from a met quorum through a service leave and - * will clear its haReady token and update its haStatus field - * appropriately. (This is why we pass in quorum.token() rather - * than NO_QUORUM.) - * - * TODO There are cases where nothing changes that may hit an - * AssertionError in setQuorumToken(). - * - * TODO This will (conditionally) trigger doLocalAbort(). Since we did this - * explicitly above, that can be do invocations each time we pass through here! - */ - if (log.isInfoEnabled()) - log.info("Current Token: " + journal.getHAReady() + ", new: " + getQuorum().token()); + /* + * Discard the current write set. + * + * Note: This is going to call through to discardWriteSet(). + * That method will close out the current HALog and discard + * the last live write message. + * + * FIXME the setQuorumToken() after the serviceLeave() will + * also cause doLocalAbort() to be called, so we probably do + * NOT want to call it here. + */ + journal.doLocalAbort(); - journal.setQuorumToken(getQuorum().token()); - - /** - * Dispatch Events before entering SeekConsensus! Otherwise - * the events triggered by the serviceLeave() and setQuorumToken - * will not be handled until we enter SeekConsensus, and then - * when they are received SeekConsensus will fail. - * - * The intention of this action is to ensure that when SeekConsensus is - * entered the service is in a "clean" state. - */ - processEvents(); - -// /* -// * Note: We can spin here to give the service an opportunity to -// * handle any backlog of events that trigger a transition into -// * the ERROR state. This might not be strictly necessary, and we -// * do not want to spin too long. -// */ -// -// final long sleepMillis = 0; // 2000; // TODO CONFIG? -// -// log.warn("Sleeping " + sleepMillis + "ms to let events quiesce."); -// -// if (sleepMillis > 0) -// Thread.sleep(sleepMillis); + /* + * Do synchronous service leave. + */ + log.warn("Will do SERVICE LEAVE"); + + getActor().serviceLeave(); + + /* + * Set token. Journal will notice that it is no longer + * "HA Ready" + * + * Note: We update the haReadyToken and haStatus regardless + * of whether the quorum token has changed in case this + * service is no longer joined with a met quorum. + * + * Note: AbstractJournal.setQuorumToken() will detect case + * where it transitions from a met quorum through a service + * leave and will clear its haReady token and update its + * haStatus field appropriately. (This is why we pass in + * quorum.token() rather than NO_QUORUM.) + * + * TODO There are cases where nothing changes that may hit + * an AssertionError in setQuorumToken(). + * + * TODO This will (conditionally) trigger doLocalAbort(). + * Since we did this explicitly above, that can be do + * invocations each time we pass through here! + */ + if (log.isInfoEnabled()) + log.info("Current Token: haJournalReady=" + + journal.getHAReady() + + ", getQuorum().token()=: " + + getQuorum().token()); + + journal.setQuorumToken(getQuorum().token()); + + /** + * Dispatch Events before entering SeekConsensus! 
Otherwise + * the events triggered by the serviceLeave() and + * setQuorumToken will not be handled until we enter + * SeekConsensus, and then when they are received + * SeekConsensus will fail. + * + * The intention of this action is to ensure that when + * SeekConsensus is entered the service is in a "clean" + * state. + */ + processEvents(); + + /** + * The error task needs to validate that it does not need to + * re-execute and may transition to SeekConsensus. The + * specific problem is when a SERVICE_LEAVE leads to a + * QUORUM_BREAK event. When we handle the SERVICE_LEAVE the + * quorum's token has not yet been cleared. However, it must + * be cleared when the QUORUM_BREAK comes around. Since we + * do not permit re-entry into the ErrorTask in + * enterRunState(), we need to check this post-condition + * here. + * <p> + * The relevant tests are testAB_BounceFollower and + * testAB_BounceLeader and also testAB_RestartLeader and + * testAB_RestartFollower. In addition, any test where we + * fail the leader could require this 2nd pass through + * ErrorTask.doRun(). + */ + final long t1 = journal.getQuorumToken(); + + final long t2 = journal.getQuorum().token(); + + if (t1 == t2) { + + haLog.warn("Will not re-do error handler" + + ": journal.quorumToken=" + t1 + + ", quorum.token()=" + t2); + + break; + + } + + // Sleep here to avoid a tight loop? + + } // while(true) + // Seek consensus. enterRunState(new SeekConsensusTask()); return null; - - } - - } + + } // doRun() + + } // class ErrorTask /** * Task to handle a quorum break event. Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA2JournalServer.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -37,7 +37,6 @@ import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.journal.IRootBlockView; -import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.Quorum; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; @@ -171,202 +170,6 @@ } /** - * 2 services start, quorum meets then we bounce the zookeeper connection - * for the follower and verify that the quorum meets again. - */ - public void testStartAB_BounceFollower() throws Exception { - - final HAGlue serverA = startA(); - final HAGlue serverB = startB(); - - final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - - doNSSStatusRequest(serverA); - doNSSStatusRequest(serverB); - - // Await initial commit point (KB create). - awaitCommitCounter(1L, serverA, serverB); - - // Await [A] up and running as leader. - assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); - - // Await [B] up and running as follower. - assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); - - // Verify self-reporting by RMI in their respective roles. - awaitHAStatus(serverA, HAStatusEnum.Leader); - awaitHAStatus(serverB, HAStatusEnum.Follower); - - // Verify binary equality on the journal files. - assertDigestsEquals(new HAGlue[] { serverA, serverB }); - - if (log.isInfoEnabled()) { - log.info("Zookeeper before quorum break:\n" + dumpZoo()); - } - - /* - * Bounce the follower. Verify quorum meets again and that we can read - * on all services. 
- */ - { - - final HAGlue leader = quorum.getClient().getLeader(token1); - -// final UUID leaderId1 = leader.getServiceId(); - - if (leader.equals(serverA)) { - - ((HAGlueTest) serverB).bounceZookeeperConnection().get(); - - } else { - - ((HAGlueTest) serverA).bounceZookeeperConnection().get(); - - } - - // Okay, is the problem that the quorum doesn't break? - // assertFalse(quorum.isQuorumMet()); - - // Right so the Quorum is not met, but the follower deosn't seem to know it's broken - - // Wait for the quorum to break and then meet again. - final long token2 = awaitNextQuorumMeet(token1); - - if (log.isInfoEnabled()) { - log.info("Zookeeper after quorum meet:\n" + dumpZoo()); - } - - /* - * Bouncing the connection broke the quorun, so verify that the - * quorum token was advanced. - */ - assertEquals(token1 + 1, token2); - - // The leader MAY have changed (since the quorum broke). - final HAGlue leader2 = quorum.getClient().getLeader(token2); - - // Verify leader self-reports in new role. - awaitHAStatus(leader2, HAStatusEnum.Leader); - -// final UUID leaderId2 = leader2.getServiceId(); -// -// assertFalse(leaderId1.equals(leaderId2)); - - /* - * Verify we can read on the KB on both nodes. - * - * Note: It is important to test the reads for the first commit on - * both the leader and the follower. - */ - for (HAGlue service : new HAGlue[] { serverA, serverB }) { - - final RemoteRepository repo = getRemoteRepository(service); - - // Should be empty. - assertEquals( - 0L, - countResults(repo.prepareTupleQuery( - "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); - - } - - } - - } - - /** - * 2 services start, quorum meets then we bounce the zookeeper connection - * for the leader and verify that the quorum meets again. - */ - public void testStartAB_BounceLeader() throws Exception { - - final HAGlue serverA = startA(); - final HAGlue serverB = startB(); - - final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, - TimeUnit.MILLISECONDS); - - doNSSStatusRequest(serverA); - doNSSStatusRequest(serverB); - - // Await initial commit point (KB create). - awaitCommitCounter(1L, serverA, serverB); - - // Await [A] up and running as leader. - assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); - - // Await [B] up and running as follower. - assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); - - // Verify self-reports in role. - awaitHAStatus(serverA, HAStatusEnum.Leader); - awaitHAStatus(serverB, HAStatusEnum.Follower); - - // Verify binary equality on the journal files. - assertDigestsEquals(new HAGlue[] { serverA, serverB }); - - if (log.isInfoEnabled()) { - log.info("Zookeeper before quorum meet:\n" + dumpZoo()); - } - - /* - * Bounce the leader. Verify that the service that was the follower is - * now the leader. Verify that the quorum meets. - */ - { - - final HAGlue leader = quorum.getClient().getLeader(token1); - -// final UUID leaderId1 = leader.getServiceId(); - - ((HAGlueTest)leader).bounceZookeeperConnection().get(); - - // Wait for the quorum to break and then meet again. - final long token2 = awaitNextQuorumMeet(token1); - - if (log.isInfoEnabled()) { - log.info("Zookeeper after quorum meet:\n" + dumpZoo()); - } - - /* - * Bouncing the connection broke the quorum, so verify that the - * quorum token was advanced. - */ - assertEquals(token1 + 1, token2); - - // The leader MAY have changed. 
- final HAGlue leader2 = quorum.getClient().getLeader(token2); - -// final UUID leaderId2 = leader2.getServiceId(); -// -// assertFalse(leaderId1.equals(leaderId2)); - - // Verify leader self-reports in new role. - awaitHAStatus(leader2, HAStatusEnum.Leader); - - /* - * Verify we can read on the KB on both nodes. - * - * Note: It is important to test the reads for the first commit on - * both the leader and the follower. - */ - for (HAGlue service : new HAGlue[] { serverA, serverB }) { - - final RemoteRepository repo = getRemoteRepository(service); - - // Should be empty. - assertEquals( - 0L, - countResults(repo.prepareTupleQuery( - "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); - - } - - } - - } - - /** * 2 services start, quorum meets then we restart the follower and verify * that the quorum meets again. */ Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-08 14:04:53 UTC (rev 7266) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-08 16:07:26 UTC (rev 7267) @@ -41,7 +41,10 @@ import com.bigdata.journal.AbstractTask; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.journal.jini.ha.HAJournalTest.SpuriousTestException; +import com.bigdata.quorum.zk.ZKQuorum; +import com.bigdata.quorum.zk.ZKQuorumImpl; import com.bigdata.rdf.sail.webapp.client.HttpException; +import com.bigdata.rdf.sail.webapp.client.RemoteRepository; import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.InnerCause; @@ -569,4 +572,210 @@ } + /** + * 2 services start, quorum meets then we bounce the zookeeper connection + * for the follower and verify that the quorum meets again. + * <p> + * Note: Bouncing the ZK client connection causes the reflected state + * maintained by the {@link ZKQuorumImpl} to be out of sync with the state + * in zookeeper. Not only can some events be lost, but none of the events + * that correspond to the elimination of the ephemeral znodes for this + * service will be observed. Handling this correctly requires special + * consideration. + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/718" > + * HAJournalServer needs to handle ZK client connection loss </a> + */ + public void testStartAB_BounceFollower() throws Exception { + + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reporting by RMI in their respective roles. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before quorum break:\n" + dumpZoo()); + } + + /* + * Bounce the follower. Verify quorum meets again and that we can read + * on all services. 
+ */ + { + + final HAGlue leader = quorum.getClient().getLeader(token1); + +// final UUID leaderId1 = leader.getServiceId(); + + if (leader.equals(serverA)) { + + ((HAGlueTest) serverB).bounceZookeeperConnection().get(); + + } else { + + ((HAGlueTest) serverA).bounceZookeeperConnection().get(); + + } + Thread.sleep(100000); + // Okay, is the problem that the quorum doesn't break? + // assertFalse(quorum.isQuorumMet()); + + // Right so the Quorum is not met, but the follower deosn't seem to know it's broken + + // Wait for the quorum to break and then meet again. + final long token2 = awaitNextQuorumMeet(token1); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum meet:\n" + dumpZoo()); + } + + /* + * Bouncing the connection broke the quorun, so verify that the + * quorum token was advanced. + */ + assertEquals(token1 + 1, token2); + + // The leader MAY have changed (since the quorum broke). + final HAGlue leader2 = quorum.getClient().getLeader(token2); + + // Verify leader self-reports in new role. + awaitHAStatus(leader2, HAStatusEnum.Leader); + +// final UUID leaderId2 = leader2.getServiceId(); +// +// assertFalse(leaderId1.equals(leaderId2)); + + /* + * Verify we can read on the KB on both nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB }) { + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + + /** + * 2 services start, quorum meets then we bounce the zookeeper connection + * for the leader and verify that the quorum meets again. + */ + public void testStartAB_BounceLeader() throws Exception { + + final HAGlue serverA = startA(); + final HAGlue serverB = startB(); + + final long token1 = quorum.awaitQuorum(awaitQuorumTimeout, + TimeUnit.MILLISECONDS); + + doNSSStatusRequest(serverA); + doNSSStatusRequest(serverB); + + // Await initial commit point (KB create). + awaitCommitCounter(1L, serverA, serverB); + + // Await [A] up and running as leader. + assertEquals(HAStatusEnum.Leader, awaitNSSAndHAReady(serverA)); + + // Await [B] up and running as follower. + assertEquals(HAStatusEnum.Follower, awaitNSSAndHAReady(serverB)); + + // Verify self-reports in role. + awaitHAStatus(serverA, HAStatusEnum.Leader); + awaitHAStatus(serverB, HAStatusEnum.Follower); + + // Verify binary equality on the journal files. + assertDigestsEquals(new HAGlue[] { serverA, serverB }); + + if (log.isInfoEnabled()) { + log.info("Zookeeper before quorum meet:\n" + dumpZoo()); + } + + /* + * Bounce the leader. Verify that the service that was the follower is + * now the leader. Verify that the quorum meets. + */ + { + + final HAGlue leader = quorum.getClient().getLeader(token1); + +// final UUID leaderId1 = leader.getServiceId(); + + ((HAGlueTest)leader).bounceZookeeperConnection().get(); + + // Wait for the quorum to break and then meet again. + final long token2 = awaitNextQuorumMeet(token1); + + if (log.isInfoEnabled()) { + log.info("Zookeeper after quorum meet:\n" + dumpZoo()); + } + + /* + * Bouncing the connection broke the quorum, so verify that the + * quorum token was advanced. + */ + assertEquals(token1 + 1, token2); + + // The leader MAY have changed. 
+ final HAGlue leader2 = quorum.getClient().getLeader(token2); + +// final UUID leaderId2 = leader2.getServiceId(); +// +// assertFalse(leaderId1.equals(leaderId2)); + + // Verify leader self-reports in new role. + awaitHAStatus(leader2, HAStatusEnum.Leader); + + /* + * Verify we can read on the KB on both nodes. + * + * Note: It is important to test the reads for the first commit on + * both the leader and the follower. + */ + for (HAGlue service : new HAGlue[] { serverA, serverB }) { + + final RemoteRepository repo = getRemoteRepository(service); + + // Should be empty. + assertEquals( + 0L, + countResults(repo.prepareTupleQuery( + "SELECT * {?a ?b ?c} LIMIT 10").evaluate())); + + } + + } + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
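Condensed, the new control flow in ErrorTask.doRun() is the loop below (abridged from the patch; the surrounding class supplies journal, getActor(), getQuorum() and processEvents()):

    while (true) {
        journal.doLocalAbort();                      // discard the current write set
        getActor().serviceLeave();                   // synchronous service leave
        journal.setQuorumToken(getQuorum().token()); // journal notices it is not HA Ready
        processEvents();                             // drain events before SeekConsensus
        // Post-condition check: a SERVICE_LEAVE can escalate into a QUORUM_BREAK,
        // changing the quorum token after the leave was handled. Since ErrorTask
        // may not be re-entered via enterRunState(), loop until the tokens agree.
        if (journal.getQuorumToken() == journal.getQuorum().token())
            break;
    }
    enterRunState(new SeekConsensusTask());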
From: <tho...@us...> - 2013-08-08 14:05:00
Revision: 7266 http://bigdata.svn.sourceforge.net/bigdata/?rev=7266&view=rev Author: thompsonbry Date: 2013-08-08 14:04:53 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Some clarification/cleanup of informative stack traces and also removed the logic that would allow the re-entry of the ErrorTask since that would cause the interrupt of the task and could interrupt the abort() or actor.serviceLeave(), neither of which would be good. We are looking at a post-condition for the ErrorTask instead to decide if it needs to re-execute. Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 13:34:17 UTC (rev 7265) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 14:04:53 UTC (rev 7266) @@ -2701,15 +2701,15 @@ * {@link ICommitRecord} from the root blocks of the store. */// TODO Could merge with doLocalAbort(). private void _abort() { - log.warn("ABORT",new StackInfoReport("ABORT")); + final WriteLock lock = _fieldReadWriteLock.writeLock(); lock.lock(); try { - if (log.isInfoEnabled()) - log.info("start");//, new RuntimeException()); + if (log.isInfoEnabled()) + log.info("ABORT", new StackInfoReport("ABORT")); // Clear gatherFuture.set(null/* newValue */); Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 13:34:17 UTC (rev 7265) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 14:04:53 UTC (rev 7266) @@ -1093,7 +1093,8 @@ if (InnerCause.isInnerCause(t, InterruptedException.class)) { // Note: This is a normal exit condition. - log.info("Interrupted."); + if (log.isInfoEnabled()) + log.info("Interrupted: " + runState); // Done. return null; @@ -1320,44 +1321,22 @@ throw new IllegalArgumentException(); synchronized (runStateRef) { - - /* - * This check appears to cause some transitions to be lost. - * - * FIXME ERROR HANDLING: The specific problem is that a service - * leave can cause a quorum break. This is essentially an - * escalation of the error condition and does require us to at - * least clear the quorum token on the journal (the HAReadyToken - * would be cleared by an uninterrupted service leave, the the - * journal quorum token would only be cleared by a quorum - * break.) - */ + if (runStateTask.runState .equals(lastSubmittedRunStateRef.get())) { - + /* - * FIXME ERROR HANDLING: Checking if the token has changed - * (per the note above) fixes some test scenarios - * (testAB_BounceFollower) but breaks others - * (testAB_BounceLeader and testAB_RestartLeader - * occasionally fails). + * Do not re-enter the same run state. + * + * Note: This was added to prevent re-entry of the + * ErrorState when we are already in the ErrorState. 
*/ - if (journal.getQuorumToken() == journal.getQuorum().token()) { + + haLog.warn("Will not reenter active run state: " + + runStateTask.runState); - haLog.warn("Will not reenter active run state: " - + runStateTask.runState + ", currentToken: " - + journal.getQuorumToken() + ", newToken: " - + journal.getQuorum().token()); + return null; - return null; - - } else { - - haLog.warn("Re-entering current state since token has changed: " - + runStateTask.runState); - - } - } final FutureTask<Void> ft = new FutureTaskMon<Void>( @@ -1603,15 +1582,20 @@ * Transition to {@link RunStateEnum#Error}. */ private class EnterErrorStateTask implements Callable<Void> { - - protected EnterErrorStateTask() { + + protected EnterErrorStateTask() { + /* + * Note: This tells us the code path from which the request + * to enter the ERROR state was submitted. + */ log.warn("", new StackInfoReport()); - } - + } + public Void call() throws Exception { enterRunState(new ErrorTask()); return null; } + } /** @@ -1681,11 +1665,25 @@ protected ErrorTask() { super(RunStateEnum.Error); + // Note: This stack trace does not have any useful info. +// log.warn("", new StackInfoReport()); - log.warn("", new StackInfoReport()); - } + /** + * FIXME The error task may need to validate that it does not need + * to re-execute and may transition to SeekConsensus. The specific + * problem is when doServiceLeave() leads to a QUORUM_BREAK event. + * <p> + * The relevant tests are testAB_BounceFollower, + * testAB_BounceLeader, and testAB_RestartLeader. + */ +// if (journal.getQuorumToken() == journal.getQuorum().token()) { +// +// haLog.warn("Will not reenter active run state: " +// + runStateTask.runState + ", currentToken: " +// + journal.getQuorumToken() + ", newToken: " +// + journal.getQuorum().token()); @Override public Void doRun() throws Exception { |
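A minimal sketch of the post-condition idea described in the log message, not the committed implementation: after the error handling has run to completion, the task re-checks the tokens to decide whether the error state must execute again, rather than being interrupted mid-flight. The doErrorHandling() helper and the SeekConsensusTask name are assumptions; enterRunState(), ErrorTask and the token accessors all appear in the diffs above.

        @Override
        public Void doRun() throws Exception {

            // abort(), serviceLeave(), etc. (assumed helper standing in
            // for the actual error handling logic in ErrorTask).
            doErrorHandling();

            /*
             * Post-condition: if a quorum break invalidated the journal's
             * token while we were handling the error (e.g. our own service
             * leave escalated into a break), then run the error handling
             * again; otherwise seek a consensus with the other services.
             */
            if (journal.getQuorumToken() != journal.getQuorum().token()) {

                enterRunState(new ErrorTask()); // re-execute.

            } else {

                enterRunState(new SeekConsensusTask()); // assumed task name.

            }

            return null;

        }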
From: <tho...@us...> - 2013-08-08 13:34:23
|
Revision: 7265 http://bigdata.svn.sourceforge.net/bigdata/?rev=7265&view=rev Author: thompsonbry Date: 2013-08-08 13:34:17 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Added the UUID version of the ServiceID to the log @ INFO message so we can correlate the UUID with the other messages in the log. Modified Paths: -------------- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-08-08 13:25:18 UTC (rev 7264) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java 2013-08-08 13:34:17 UTC (rev 7265) @@ -1264,7 +1264,8 @@ throw new IllegalArgumentException(); if (log.isInfoEnabled()) - log.info("serviceID=" + serviceID); + log.info("serviceID=" + serviceID + ", serviceUUID=" + + JiniUtil.serviceID2UUID(serviceID)); if (this.serviceID != null && !this.serviceID.equals(serviceID)) { |
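A Jini ServiceID and a java.util.UUID are both 128-bit identifiers, which is what makes the correlation above possible. A sketch of the presumed behavior of JiniUtil.serviceID2UUID (an assumption about the helper, not the actual bigdata source):

import java.util.UUID;

import net.jini.core.lookup.ServiceID;

public class ServiceIdConversion {

    /**
     * Presumed equivalent of JiniUtil.serviceID2UUID(): copy the two
     * 64-bit halves of the 128-bit ServiceID into a UUID.
     */
    static UUID serviceID2UUID(final ServiceID serviceID) {

        return new UUID(serviceID.getMostSignificantBits(),
                serviceID.getLeastSignificantBits());

    }

}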
From: <tho...@us...> - 2013-08-08 13:25:25
|
Revision: 7264 http://bigdata.svn.sourceforge.net/bigdata/?rev=7264&view=rev Author: thompsonbry Date: 2013-08-08 13:25:18 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Modified to use StackInfoReport rather than RuntimeException. Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-08-08 13:22:20 UTC (rev 7263) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-08-08 13:25:18 UTC (rev 7264) @@ -47,6 +47,7 @@ import com.bigdata.journal.RootBlockView; import com.bigdata.journal.StoreTypeEnum; import com.bigdata.rawstore.Bytes; +import com.bigdata.util.StackInfoReport; /** * Wrapper class to handle process log creation and output for HA. @@ -260,7 +261,7 @@ throw new IllegalStateException(); if (haLog.isInfoEnabled()) - haLog.info("rootBlock=" + rootBlock, new RuntimeException()); + haLog.info("rootBlock=" + rootBlock, new StackInfoReport()); m_rootBlock = rootBlock; @@ -634,7 +635,7 @@ if (isCommitted) return; // Do not remove a sealed HALog file! if (haLog.isInfoEnabled()) - haLog.info("Will remove: " + m_state.m_haLogFile, new RuntimeException()); + haLog.info("Will remove: " + m_state.m_haLogFile, new StackInfoReport()); if (m_state.m_haLogFile.exists() && !m_state.m_haLogFile.delete()) { Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 13:22:20 UTC (rev 7263) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 13:25:18 UTC (rev 7264) @@ -172,6 +172,7 @@ import com.bigdata.util.ChecksumUtility; import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.NT; +import com.bigdata.util.StackInfoReport; /** * <p> @@ -2700,7 +2701,7 @@ * {@link ICommitRecord} from the root blocks of the store. */// TODO Could merge with doLocalAbort(). 
private void _abort() { - log.warn("ABORT",new RuntimeException("ABORT")); + log.warn("ABORT",new StackInfoReport("ABORT")); final WriteLock lock = _fieldReadWriteLock.writeLock(); lock.lock(); Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 13:22:20 UTC (rev 7263) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 13:25:18 UTC (rev 7264) @@ -104,6 +104,7 @@ import com.bigdata.service.jini.FakeLifeCycle; import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.InnerCause; +import com.bigdata.util.StackInfoReport; import com.bigdata.util.concurrent.LatchedExecutor; import com.bigdata.util.concurrent.MonitoredFutureTask; import com.bigdata.util.config.NicUtil; @@ -1055,7 +1056,7 @@ } haLog.warn("runState=" + runState + ", oldRunState=" + oldRunState - + ", serviceName=" + server.getServiceName(), new RuntimeException()); + + ", serviceName=" + server.getServiceName(), new StackInfoReport()); } @@ -1604,7 +1605,7 @@ private class EnterErrorStateTask implements Callable<Void> { protected EnterErrorStateTask() { - log.warn("", new RuntimeException()); + log.warn("", new StackInfoReport()); } public Void call() throws Exception { @@ -1681,7 +1682,7 @@ super(RunStateEnum.Error); - log.warn("", new RuntimeException()); + log.warn("", new StackInfoReport()); } |
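The idiom this changeset standardizes on is worth spelling out: the StackInfoReport is never thrown, it only hands the logger a stack trace showing how the logging point was reached, and its distinctive class name signals in the log that the trace is informational rather than an error. A self-contained sketch (the class and message here are illustrative):

import org.apache.log4j.Logger;

import com.bigdata.util.StackInfoReport;

public class StackInfoReportIdiom {

    private static final Logger log = Logger
            .getLogger(StackInfoReportIdiom.class);

    public void abort() {

        /*
         * Never thrown: the throwable argument only supplies the call
         * stack to the logger. The isInfoEnabled() guard avoids the cost
         * of capturing a stack trace when INFO logging is disabled.
         */
        if (log.isInfoEnabled())
            log.info("ABORT", new StackInfoReport("ABORT"));

    }

}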
From: <tho...@us...> - 2013-08-08 13:22:29
|
Revision: 7263 http://bigdata.svn.sourceforge.net/bigdata/?rev=7263&view=rev Author: thompsonbry Date: 2013-08-08 13:22:20 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Moved class to proper location Added Paths: ----------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java Removed Paths: ------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java Copied: branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java (from rev 7262, branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java) =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java (rev 0) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/StackInfoReport.java 2013-08-08 13:22:20 UTC (rev 7263) @@ -0,0 +1,67 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.util; + +/** + * An exception class whose sole purpose is to provide information during + * debugging concerning the context in which some method is invoked. Instances + * of this exception ARE NOT errors. They are only informative and are used in + * patterns such as: + * <pre> + * if(log.isInfoEnabled()) + * log.info(new StackInfoReport()) + * </pre> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class StackInfoReport extends RuntimeException { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public StackInfoReport() { + super(); + } + + public StackInfoReport(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public StackInfoReport(String message, Throwable cause) { + super(message, cause); + } + + public StackInfoReport(String message) { + super(message); + } + + public StackInfoReport(Throwable cause) { + super(cause); + } + +} Deleted: branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java 2013-08-08 13:20:40 UTC (rev 7262) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java 2013-08-08 13:22:20 UTC (rev 7263) @@ -1,67 +0,0 @@ -/* - -Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -*/ -package com.bigdata.util.concurrent; - -/** - * An exception class whose sole purpose is to provide information during - * debugging concerning the context in which some method is invoked. Instances - * of this exception ARE NOT errors. They are only informative and are used in - * patterns such as: - * <pre> - * if(log.isInfoEnabled()) - * log.info(new StackInfoReport()) - * </pre> - * - * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - */ -public class StackInfoReport extends RuntimeException { - - /** - * - */ - private static final long serialVersionUID = 1L; - - public StackInfoReport() { - super(); - } - - public StackInfoReport(String message, Throwable cause, - boolean enableSuppression, boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } - - public StackInfoReport(String message, Throwable cause) { - super(message, cause); - } - - public StackInfoReport(String message) { - super(message); - } - - public StackInfoReport(Throwable cause) { - super(cause); - } - -} |
From: <tho...@us...> - 2013-08-08 13:20:47
|
Revision: 7262 http://bigdata.svn.sourceforge.net/bigdata/?rev=7262&view=rev Author: thompsonbry Date: 2013-08-08 13:20:40 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Added exception whose sole purpose is to provide information during debugging rather than to provide exceptional control logic. Added Paths: ----------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java Added: branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java (rev 0) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/util/concurrent/StackInfoReport.java 2013-08-08 13:20:40 UTC (rev 7262) @@ -0,0 +1,67 @@ +/* + +Copyright (C) SYSTAP, LLC 2006-2008. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +*/ +package com.bigdata.util.concurrent; + +/** + * An exception class whose sole purpose is to provide information during + * debugging concerning the context in which some method is invoked. Instances + * of this exception ARE NOT errors. They are only informative and are used in + * patterns such as: + * <pre> + * if(log.isInfoEnabled()) + * log.info(new StackInfoReport()) + * </pre> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class StackInfoReport extends RuntimeException { + + /** + * + */ + private static final long serialVersionUID = 1L; + + public StackInfoReport() { + super(); + } + + public StackInfoReport(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public StackInfoReport(String message, Throwable cause) { + super(message, cause); + } + + public StackInfoReport(String message) { + super(message); + } + + public StackInfoReport(Throwable cause) { + super(cause); + } + +} |
From: <tho...@us...> - 2013-08-08 13:02:37
|
Revision: 7261 http://bigdata.svn.sourceforge.net/bigdata/?rev=7261&view=rev Author: thompsonbry Date: 2013-08-08 13:02:28 +0000 (Thu, 08 Aug 2013) Log Message: ----------- Reconciled delta with Martyn Modified Paths: -------------- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-07 20:53:29 UTC (rev 7260) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-08-08 13:02:28 UTC (rev 7261) @@ -5341,7 +5341,7 @@ protected void setQuorumToken(final long newValue) { - log.warn("current: " + quorumToken + ", new: " + newValue); + if(haLog.isInfoEnabled()) log.info("current: " + quorumToken + ", new: " + newValue); // Protect for potential NPE if (quorum == null) Modified: branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java =================================================================== --- branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java 2013-08-07 20:53:29 UTC (rev 7260) +++ branches/READ_CACHE2/bigdata/src/java/com/bigdata/quorum/QuorumTokenTransitions.java 2013-08-08 13:02:28 UTC (rev 7261) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ package com.bigdata.quorum; import com.bigdata.ha.HAGlue; @@ -116,37 +139,56 @@ // TODO Document rationale for each assertion. 
private void checkStates() { - if (wasJoined && wasMet && currentHaReady > currentQuorumToken) - throw new AssertionError("haReady greater than current token"); - - if (wasMet && isMet && newQuorumToken < currentQuorumToken) { - throw new AssertionError("next token less than current token"); - } - - if (wasMet && isMet && newQuorumToken != currentQuorumToken) { - throw new AssertionError("New quorum token without quorum break first, current: " + currentQuorumToken + ", new: " + newQuorumToken); - } - - if (didMeet && didJoinMetQuorum) { + + if (wasJoined && wasMet && currentHaReady > currentQuorumToken) { + + throw new AssertionError("haReady greater than current token"); + + } + + if (wasMet && isMet && newQuorumToken < currentQuorumToken) { + + throw new AssertionError("next token less than current token"); + + } + + if (wasMet && isMet && newQuorumToken != currentQuorumToken) { + /* - * It is not possible to both join with an existing quorum and - * to be one of the services that caused a quorum to meet. These + * This service observed a quorum token change without observing the + * quorum break. The service CAN NOT go directly into the new + * quorum. It MUST first do a serviceLeave(). + */ + + throw new AssertionError( + "New quorum token without quorum break first, current: " + + currentQuorumToken + ", new: " + newQuorumToken); + + } + + if (didMeet && didJoinMetQuorum) { + + /* + * It is not possible to both join with an existing quorum and to be + * one of the services that caused a quorum to meet. These * conditions are exclusive. */ - throw new AssertionError("didMeet && didJoinMetQuorum"); + + throw new AssertionError("didMeet && didJoinMetQuorum"); + + } + + /** + * This is a bit odd, it is okay, but probably didLeaveMetQuorum will + * only be true iff isJoined + */ + // if (didBreak && didLeaveMetQuorum) { + // throw new AssertionError("didBreak && didLeaveMetQuorum"); + // } + // if (didLeaveMetQuorum && !isJoined) { // TODO Why not valid? + // throw new AssertionError("didLeaveMetQuorum && !isJoined"); + // } } - - /** - * This is a bit odd, it is okay, but probably didLeaveMetQuorum will - * only be true iff isJoined - */ -// if (didBreak && didLeaveMetQuorum) { -// throw new AssertionError("didBreak && didLeaveMetQuorum"); -// } -// if (didLeaveMetQuorum && !isJoined) { // TODO Why not valid? -// throw new AssertionError("didLeaveMetQuorum && !isJoined"); -// } - } /* * Methods for making decisions in the ctor. They are NOT for public Modified: branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-07 20:53:29 UTC (rev 7260) +++ branches/READ_CACHE2/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-08-08 13:02:28 UTC (rev 7261) @@ -1320,31 +1320,43 @@ synchronized (runStateRef) { - - /* - * This check appears to cause some transitions to be lost. - * - * TODO: It would seem that a more precise check is required. - */ + /* + * This check appears to cause some transitions to be lost. + * + * FIXME ERROR HANDLING: The specific problem is that a service + * leave can cause a quorum break. 
This is essentially an + * escalation of the error condition and does require us to at + * least clear the quorum token on the journal (the HAReadyToken + * would be cleared by an uninterrupted service leave, but the + * journal quorum token would only be cleared by a quorum + * break.) + */ if (runStateTask.runState .equals(lastSubmittedRunStateRef.get())) { - /* - * FIXME: Checking if the token has changed fixes some test - * scenarios but breaks others. - */ - if (journal.getQuorumToken() == journal.getQuorum().token()) { - haLog.warn("Will not reenter active run state: " - + runStateTask.runState - + ", currentToken: " + journal.getQuorumToken() - + ", newToken: " + journal.getQuorum().token() - ); - - return null; - } else { - haLog.warn("Re-entering current state since token has changed: " + runStateTask.runState); - } + /* + * FIXME ERROR HANDLING: Checking if the token has changed + * (per the note above) fixes some test scenarios + * (testAB_BounceFollower) but breaks others + * (testAB_BounceLeader and testAB_RestartLeader + * occasionally fails). + */ + if (journal.getQuorumToken() == journal.getQuorum().token()) { + + haLog.warn("Will not reenter active run state: " + + runStateTask.runState + ", currentToken: " + + journal.getQuorumToken() + ", newToken: " + + journal.getQuorum().token()); + + return null; + + } else { + + haLog.warn("Re-entering current state since token has changed: " + + runStateTask.runState); + + } + } final FutureTask<Void> ft = new FutureTaskMon<Void>( Modified: branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java =================================================================== --- branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-07 20:53:29 UTC (rev 7260) +++ branches/READ_CACHE2/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java 2013-08-08 13:02:28 UTC (rev 7261) @@ -515,6 +515,24 @@ // Simple transaction. simpleTransaction(); +// try { +// // Simple transaction. +// simpleTransaction(); +// fail("Expecting failed transaction"); +// } catch(HttpException ex) { +// if (!ex.getMessage().contains( +// SpuriousTestException.class.getName())) { +// /* +// * Wrong inner cause. +// * +// * Note: The stack trace of the local exception does not include +// * the remote stack trace. The cause is formatted into the HTTP +// * response body. +// */ +// fail("Expecting " + ClocksNotSynchronizedException.class, t); +// } +// } + // Verify quorum is unchanged. assertEquals(token, quorum.token()); |
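The key assertion added in this revision says that a met quorum's token can never change in place: the service must first observe the break (token cleared), do its service leave, and only then observe the new token. A small illustrative sketch of that invariant, not the actual QuorumTokenTransitions code; the -1 sentinel for "no quorum" is an assumption (Quorum.NO_QUORUM in the real code base):

public class TokenTransitionCheck {

    /** Assumed sentinel for "no quorum" (Quorum.NO_QUORUM in bigdata). */
    static final long NO_QUORUM = -1L;

    /**
     * The invariant enforced by the new assertion: while the quorum
     * remains met, the token may not change in place.
     */
    static void check(final boolean wasMet, final boolean isMet,
            final long currentToken, final long newToken) {

        if (wasMet && isMet && newToken != currentToken)
            throw new AssertionError(
                    "New quorum token without quorum break first, current: "
                            + currentToken + ", new: " + newToken);

    }

    public static void main(final String[] args) {

        check(true, true, 5L, 5L);         // unchanged token: ok.
        check(true, false, 5L, NO_QUORUM); // break observed: ok.
        check(false, true, NO_QUORUM, 6L); // meet after the break: ok.
        check(true, true, 5L, 6L);         // fails: token moved without a break.

    }

}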
From: <tho...@us...> - 2013-08-07 20:53:42
|
Revision: 7260 http://bigdata.svn.sourceforge.net/bigdata/?rev=7260&view=rev Author: thompsonbry Date: 2013-08-07 20:53:29 +0000 (Wed, 07 Aug 2013) Log Message: ----------- I have added setHeader(name:String,value:String) to IPreparedQuery. This makes it possible to write unit tests of CONNEG by the NSS. There is also setAcceptHeader() and getHeader(name:String):String. I have restored the lost CONNEG coverage for at least some of the various query types (ASK and SELECT). Note: This change set has some impact on AbstractHAJournalServerTestCase that will need to be reconciled into the READ_CACHE branch. Note: We do not have a parser for the JSON results format in openrdf 2.6.x. Therefore the CONNEG test case for the JSON SPARQL results format is disabled since it otherwise fails to locate the parser to interpret the results. This is documented in the test case and linked to the appropriate tickets. The NSS test suite is passing. The AST Eval test suite is passing (there is a dependency on the REST API for SPARQL federated query). TODO I have not done this for CONSTRUCT or DESCRIBE yet. TODO There are some recent tickets related to the REST API that have patches that will need to be reconciled against this update. See #701, #694, and #696. See https://sourceforge.net/apps/trac/bigdata/ticket/704 (ASK does not support JSON) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -31,6 +31,7 @@ import java.io.IOException; import java.security.DigestException; import java.security.NoSuchAlgorithmException; +import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -165,13 +166,18 @@ request = newRequest(urlString.toString(), opts.method); - if (opts.acceptHeader != null) { - - request.addHeader("Accept", opts.acceptHeader); - - if (log.isDebugEnabled()) - log.debug("Accept: " + opts.acceptHeader); - + if (opts.requestHeaders != null) { + + for (Map.Entry<String, String> e : opts.requestHeaders + .entrySet()) { + + request.addHeader(e.getKey(), e.getValue()); + + if (log.isDebugEnabled()) + log.debug(e.getKey() + ": " + e.getValue()); + + } + } if (opts.entity != null) { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 
=================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceCallImpl.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -106,11 +106,11 @@ if (acceptHeader != null) { - o.acceptHeader = acceptHeader; + o.setAcceptHeader(acceptHeader); } else { - o.acceptHeader = ConnectOptions.DEFAULT_SOLUTIONS_ACCEPT_HEADER; + o.setAcceptHeader(ConnectOptions.DEFAULT_SOLUTIONS_ACCEPT_HEADER); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/ConnectOptions.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -56,12 +56,12 @@ /** The HTTP method (GET, POST, etc). */ public String method = "POST"; +// /** +// * The accept header (NO DEFAULT). +// */ +// public String acceptHeader = null; + /** - * The accept header (NO DEFAULT). - */ - public String acceptHeader = null; - - /** * Used for {@link RDFFormat} responses. */ public static final String DEFAULT_GRAPH_ACCEPT_HEADER; @@ -125,10 +125,24 @@ } - /** Request parameters to be formatted as URL query parameters. */ + /** + * Request parameters to be formatted as URL query parameters. + * + * TODO Should be private or package private + */ public Map<String, String[]> requestParams; - /** Request entity. */ + /** + * Optional request headers. + * + * TODO Should be private or package private + */ + public Map<String, String> requestHeaders; + + /** Request entity. + * + * TODO Should be private or package private. + */ public HttpEntity entity = null; // /** @@ -187,6 +201,37 @@ } + public void setHeader(final String name, final String val) { + + if (requestHeaders == null) { + requestHeaders = new LinkedHashMap<String, String>(); + } + + requestHeaders.put(name, val); + + } + + public void setAcceptHeader(final String value) { + + setHeader("Accept", value); + + } + + public String getAcceptHeader() { + + return getHeader("Accept"); + + } + + public String getHeader(final String name) { + + if (requestHeaders == null) + return null; + + return requestHeaders.get(name); + + } + /** * Add any URL query parameters. */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/IPreparedQuery.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -42,4 +42,32 @@ */ UUID getQueryId(); + /** + * Override the value of the specified HTTP header. + * + * @param name + * The name of the HTTP header. + * @param value + * The value to be used. + */ + void setHeader(String name, String value); + + /** + * Convenience method to set the <code>Accept</code> header. + * + * @param value + * The value to be used. 
+ */ + void setAcceptHeader(String value); + + /** + * Return the value of the specified HTTP header. + * + * @param name + * The name of the HTTP header. + * + * @return The value -or- <code>null</code> if the header is not defined. + */ + String getHeader(String name); + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -327,7 +327,7 @@ HttpResponse response = null; - opts.acceptHeader = ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER; + opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER); checkResponseCode(response = doConnect(opts)); @@ -574,7 +574,7 @@ HttpResponse resp = null; try { - opts.acceptHeader = ConnectOptions.MIME_APPLICATION_XML; + opts.setAcceptHeader(ConnectOptions.MIME_APPLICATION_XML); checkResponseCode(resp = doConnect(opts)); @@ -627,7 +627,7 @@ HttpResponse resp = null; try { - opts.acceptHeader = ConnectOptions.MIME_APPLICATION_XML; + opts.setAcceptHeader(ConnectOptions.MIME_APPLICATION_XML); checkResponseCode(resp = doConnect(opts)); @@ -686,7 +686,7 @@ HttpResponse response = null; try { - opts.acceptHeader = ConnectOptions.MIME_APPLICATION_XML; + opts.setAcceptHeader(ConnectOptions.MIME_APPLICATION_XML); checkResponseCode(response = doConnect(opts)); @@ -766,7 +766,7 @@ HttpResponse response = null; try { - opts.acceptHeader = ConnectOptions.MIME_APPLICATION_XML; + opts.setAcceptHeader(ConnectOptions.MIME_APPLICATION_XML); checkResponseCode(response = doConnect(opts)); @@ -854,7 +854,7 @@ HttpResponse response = null; try { - opts.acceptHeader = ConnectOptions.MIME_APPLICATION_XML; + opts.setAcceptHeader(ConnectOptions.MIME_APPLICATION_XML); checkResponseCode(response = doConnect(opts)); @@ -880,7 +880,7 @@ * <p> * Right now, the only metadata is the query ID. */ - protected abstract class Query implements IPreparedOperation { + protected abstract class Query implements IPreparedOperation, IPreparedQuery { protected final ConnectOptions opts; @@ -920,18 +920,16 @@ this.query = query; this.update = update; - /* - * Note: This sets various defaults. 
- */ - setupConnectOptions(); } + @Override final public UUID getQueryId() { return id; } + @Override public final boolean isUpdate() { return update; @@ -957,11 +955,30 @@ if (id != null) opts.addRequestParam("queryId", getQueryId().toString()); - -// return opts; } + + @Override + public void setAcceptHeader(final String value) { + + opts.setAcceptHeader(value); + + } + + @Override + public void setHeader(final String name, final String value) { + opts.setHeader(name, value); + + } + + @Override + public String getHeader(final String name) { + + return opts.getHeader(name); + + } + } private final class TupleQuery extends Query implements IPreparedTupleQuery { @@ -972,15 +989,24 @@ super(opts, id, query); } - + + @Override + protected void setupConnectOptions() { + + super.setupConnectOptions(); + + if (opts.getAcceptHeader() == null) + opts.setAcceptHeader(ConnectOptions.DEFAULT_SOLUTIONS_ACCEPT_HEADER); + + } + public TupleQueryResult evaluate() throws Exception { HttpResponse response = null; // try { - - if (opts.acceptHeader == null) - opts.acceptHeader = ConnectOptions.DEFAULT_SOLUTIONS_ACCEPT_HEADER; + setupConnectOptions(); + checkResponseCode(response = doConnect(opts)); return tupleResults(response); @@ -1014,13 +1040,22 @@ } @Override + protected void setupConnectOptions() { + + super.setupConnectOptions(); + + if (opts.getAcceptHeader() == null) + opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER); + + } + + @Override public GraphQueryResult evaluate() throws Exception { HttpResponse response = null; - if (opts.acceptHeader == null) - opts.acceptHeader = ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER; - + setupConnectOptions(); + checkResponseCode(response = doConnect(opts)); return graphResults(response); @@ -1039,17 +1074,25 @@ } + @Override + protected void setupConnectOptions() { + + super.setupConnectOptions(); + + if (opts.getAcceptHeader() == null) + opts.setAcceptHeader(ConnectOptions.DEFAULT_BOOLEAN_ACCEPT_HEADER); + + } + + @Override public boolean evaluate() throws Exception { HttpResponse response = null; try { -// final ConnectOptions opts = getConnectOpts(); + setupConnectOptions(); - if (opts.acceptHeader == null) - opts.acceptHeader = ConnectOptions.DEFAULT_BOOLEAN_ACCEPT_HEADER; - checkResponseCode(response = doConnect(opts)); return booleanResults(response); @@ -1088,9 +1131,9 @@ HttpResponse response = null; try { - -// final ConnectOptions opts = getConnectOpts(); + setupConnectOptions(); + // Note: No response body is expected. 
checkResponseCode(response = doConnect(opts)); @@ -1334,14 +1377,19 @@ try { request = newRequest(urlString.toString(), opts.method); - - if (opts.acceptHeader != null) { - - request.addHeader("Accept", opts.acceptHeader); - - if (log.isDebugEnabled()) - log.debug("Accept: " + opts.acceptHeader); - + + if (opts.requestHeaders != null) { + + for (Map.Entry<String, String> e : opts.requestHeaders + .entrySet()) { + + request.addHeader(e.getKey(), e.getValue()); + + if (log.isDebugEnabled()) + log.debug(e.getKey() + ": " + e.getValue()); + + } + } // // conn = doConnect(urlString.toString(), opts.method); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepositoryManager.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -174,7 +174,7 @@ HttpResponse response = null; GraphQueryResult result = null; - opts.acceptHeader = ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER; + opts.setAcceptHeader(ConnectOptions.DEFAULT_GRAPH_ACCEPT_HEADER); try { // check response in try. @@ -314,7 +314,7 @@ HttpResponse response = null; - opts.acceptHeader = ConnectOptions.MIME_PROPERTIES_XML; + opts.setAcceptHeader(ConnectOptions.MIME_PROPERTIES_XML); try { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2013-08-07 18:37:05 UTC (rev 7259) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient.java 2013-08-07 20:53:29 UTC (rev 7260) @@ -3,7 +3,6 @@ import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import org.openrdf.model.Graph; @@ -18,6 +17,8 @@ import org.openrdf.model.impl.URIImpl; import org.openrdf.model.vocabulary.RDF; import org.openrdf.model.vocabulary.RDFS; +import org.openrdf.query.resultio.BooleanQueryResultFormat; +import org.openrdf.query.resultio.TupleQueryResultFormat; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFWriter; import org.openrdf.rio.RDFWriterFactory; @@ -95,27 +96,51 @@ } /** - * "ASK" query with an empty KB. + * "ASK" query with an empty KB and CONNEG for various known/accepted MIME + * Types. 
*/ public void test_ASK() throws Exception { final String queryStr = "ASK where {?s ?p ?o}"; // final RemoteRepository repo = new RemoteRepository(m_serviceURL); - final IPreparedBooleanQuery query = m_repo.prepareBooleanQuery(queryStr); - assertEquals(false, query.evaluate()); - -// final QueryOptions opts = new QueryOptions(); -// opts.serviceURL = m_serviceURL; -// opts.queryStr = queryStr; -// opts.method = "GET"; -// -// opts.acceptHeader = BooleanQueryResultFormat.SPARQL.getDefaultMIMEType(); -// assertEquals(false, askResults(doSparqlQuery(opts, requestPath))); -// -// opts.acceptHeader = BooleanQueryResultFormat.TEXT.getDefaultMIMEType(); -// assertEquals(false, askResults(doSparqlQuery(opts, requestPath))); + { + final IPreparedBooleanQuery query = m_repo + .prepareBooleanQuery(queryStr); + assertEquals(false, query.evaluate()); + } + { + final IPreparedBooleanQuery query = m_repo + .prepareBooleanQuery(queryStr); + query.setHeader("Accept", + BooleanQueryResultFormat.SPARQL.getDefaultMIMEType()); + assertEquals(false, query.evaluate()); + } + { + final IPreparedBooleanQuery query = m_repo + .prepareBooleanQuery(queryStr); + query.setHeader("Accept", + BooleanQueryResultFormat.TEXT.getDefaultMIMEType()); + assertEquals(false, query.evaluate()); + } + /** + * FIXME JJC: Uncomment to test CONNEG for JSON. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/588" > + * JSON-LD </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/714" > + * Migrate to openrdf 2.7 </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/704" > + * ask does not return json </a> + */ +// { +// final IPreparedBooleanQuery query = m_repo +// .prepareBooleanQuery(queryStr); +// query.setHeader("Accept", "application/sparql-results+json"); +// assertEquals(false, query.evaluate()); +// } + } // /** @@ -146,26 +171,92 @@ final String queryStr = "select * where {?s ?p ?o}"; -// final RemoteRepository repo = new RemoteRepository(m_serviceURL); - final IPreparedTupleQuery query = m_repo.prepareTupleQuery(queryStr); - assertEquals(0, countResults(query.evaluate())); + { + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + assertEquals(0, countResults(query.evaluate())); + + } + + { + + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + query.setHeader("Accept", + TupleQueryResultFormat.SPARQL.getDefaultMIMEType()); + + assertEquals(0, countResults(query.evaluate())); + + } + + { + + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + query.setHeader("Accept", + TupleQueryResultFormat.BINARY.getDefaultMIMEType()); + + assertEquals(0, countResults(query.evaluate())); + + } + + /** + * FIXME The necessary parser does not appear to be available. 
If you + * enable this you will get ClassNotFoundException for + * <code>au/com/bytecode/opencsv/CSVReader</code> + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/714" > + * Migrate to openrdf 2.7 </a> + */ if (false) { + + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + query.setHeader("Accept", + TupleQueryResultFormat.CSV.getDefaultMIMEType()); + + assertEquals(0, countResults(query.evaluate())); + + } -// final QueryOptions opts = new QueryOptions(); -// opts.serviceURL = m_serviceURL; -// opts.queryStr = queryStr; -// opts.method = "GET"; -// -// opts.acceptHeader = TupleQueryResultFormat.SPARQL.getDefaultMIMEType(); -// assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); -// -// // TODO JSON parser is not bundled by openrdf. -//// opts.acceptHeader = TupleQueryResultFormat.JSON.getDefaultMIMEType(); -//// assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); -// -// opts.acceptHeader = TupleQueryResultFormat.BINARY.getDefaultMIMEType(); -// assertEquals(0, countResults(doSparqlQuery(opts, requestPath))); + { + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + query.setHeader("Accept", + TupleQueryResultFormat.TSV.getDefaultMIMEType()); + + assertEquals(0, countResults(query.evaluate())); + + } + + /** + * FIXME Enable this once we have a JSON result format parser (openrdf + * 2.7). + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/714" > + * Migrate to openrdf 2.7 </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/588" > + * JSON-LD </a> + */ + if (false) { + + final IPreparedTupleQuery query = m_repo + .prepareTupleQuery(queryStr); + + query.setHeader("Accept", + TupleQueryResultFormat.JSON.getDefaultMIMEType()); + + assertEquals(0, countResults(query.evaluate())); + + } + } // /** |
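Putting the new API together, a client can now pin the response format per request. A usage sketch against the methods added above; the RemoteRepository handle is assumed to be connected to a running NanoSparqlServer endpoint:

import org.openrdf.query.TupleQueryResult;
import org.openrdf.query.resultio.TupleQueryResultFormat;

import com.bigdata.rdf.sail.webapp.client.IPreparedTupleQuery;
import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

public class ConnegExample {

    /**
     * Run a query, forcing the server to respond with SPARQL/XML results
     * rather than whatever the default Accept header would negotiate.
     */
    static long countAsSparqlXml(final RemoteRepository repo) throws Exception {

        final IPreparedTupleQuery query = repo
                .prepareTupleQuery("SELECT * { ?s ?p ?o } LIMIT 10");

        // CONNEG: pin the response MIME type for this request only.
        query.setAcceptHeader(TupleQueryResultFormat.SPARQL
                .getDefaultMIMEType());

        final TupleQueryResult result = query.evaluate();
        long n = 0L;
        try {
            while (result.hasNext()) {
                result.next();
                n++;
            }
        } finally {
            result.close();
        }
        return n;

    }

}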
From: <mrp...@us...> - 2013-08-07 18:37:17
|
Revision: 7259 http://bigdata.svn.sourceforge.net/bigdata/?rev=7259&view=rev Author: mrpersonick Date: 2013-08-07 18:37:05 +0000 (Wed, 07 Aug 2013) Log Message: ----------- NSS test cases coping strategy Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java 2013-08-07 18:37:05 UTC (rev 7259) @@ -0,0 +1,70 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Nov 7, 2007 + */ + +package com.bigdata.rdf.sail.webapp; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + + +/** + * Test suite. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: TestAll.java 4908 2011-07-13 19:42:43Z thompsonbry $ + */ +public class TestAll2 extends TestCase { + + /** + * + */ + public TestAll2() { + super(); + } + + /** + * @param arg0 + */ + public TestAll2(String arg0) { + super(arg0); + } + + public static Test suite() { + + final TestSuite suite = new TestSuite("WebApp"); + +// suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.triples)); +// +// suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.sids)); + + suite.addTest(TestNanoSparqlServerWithProxyIndexManager2.suite(TestMode.quads)); + + return suite; + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestAll2.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java 2013-08-07 18:37:05 UTC (rev 7259) @@ -0,0 +1,35 @@ +package com.bigdata.rdf.sail.webapp; + +import com.bigdata.journal.IIndexManager; +import com.bigdata.rdf.sail.webapp.client.RemoteRepositoryManager; + +/** + * Proxied test suite. 
+ * + * @param <S> + */ +public class TestNanoSparqlClient2<S extends IIndexManager> extends + AbstractTestNanoSparqlClient<S> { + + public TestNanoSparqlClient2() { + + } + + public TestNanoSparqlClient2(final String name) { + + super(name); + + } + + /** + * Delete everything matching an access path description. + */ + public void test_IMPLEMENT_ME() throws Exception { + + final RemoteRepositoryManager rrm = super.m_repo; + + // do something here + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlClient2.java ___________________________________________________________________ Added: svn:mime-type + text/plain Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java 2013-08-07 18:37:05 UTC (rev 7259) @@ -0,0 +1,498 @@ +/** +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sail.webapp; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.InputStream; +import java.util.Properties; + +import junit.extensions.proxy.ProxyTestSuite; +import junit.framework.AssertionFailedError; +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestListener; +import junit.framework.TestResult; +import junit.textui.ResultPrinter; + +import com.bigdata.journal.BufferMode; +import com.bigdata.journal.IIndexManager; +import com.bigdata.journal.Journal; +import com.bigdata.rawstore.Bytes; +import com.bigdata.rdf.axioms.NoAxioms; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.store.AbstractTripleStore; +import com.bigdata.rdf.vocab.NoVocabulary; +import com.bigdata.service.IBigdataFederation; +import com.bigdata.service.jini.JiniClient; +import com.bigdata.service.jini.JiniFederation; + +/** + * Test suite for {@link RESTServlet} (SPARQL end point and REST API for RDF + * data). + * + * TODO Add unit tests which exercise the full text index. + * + * TODO Add unit tests which are specific to sids and quads modes. These tests + * should look at the semantics of interchange of sids or quads specific data; + * queries which exercise the context position; and the default-graph and + * named-graph URL query parameters for quads. + * + * @todo Security model? + * + * @todo An NQUADS RDFWriter needs to be written. Then we can test NQUADS + * interchange. 
+ * + * @todo A SPARQL result sets JSON parser needs to be written (Sesame bundles a + * writer, but not a parser) before we can test queries which CONNEG for a + * JSON result set. + * + * @todo Tests which verify the correct rejection of illegal or ill-formed + * requests. + * + * @todo Test suite for reading from a historical commit point. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: TestNanoSparqlServer.java 4398 2011-04-14 13:55:29Z thompsonbry + * $ + */ +public class TestNanoSparqlServerWithProxyIndexManager2<S extends IIndexManager> + extends AbstractIndexManagerTestCase<S> { + + /** + * The {@link IIndexManager} for the backing persistence engine (may be a + * {@link Journal} or {@link JiniFederation}). + */ + private IIndexManager m_indexManager; + + /** + * The mode in which the test is running. + */ + private TestMode testMode; + + /** + * Run in triples mode on a temporary journal. + */ + public TestNanoSparqlServerWithProxyIndexManager2() { + + this(null/* name */, getTemporaryJournal(), TestMode.triples); + + } + + /** + * Run in triples mode on a temporary journal. + */ + public TestNanoSparqlServerWithProxyIndexManager2(String name) { + + this(name, getTemporaryJournal(), TestMode.triples); + + } + + static private Journal getTemporaryJournal() { + + final Properties properties = new Properties(); + + properties.setProperty(com.bigdata.journal.Options.BUFFER_MODE, + BufferMode.Transient.toString()); + + properties.setProperty(com.bigdata.journal.Options.INITIAL_EXTENT, "" + + (Bytes.megabyte32 * 1)); + + return new Journal(properties); + + } + + /** + * Run test suite against an embedded {@link NanoSparqlServer} instance, + * which is in turn running against the caller's {@link IIndexManager}. + * + * @param indexManager + * The {@link Journal} or {@link JiniFederation}. + * @param testMode + * Identifies what mode the kb instance will be using. + */ + private TestNanoSparqlServerWithProxyIndexManager2(final String name, + final IIndexManager indexManager, final TestMode testMode) { + + super(name == null ? TestNanoSparqlServerWithProxyIndexManager2.class.getName() + : name); + + this.m_indexManager = indexManager; + + this.testMode = testMode; + + } + + /** + * Return suite running in triples mode against a temporary journal. + */ + public static Test suite() { + + return suite(TestMode.triples); + + } + + /** + * Return suite running in the specified mode against a temporary journal. + */ + public static Test suite(final TestMode testMode) { + + return suite(getTemporaryJournal(), testMode); + + } + + /** + * The {@link TestMode#triples} test suite. + */ + public static class test_NSS_triples extends TestCase { + public static Test suite() { + return TestNanoSparqlServerWithProxyIndexManager2.suite( + getTemporaryJournal(), TestMode.triples); + } + } + + /** + * The {@link TestMode#quads} test suite. + */ + public static class Test_NSS_quads extends TestCase { + public static Test suite() { + return TestNanoSparqlServerWithProxyIndexManager2.suite( + getTemporaryJournal(), TestMode.quads); + } + } + + /** + * The {@link TestMode#sids} test suite. + */ + public static class Test_NSS_sids extends TestCase { + public static Test suite() { + return TestNanoSparqlServerWithProxyIndexManager2.suite( + getTemporaryJournal(), TestMode.sids); + } + } + + /** + * Return suite running in the given mode against the given + * {@link IIndexManager}. 
+ */ + public static Test suite(final IIndexManager indexManager, + final TestMode testMode) { + + final TestNanoSparqlServerWithProxyIndexManager2<?> delegate = new TestNanoSparqlServerWithProxyIndexManager2( + null/* name */, indexManager, testMode); // !!!! THIS CLASS !!!! + + /* + * Use a proxy test suite and specify the delegate. + */ + + final ProxyTestSuite suite = new ProxyTestSuite(delegate, + "NanoSparqlServer Proxied Test Suite"); + + /* + * List any non-proxied tests (typically bootstrapping tests). + */ + + suite.addTestSuite(TestNanoSparqlClient2.class); + + return suite; + + } + + @SuppressWarnings("unchecked") + public S getIndexManager() { + + return (S) m_indexManager; + + } + + @Override + public Properties getProperties() { + +// System.err.println("testMode="+testMode); + + final Properties properties = new Properties(); + + switch (testMode) { + case quads: + properties.setProperty(AbstractTripleStore.Options.QUADS_MODE, + "true"); + properties.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, + "false"); + properties.setProperty(AbstractTripleStore.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, "false"); + break; + case triples: + properties.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, + "false"); + properties.setProperty(AbstractTripleStore.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, "false"); + break; + case sids: + properties.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, + "false"); + properties.setProperty(AbstractTripleStore.Options.AXIOMS_CLASS, + NoAxioms.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.VOCABULARY_CLASS, + NoVocabulary.class.getName()); + properties.setProperty( + AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, "true"); + break; + default: + fail("Unknown mode: " + testMode); + } + // if (false/* triples w/ truth maintenance */) { + // properties.setProperty(AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, + // "false"); + // } + // if (false/* sids w/ truth maintenance */) { + // properties.setProperty(AbstractTripleStore.Options.STATEMENT_IDENTIFIERS, + // "true"); + // } + + return properties; + } + + /** + * Open the {@link IIndexManager} identified by the property file. + * + * @param propertyFile + * The property file (for a standalone bigdata instance) or the + * jini configuration file (for a bigdata federation). The file + * must end with either ".properties" or ".config". + * + * @return The {@link IIndexManager}. + */ + static private IIndexManager openIndexManager(final String propertyFile) { + + final File file = new File(propertyFile); + + if (!file.exists()) { + + throw new RuntimeException("Could not find file: " + file); + + } + + boolean isJini = false; + if (propertyFile.endsWith(".config")) { + // scale-out. + isJini = true; + } else if (propertyFile.endsWith(".properties")) { + // local journal. + isJini = false; + } else { + /* + * Note: This is a hack, but we are recognizing the jini + * configuration file with a .config extension and the journal + * properties file with a .properties extension. 
+ */ + throw new RuntimeException( + "File must have '.config' or '.properties' extension: " + + file); + } + + final IIndexManager indexManager; + try { + + if (isJini) { + + /* + * A bigdata federation. + */ + + @SuppressWarnings("rawtypes") + final JiniClient<?> jiniClient = new JiniClient( + new String[] { propertyFile }); + + indexManager = jiniClient.connect(); + + } else { + + /* + * Note: we only need to specify the FILE when re-opening a + * journal containing a pre-existing KB. + */ + final Properties properties = new Properties(); + { + // Read the properties from the file. + final InputStream is = new BufferedInputStream( + new FileInputStream(propertyFile)); + try { + properties.load(is); + } finally { + is.close(); + } + if (System.getProperty(BigdataSail.Options.FILE) != null) { + // Override/set from the environment. + properties.setProperty(BigdataSail.Options.FILE, System + .getProperty(BigdataSail.Options.FILE)); + } + if (properties + .getProperty(com.bigdata.journal.Options.FILE) == null) { + // Run against a transient journal if no file was + // specified. + properties.setProperty( + com.bigdata.journal.Options.BUFFER_MODE, + BufferMode.Transient.toString()); + properties.setProperty( + com.bigdata.journal.Options.INITIAL_EXTENT, "" + + (Bytes.megabyte32 * 1)); + } + + } + + indexManager = new Journal(properties); + + } + + } catch (Exception ex) { + + throw new RuntimeException(ex); + + } + + return indexManager; + + } + + /** + * Runs the test suite against an {@link IBigdataFederation} or a + * {@link Journal}. The federation must already be up and running. An + * embedded {@link NanoSparqlServer} instance will be created for each test + * run. Each test will run against a distinct KB instance within a unique + * bigdata namespace on the same backing {@link IIndexManager}. + * <p> + * When run for CI, this can be executed as: + * <pre> + * ... -Djava.security.policy=policy.all TestNanoSparqlServerWithProxyIndexManager triples /nas/bigdata/benchmark/config/bigdataStandalone.config + * </pre> + * + * @param args + * <code> + * (testMode) (propertyFile|configFile) + * </code> + * + * where propertyFile is the configuration file for a + * {@link Journal}. <br/> + * where configFile is the configuration file for an + * {@link IBigdataFederation}.<br/> + * where <i>triples</i> or <i>sids</i> or <i>quads</i> is the + * database mode.</br> where <i>tm</i> indicates that truth + * maintenance should be enabled (only valid with triples or + * sids). + */ + public static void main(final String[] args) throws Exception { + + if (args.length < 2) { + System.err + .println("(triples|sids|quads) (propertyFile|configFile) (tm)?"); + System.exit(1); + } + + final TestMode testMode = TestMode.valueOf(args[0]); + +// if (testMode != TestMode.triples) +// fail("Unsupported test mode: " + testMode); + + final File propertyFile = new File(args[1]); + + if (!propertyFile.exists()) + fail("No such file: " + propertyFile); + + // Setup test result. 
+ final TestResult result = new TestResult(); + + // Setup listener, which will write the result on System.out + result.addListener(new ResultPrinter(System.out)); + + result.addListener(new TestListener() { + + public void startTest(Test arg0) { + log.info(arg0); + } + + public void endTest(Test arg0) { + log.info(arg0); + } + + public void addFailure(Test arg0, AssertionFailedError arg1) { + log.error(arg0,arg1); + } + + public void addError(Test arg0, Throwable arg1) { + log.error(arg0,arg1); + } + }); + + // Open Journal / Connect to the configured federation. + final IIndexManager indexManager = openIndexManager(propertyFile + .getAbsolutePath()); + + try { + + // Setup test suite + final Test test = TestNanoSparqlServerWithProxyIndexManager2.suite( + indexManager, testMode); + + // Run the test suite. + test.run(result); + + } finally { + + if (indexManager instanceof JiniFederation<?>) { + // disconnect + ((JiniFederation<?>) indexManager).shutdownNow(); + } else { + // destroy journal. + ((Journal) indexManager).destroy(); + } + + } + + final String msg = "nerrors=" + result.errorCount() + ", nfailures=" + + result.failureCount() + ", nrun=" + result.runCount(); + + if (result.errorCount() > 0 || result.failureCount() > 0) { + + // At least one test failed. + fail(msg); + + } + + // All green. + System.out.println(msg); + + } + +} Property changes on: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestNanoSparqlServerWithProxyIndexManager2.java ___________________________________________________________________ Added: svn:mime-type + text/plain This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
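The getTemporaryJournal() helper in the class above is what keeps these proxied test runs isolated: each suite gets a throwaway, memory-only Journal. Below is a minimal sketch of that same configuration pattern, using the option names from the diff above (the 1MB initial extent is just a low-overhead starting size; the journal grows on demand):

    import java.util.Properties;

    import com.bigdata.journal.BufferMode;
    import com.bigdata.journal.Journal;

    public class TransientJournalExample {

        public static void main(final String[] args) {

            final Properties properties = new Properties();

            // Keep everything in memory; nothing is written to disk.
            properties.setProperty(com.bigdata.journal.Options.BUFFER_MODE,
                    BufferMode.Transient.toString());

            // Start small (1MB); the backing buffer grows on demand.
            properties.setProperty(com.bigdata.journal.Options.INITIAL_EXTENT,
                    "" + (1024 * 1024));

            final Journal jnl = new Journal(properties);

            try {

                // ... run tests or load data against jnl ...

            } finally {

                // Discard the in-memory state when done.
                jnl.destroy();

            }

        }

    }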
From: <tho...@us...> - 2013-08-07 18:02:16
Revision: 7258 http://bigdata.svn.sourceforge.net/bigdata/?rev=7258&view=rev Author: thompsonbry Date: 2013-08-07 18:02:06 +0000 (Wed, 07 Aug 2013) Log Message: ----------- Partial resolution of [1] (the issue is resolved for ASK and SELECT queries, but not for DESCRIBE/CONSTRUCT queries). That ticket will be closed as "won't fix". [1] https://sourceforge.net/apps/trac/bigdata/ticket/715 (Interrupt of thread submitting a query for evaluation does not always terminate the AbstractRunningQuery) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2013-08-07 17:59:45 UTC (rev 7257) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java 2013-08-07 18:02:06 UTC (rev 7258) @@ -191,11 +191,19 @@ return itr.hasNext(); } finally { if (itr != null) { + /** + * Ensure query is terminated. An interrupt during hasNext() + * should cause the query to terminate through itr.close(). + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ itr.close(); } } } - + /** * Evaluate a SELECT query. * @@ -241,15 +249,30 @@ final boolean materializeProjectionInQuery = context.materializeProjectionInQuery && !optimizedQuery.hasSlice(); - return new TupleQueryResultImpl(projectedSet, - ASTEvalHelper.evaluateQuery( - astContainer, - context, - bindingSets - ,materializeProjectionInQuery// + final CloseableIteration<BindingSet, QueryEvaluationException> itr = ASTEvalHelper + .evaluateQuery(astContainer, context, bindingSets, + materializeProjectionInQuery// , projected// - )); + ); + TupleQueryResult r = null; + try { + r = new TupleQueryResultImpl(projectedSet, itr); + return r; + } finally { + if (r == null) { + /** + * Ensure query is terminated if assignment to fails. E.g., if + * interrupted during the ctor. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/707"> + * BlockingBuffer.close() does not unblock threads </a> + */ + itr.close(); + } + } + } /** @@ -463,17 +486,10 @@ final boolean materializeProjectionInQuery = context.materializeProjectionInQuery && !optimizedQuery.hasSlice(); - // Solutions to the WHERE clause (as projected). - final CloseableIteration<BindingSet, QueryEvaluationException> solutions = ASTEvalHelper - .evaluateQuery(astContainer, context, bindingSets// - , materializeProjectionInQuery// - , optimizedQuery.getProjection().getProjectionVars()// - ); - // The effective DescribeMode. -// final DescribeModeEnum describeMode = optimizedQuery.getProjection() -// .getDescribeMode() == null ? QueryHints.DEFAULT_DESCRIBE_MODE -// : optimizedQuery.getProjection().getDescribeMode(); +// final DescribeModeEnum describeMode = optimizedQuery.getProjection() +// .getDescribeMode() == null ? QueryHints.DEFAULT_DESCRIBE_MODE +// : optimizedQuery.getProjection().getDescribeMode(); final DescribeModeEnum describeMode = context .getDescribeMode(optimizedQuery.getProjection()); @@ -483,6 +499,18 @@ final int describeStatementlimit = context .getDescribeStatementLimit(optimizedQuery.getProjection()); + // The final result to be returned. 
+ GraphQueryResult result = null; + + // Solutions to the WHERE clause (as projected). + final CloseableIteration<BindingSet, QueryEvaluationException> solutions = ASTEvalHelper + .evaluateQuery(astContainer, context, bindingSets// + , materializeProjectionInQuery// + , optimizedQuery.getProjection().getProjectionVars()// + ); + + try { + final CloseableIteration<BindingSet, QueryEvaluationException> solutions2; final ConcurrentHashSet<BigdataValue> describedResources; if (describeCache != null) { @@ -593,9 +621,32 @@ } - return new GraphQueryResultImpl(// + result = new GraphQueryResultImpl(// optimizedQuery.getPrefixDecls(), // src3); + } finally { + if (result == null) { + /** + * Cancel the query since we are not returning the + * GraphTupleQuery result object to the caller. + * + * Note: This provides only partial resolution of the following + * ticket. There are other operations than the underlying query + * that would need to be canceled. I have NOT verified that + * closing the underlying query is sufficient to unwind those + * operations. Also, the CBD support is not written to be + * interruptable at this time (see the TODO above). + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/715" + * > Interrupt of thread submitting a query for evaluation + * does not always terminate the AbstractRunningQuery </a> + */ + solutions.close(); + } + } + + return result; } @@ -664,7 +715,7 @@ } } - + /** * Convert a Sesame {@link BindingSet} into a bigdata {@link IBindingSet} * and merges it with the BINDINGS clause (if any) attached to the This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
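The revision above applies the same defensive pattern twice: once an underlying iteration has been opened, the wrapping result object must either be returned to the caller or the iteration must be closed, otherwise the query keeps running after an interrupt. A minimal generic sketch of that close-on-failed-wrap pattern follows (hypothetical Source/Result types, not the actual Sesame classes):

    interface Source {
        void close();
    }

    final class Result {
        // The ctor may throw, e.g., if the calling thread is interrupted.
        Result(final Source src) { /* ... */ }
    }

    final class WrapOrClose {

        static Result wrap(final Source src) {

            Result result = null;
            try {
                result = new Result(src);
                return result;
            } finally {
                if (result == null) {
                    /*
                     * The Result was never handed to the caller, so the
                     * caller can never close it. Close the underlying
                     * source here to ensure the running query is
                     * terminated.
                     */
                    src.close();
                }
            }

        }

    }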
From: <tho...@us...> - 2013-08-07 17:59:56
Revision: 7257 http://bigdata.svn.sourceforge.net/bigdata/?rev=7257&view=rev Author: thompsonbry Date: 2013-08-07 17:59:45 +0000 (Wed, 07 Aug 2013) Log Message: ----------- javadoc only Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-08-07 14:36:25 UTC (rev 7256) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-08-07 17:59:45 UTC (rev 7257) @@ -194,6 +194,14 @@ * solution sets to be passed into a query. Any such change would * have to be deeply integrated with the SPARQL parser in order to * provide any benefit for the Java heap. + * + * TODO This logic is currently single-threaded. If we allow internal + * concurrency or when we integrate the RTO, we will need to ensure that + * the logic remains safely cancelable by an interrupt of the thread in + * which the query was submitted. See <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/715" > + * Interrupt of thread submitting a query for evaluation does not + * always terminate the AbstractRunningQuery </a>. */ static PipelineOp convert(final AST2BOpContext ctx, final IBindingSet[] bindingSets) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
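The new javadoc flags a general requirement: long-running, single-threaded query translation must stay responsive to an interrupt of the submitting thread. The usual cooperative idiom is to poll the interrupt status inside the loop; a small sketch under that assumption (hypothetical loop body, not the actual AST2BOpUtility logic):

    import java.util.List;

    final class CancelableConvert {

        static void convertAll(final List<Object> astNodes)
                throws InterruptedException {

            for (Object node : astNodes) {

                /*
                 * Cooperative cancellation: Thread.interrupted() clears the
                 * flag, which is fine here because we immediately convert
                 * it into an InterruptedException for the caller.
                 */
                if (Thread.interrupted())
                    throw new InterruptedException();

                // ... translate one AST node into a pipeline operator ...

            }

        }

    }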
From: <mrp...@us...> - 2013-08-07 14:36:32
Revision: 7256 http://bigdata.svn.sourceforge.net/bigdata/?rev=7256&view=rev Author: mrpersonick Date: 2013-08-07 14:36:25 +0000 (Wed, 07 Aug 2013) Log Message: ----------- checking in Jeremy's fix for ticket 684 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java 2013-08-07 14:32:25 UTC (rev 7255) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTStaticJoinOptimizer.java 2013-08-07 14:36:25 UTC (rev 7256) @@ -1487,11 +1487,14 @@ final JoinGroupNode subgroup3 = new JoinGroupNode(); - subgroup3.addChild(newStatementPatternNode(new VarNode("x"), - new ConstantNode(e), new ConstantNode(e), 10l)); + // Note: both x and y are bound at this point, so the best order + // is lowest cardinality first subgroup3.addChild(newStatementPatternNode(new VarNode("y"), new ConstantNode(d), new ConstantNode(d), 1l)); + + subgroup3.addChild(newStatementPatternNode(new VarNode("x"), + new ConstantNode(e), new ConstantNode(e), 10l)); subgroup2.addChild(subgroup3); @@ -1929,7 +1932,7 @@ } */ - public void xtest_union_trac684_A() { + public void test_union_trac684_A() { new Helper(){{ given = select( varNode(z), // z is ?o @@ -2065,7 +2068,7 @@ } */ - public void xtest_union_trac684_C() { + public void test_union_trac684_C() { new Helper(){{ given = select( varNode(z), // z is ?o This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
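The corrected expectation above encodes the static optimizer's rule: once the variables of a group are all bound, the remaining access paths should be joined in order of increasing estimated cardinality, so the 1-row pattern runs before the 10-row pattern. A toy sketch of that ordering step, with a hypothetical Pattern type standing in for a statement pattern plus its range-count estimate:

    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    final class Pattern {
        final String label;
        final long estimatedCardinality; // e.g., a fast range count
        Pattern(final String label, final long estimatedCardinality) {
            this.label = label;
            this.estimatedCardinality = estimatedCardinality;
        }
    }

    final class StaticJoinOrder {

        /** Order fully-bound patterns so the most selective join runs first. */
        static void orderBySelectivity(final List<Pattern> patterns) {
            Collections.sort(patterns, new Comparator<Pattern>() {
                @Override
                public int compare(final Pattern a, final Pattern b) {
                    return Long.compare(a.estimatedCardinality,
                            b.estimatedCardinality);
                }
            });
        }

    }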
From: <mrp...@us...> - 2013-08-07 14:32:32
Revision: 7255 http://bigdata.svn.sourceforge.net/bigdata/?rev=7255&view=rev Author: mrpersonick Date: 2013-08-07 14:32:25 +0000 (Wed, 07 Aug 2013) Log Message: ----------- committing the data for TestTicket693 Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/property_paths.owl 2013-08-07 14:32:25 UTC (rev 7255) @@ -0,0 +1,63 @@ +<?xml version="1.0"?> +<rdf:RDF xmlns="http://www.semanticweb.org/ontologies/2013/5/untitled-ontology-287#" + xml:base="http://www.semanticweb.org/ontologies/2013/5/untitled-ontology-287" + xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" + xmlns:owl="http://www.w3.org/2002/07/owl#" + xmlns:xsd="http://www.w3.org/2001/XMLSchema#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"> + <owl:Ontology rdf:about="http://www.semanticweb.org/ontologies/2013/5/untitled-ontology-287"/> + + + + <!-- + /////////////////////////////////////////////////////////////////////////////////////// + // + // Classes + // + /////////////////////////////////////////////////////////////////////////////////////// + --> + + + + + <!-- http://example.org/A --> + + <owl:Class rdf:about="http://example.org/A"/> + + + + <!-- http://example.org/B --> + + <owl:Class rdf:about="http://example.org/B"> + <rdfs:subClassOf rdf:resource="http://example.org/A"/> + </owl:Class> + + + + <!-- http://example.org/C --> + + <owl:Class rdf:about="http://example.org/C"> + <rdfs:subClassOf rdf:resource="http://example.org/B"/> + </owl:Class> + + + + <!-- http://example.org/D --> + + <owl:Class rdf:about="http://example.org/D"> + <rdfs:subClassOf rdf:resource="http://example.org/C"/> + </owl:Class> + + + + <!-- http://example.org/E --> + + <owl:Class rdf:about="http://example.org/E"> + <rdfs:subClassOf rdf:resource="http://example.org/D"/> + </owl:Class> +</rdf:RDF> + + + +<!-- Generated by the OWL API (version 3.3.1957) http://owlapi.sourceforge.net --> + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-08-07 14:09:22
Revision: 7254 http://bigdata.svn.sourceforge.net/bigdata/?rev=7254&view=rev Author: thompsonbry Date: 2013-08-07 14:09:16 +0000 (Wed, 07 Aug 2013) Log Message: ----------- Replaced System.err with log @ INFO, @ WARN, or @ ERROR as appropriate. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java 2013-08-07 14:05:34 UTC (rev 7253) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/StressTestConcurrent.java 2013-08-07 14:09:16 UTC (rev 7254) @@ -603,14 +603,15 @@ ret.put("nsplit", "" + overflowCounters.indexPartitionSplitCounter); ret.put("nmove", "" + overflowCounters.indexPartitionMoveCounter); - System.err.println(ret.toString(true/* newline */)); + if (log.isInfoEnabled()) + log.info(ret.toString(true/* newline */)); - System.err.println(overflowCounters.getCounters().toString()); + if (log.isInfoEnabled()) + log.info(overflowCounters.getCounters().toString()); if (!failures.isEmpty()) { - System.err.println("failures:\n" - + Arrays.toString(failures.toArray())); + log.error("failures:\n" + Arrays.toString(failures.toArray())); fail("There were " + failures.size() + " failed tasks for unexpected causes"); @@ -630,12 +631,12 @@ final boolean forceOverflow = false; if (forceOverflow) { - System.err.println("Forcing overflow: " + new Date()); + log.warn("Forcing overflow: " + new Date()); ((AbstractScaleOutFederation<?>) federation) .forceOverflow(true/* compactingMerge */, true/* truncateJournal */); - System.err.println("Forced overflow: " + new Date()); + log.warn("Forced overflow: " + new Date()); } @@ -650,7 +651,8 @@ final IIndex expected = groundTruth[i]; - System.err.println("Validating: " + if (log.isInfoEnabled()) + log.info("Validating: " + name + " #groundTruthEntries=" + groundTruth[i].rangeCount() @@ -733,8 +735,9 @@ } - System.err.println("Validated " + nindices - + " indices against ground truth."); + if (log.isInfoEnabled()) + log.info("Validated " + nindices + + " indices against ground truth."); } @@ -788,7 +791,7 @@ if (targetService == null) { - System.err.println("Spamming LBS: services have equal load."); + log.warn("Spamming LBS: services have equal load."); fakeServiceScores[0] = new ServiceScore( AbstractStatisticsCollector.fullyQualifiedHostName, @@ -802,8 +805,7 @@ } else { - System.err - .println("Spamming LBS: one service will appear heavily loaded."); + log.warn("Spamming LBS: one service will appear heavily loaded."); fakeServiceScores[0] = new ServiceScore( AbstractStatisticsCollector.fullyQualifiedHostName, This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
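The changes above follow the standard log4j idiom: guard INFO-level statements whose message construction is expensive, and promote genuine failures to log.error so they survive log filtering, rather than writing to System.err. A minimal sketch of the idiom:

    import org.apache.log4j.Logger;

    final class GuardedLogging {

        private static final Logger log = Logger.getLogger(GuardedLogging.class);

        static void report(final Object expensiveSummary, final boolean failed) {

            // The guard ensures the summary string is only built when INFO
            // level logging is actually enabled.
            if (log.isInfoEnabled())
                log.info("summary=" + expensiveSummary);

            // Errors are rare and must always be reported, so they are
            // written unconditionally.
            if (failed)
                log.error("failures detected: " + expensiveSummary);

        }

    }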
From: <tho...@us...> - 2013-08-07 14:05:40
Revision: 7253 http://bigdata.svn.sourceforge.net/bigdata/?rev=7253&view=rev Author: thompsonbry Date: 2013-08-07 14:05:34 +0000 (Wed, 07 Aug 2013) Log Message: ----------- Converted System.err into log@INFO Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/TestRangeQuery.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/TestRangeQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/TestRangeQuery.java 2013-08-07 14:00:05 UTC (rev 7252) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/service/TestRangeQuery.java 2013-08-07 14:05:34 UTC (rev 7253) @@ -1207,8 +1207,9 @@ final ITuple<E> actualTuple = actual.next(); - System.err.println("nvisited=" + (nfound + 1) + ", actualTuple=" - + actualTuple); + if (log.isInfoEnabled()) + log.info("nvisited=" + (nfound + 1) + ", actualTuple=" + + actualTuple); boolean found = false; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-08-07 14:00:17
Revision: 7252 http://bigdata.svn.sourceforge.net/bigdata/?rev=7252&view=rev Author: thompsonbry Date: 2013-08-07 14:00:05 +0000 (Wed, 07 Aug 2013) Log Message: ----------- There were some problems with the customer's test harness. Specifically, it was shutting down the Journal (and hence the executor service on which the query was running) without waiting for the query to terminate after it had been cancelled. This was causing RejectedExecutionException instances to be thrown when the query attempted to notify the query controller that a given operator had halted. Once that issue was corrected, it became obvious that the root cause was in fact the failure to propagate the interrupt out of BlockingBuffer.BlockingIterator.hasNext() as suggested by the customer #707. With this change the query with the nested subquery now terminates in a timely manner. I am running through the SPARQL test suite locally before a commit. I will commit the updated version of the customer's test case as well. We will need to do some longevity testing and performance testing on this change to verify that there are no undesired side-effects which arise from propagating that interrupt. I have also looked at the testOrderByQueriesAreInterruptable() test in the RepositoryConnectionTest class. I have lifted a copy of that test into our code. Examination of this test shows that the query is cancelled in a timely fashion IF the ORDER BY operator has not yet begun to execute. This is in keeping with the semantics of ''deadline'' as implemented by bigdata. A deadline is only examined when we start or stop the evaluation of a query operator. If we need to make deadlines responsive for operators that are long running, then we would have to do something like schedule a future to cancel the query if it was still running after a deadline. Changes are to: - BlockingBuffer.BlockingIterator.hasNext() - the interrupt is now propagated. - ChunkedRunningQuery - javadoc only. - BigdataConnectionTest - lifted a version of testOrderByQueriesAreInterruptable() into our version of that test suite. 
@see https://sourceforge.net/apps/trac/bigdata/ticket/716 (Verify that IRunningQuery instances (and nested queries) are correctly cancelled when interrupted) @see https://sourceforge.net/apps/trac/bigdata/ticket/707 (BlockingBuffer.close() does not unblock threads) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-08-07 13:25:39 UTC (rev 7251) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/engine/ChunkedRunningQuery.java 2013-08-07 14:00:05 UTC (rev 7252) @@ -63,7 +63,6 @@ import com.bigdata.rwstore.sector.IMemoryManager; import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.ICloseableIterator; -import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.Memoizer; import com.sun.jini.thread.Executor; @@ -806,6 +805,11 @@ * is invoked from within the running task in order to remove * the latency for that RMI from the thread which submits tasks * to consume chunks. + * + * FIXME This is a protocol that should be optimized to provide + * better throughput for scale-out. E.g., a single socket on + * which we transmit and receive notice about operator + * start/stop metadata using some non-blocking service. */ // final boolean lastPassRequested = ((PipelineOp) (t.bop)) @@ -1292,7 +1296,7 @@ halt(new Exception("task=" + toString() + ", cause=" + t, t)); if (getCause() != null) { // Abnormal termination - wrap and rethrow. - + // TODO Why is this line empty? (I think that it is handled by the ChunkTaskWrapper.) } // otherwise ignore exception (normal completion). } finally { @@ -1304,6 +1308,19 @@ * it is closed. */ context.getSource().close(); + /** + * Ensure that the task is cancelled. + * + * Note: This does not appear to be necessary. I am observing + * the interrupt of the operator evaluation task regardless. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/707 + * (BlockingBuffer.close() does not unblock threads) + * @see https://sourceforge.net/apps/trac/bigdata/ticket/716 + * (Verify that IRunningQuery instances (and nested + * queries) are correctly cancelled when interrupted) + */ +// ft.cancel(true/*mayInterruptIfRunning*/); } // Done. return null; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-07 13:25:39 UTC (rev 7251) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/relation/accesspath/BlockingBuffer.java 2013-08-07 14:00:05 UTC (rev 7252) @@ -1541,7 +1541,17 @@ log.info("Interrupted: " + this, ex); else if (log.isInfoEnabled()) log.info("Interrupted: " + this); - + /** + * Note: Propagating the interrupt appears to be necessary here + * in order to have timely termination of nested subqueries. 
+ * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/707 + * (BlockingBuffer.close() does not unblock threads) + * @see https://sourceforge.net/apps/trac/bigdata/ticket/716 + * (Verify that IRunningQuery instances (and nested + * queries) are correctly cancelled when interrupted) + */ + Thread.currentThread().interrupt(); return false; } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2013-08-07 13:25:39 UTC (rev 7251) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java 2013-08-07 14:00:05 UTC (rev 7252) @@ -35,27 +35,21 @@ import java.io.File; import java.io.IOException; -import java.util.Arrays; import java.util.Properties; import org.apache.log4j.Logger; -import org.openrdf.model.Resource; import org.openrdf.model.Statement; -import org.openrdf.model.URI; import org.openrdf.model.Value; +import org.openrdf.model.vocabulary.RDFS; import org.openrdf.query.BindingSet; import org.openrdf.query.GraphQuery; import org.openrdf.query.GraphQueryResult; -import org.openrdf.query.MalformedQueryException; -import org.openrdf.query.QueryEvaluationException; +import org.openrdf.query.QueryInterruptedException; import org.openrdf.query.QueryLanguage; import org.openrdf.query.TupleQuery; import org.openrdf.query.TupleQueryResult; -import org.openrdf.query.impl.DatasetImpl; import org.openrdf.repository.Repository; import org.openrdf.repository.RepositoryConnectionTest; -import org.openrdf.repository.RepositoryException; -import org.openrdf.repository.contextaware.ContextAwareConnection; import com.bigdata.btree.keys.CollatorEnum; import com.bigdata.btree.keys.StrengthEnum; @@ -682,6 +676,55 @@ } } - + /* + * I have lifted this out of the base openrdf class since it often enough + * fails in CI or when running the entire TestBigdataSailWithQuads test + * suite. However, when run by itself I observe timely termination based on + * the deadline. + * + * Note: This query does several scans of the KB and computes their + * unconstrained cross-product and then sorts the results. + * + * I suspect that the problem may be that the ORDER BY operator does not + * notice the timeout since the deadline is only examined when an operator + * starts or stops. If evaluation reaches the ORDER BY operator and the SORT + * begins, then the SORT is not interrupted since the deadline is not being + * examined. + * + * (non-Javadoc) + * + * @see org.openrdf.repository.RepositoryConnectionTest# + * testOrderByQueriesAreInterruptable() + */ + @Override + public void testOrderByQueriesAreInterruptable() + throws Exception + { + testCon.setAutoCommit(false); + for (int index = 0; index < 512; index++) { + testCon.add(RDFS.CLASS, RDFS.COMMENT, testCon.getValueFactory().createBNode()); + } + testCon.setAutoCommit(true); + TupleQuery query = testCon.prepareTupleQuery(QueryLanguage.SPARQL, + "SELECT * WHERE { ?s ?p ?o . ?s1 ?p1 ?o1 . ?s2 ?p2 ?o2 . ?s3 ?p3 ?o3 . 
} ORDER BY ?s1 ?p1 ?o1 LIMIT 1000"); + query.setMaxQueryTime(2); + + TupleQueryResult result = query.evaluate(); + log.warn("Query evaluation has begin"); + long startTime = System.currentTimeMillis(); + try { + result.hasNext(); + fail("Query should have been interrupted"); + } + catch (QueryInterruptedException e) { + // Expected + long duration = System.currentTimeMillis() - startTime; + log.warn("Actual query duration: " + duration + "ms"); + assertTrue("Query not interrupted quickly enough, should have been ~2s, but was " + + (duration / 1000) + "s", duration < 5000); + } + } + + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
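The one-line BlockingBuffer change above is an instance of a standard concurrency rule: code that catches an interrupt but does not rethrow InterruptedException must re-assert the thread's interrupt status, or callers further up the stack (here, a nested subquery draining the iterator) never observe the cancellation. A minimal sketch of the pattern with a hypothetical queue-draining method, not the actual BlockingBuffer code:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    final class InterruptPreservingPoll {

        /**
         * Poll a queue, treating an interrupt as "no more data" while
         * preserving the interrupt status for the caller.
         */
        static <E> E pollOrNull(final BlockingQueue<E> queue) {
            try {
                return queue.poll(100, TimeUnit.MILLISECONDS);
            } catch (InterruptedException ex) {
                /*
                 * The checked exception is swallowed to keep the signature
                 * simple, but the interrupt MUST be re-asserted so that
                 * enclosing loops terminate in a timely manner instead of
                 * polling forever.
                 */
                Thread.currentThread().interrupt();
                return null;
            }
        }

    }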