This list is closed, nobody may subscribe to it.
Archived messages per month:

| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|------|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|
| 2010 |     |     |     |     |     |     | 139 | 94  | 232 | 143 | 138 | 55  |
| 2011 | 127 | 90  | 101 | 74  | 148 | 241 | 169 | 121 | 157 | 199 | 281 | 75  |
| 2012 | 107 | 122 | 184 | 73  | 14  | 49  | 26  | 103 | 133 | 61  | 51  | 55  |
| 2013 | 59  | 72  | 99  | 62  | 92  | 19  | 31  | 138 | 47  | 83  | 95  | 111 |
| 2014 | 125 | 60  | 119 | 136 | 270 | 83  | 88  | 30  | 47  | 27  | 23  |     |
| 2015 |     |     |     |     |     |     |     |     | 3   |     |     |     |
| 2016 |     |     | 4   | 1   |     |     |     |     |     |     |     |     |
From: <tho...@us...> - 2013-05-30 21:20:27

Revision: 7176
http://bigdata.svn.sourceforge.net/bigdata/?rev=7176&view=rev
Author: thompsonbry
Date: 2013-05-30 21:20:21 +0000 (Thu, 30 May 2013)

Log Message:
-----------
added link to the SPARQL UPDATE page on the wiki.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt

Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt  2013-05-30 21:12:22 UTC (rev 7175)
+++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt  2013-05-30 21:20:21 UTC (rev 7176)
@@ -18,7 +18,7 @@
 New features:
-- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets).
+- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets).  See https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update for more information.
 - SPARQL 1.1 Property Paths.
 - Remote Java client for Multi-Tenancy extensions NanoSparqlServer
 - Sesame 2.6.10 dependency
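For orientation, the release note edited above concerns bigdata's SPARQL 1.1 UPDATE support (see the linked wiki page for the named-solution-set extensions). The sketch below shows one generic way to submit a SPARQL UPDATE to a NanoSparqlServer endpoint over the standard SPARQL 1.1 protocol; it is not code from this commit, and the endpoint URL, class name, and update text are illustrative assumptions.

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

/**
 * Minimal sketch: POST a SPARQL 1.1 UPDATE request to a SPARQL endpoint using
 * the application/x-www-form-urlencoded protocol binding. The endpoint URL and
 * the update string are assumptions for illustration only.
 */
public class SparqlUpdateSketch {
    public static void main(String[] args) throws Exception {
        final String endpoint = "http://localhost:9999/bigdata/sparql"; // assumed NSS URL
        final String update =
                "PREFIX ex: <http://example.org/> " +
                "INSERT DATA { ex:s ex:p ex:o }";

        final HttpURLConnection conn =
                (HttpURLConnection) new URL(endpoint).openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type",
                "application/x-www-form-urlencoded; charset=UTF-8");

        // SPARQL 1.1 Protocol: the update text is sent as the "update" form parameter.
        final byte[] body =
                ("update=" + URLEncoder.encode(update, "UTF-8")).getBytes("UTF-8");
        try (OutputStream out = conn.getOutputStream()) {
            out.write(body);
        }

        // Any non-2xx status indicates the update was rejected by the server.
        System.out.println("HTTP status: " + conn.getResponseCode());
        conn.disconnect();
    }
}
```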
From: <tho...@us...> - 2013-05-30 21:12:37
Revision: 7175 http://bigdata.svn.sourceforge.net/bigdata/?rev=7175&view=rev Author: thompsonbry Date: 2013-05-30 21:12:22 +0000 (Thu, 30 May 2013) Log Message: ----------- TestHA3JournalServer.testABC_LargeLoad: Observed this exception once. I suspect WCS compaction. 2nd run fails (quorum not met). Test is green on 3rd retry. In my opinion, this test has been flaky since we enabled WCS compaction. Disabling WCS compaction results in a 3 green test runs in a row. I am reopening the WCS compaction ticket (https://sourceforge.net/apps/trac/bigdata/ticket/674). {{{ com.bigdata.rdf.sail.webapp.client.HttpException: Status Code=500, Status Line=HTTP/1.1 500 Server Error, Response=DROP ALL java.util.concurrent.ExecutionException: java.lang.RuntimeException: com.bigdata.rwstore.PhysicalAddressResolutionException: Address did not resolve to physical address: -57741 at java.util.concurrent.FutureTask$Sync.innerGet(FutureTask.java:252) at java.util.concurrent.FutureTask.get(FutureTask.java:111) at com.bigdata.rdf.sail.webapp.QueryServlet.doUpdate(QueryServlet.java:395) at com.bigdata.rdf.sail.webapp.QueryServlet.doPost(QueryServlet.java:151) at com.bigdata.rdf.sail.webapp.RESTServlet.doPost(RESTServlet.java:201) at javax.servlet.http.HttpServlet.service(HttpServlet.java:727) at javax.servlet.http.HttpServlet.service(HttpServlet.java:820) at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:534) at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:475) at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:929) at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:403) at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:864) at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:117) at org.eclipse.jetty.server.handler.HandlerList.handle(HandlerList.java:47) at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:114) at org.eclipse.jetty.server.Server.handle(Server.java:352) at org.eclipse.jetty.server.HttpConnection.handleRequest(HttpConnection.java:596) at org.eclipse.jetty.server.HttpConnection$RequestHandler.headerComplete(HttpConnection.java:1051) at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:590) at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:212) at org.eclipse.jetty.server.HttpConnection.handle(HttpConnection.java:426) at org.eclipse.jetty.io.nio.SelectChannelEndPoint.handle(SelectChannelEndPoint.java:508) at org.eclipse.jetty.io.nio.SelectChannelEndPoint.access$000(SelectChannelEndPoint.java:34) at org.eclipse.jetty.io.nio.SelectChannelEndPoint$1.run(SelectChannelEndPoint.java:40) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:451) at java.lang.Thread.run(Thread.java:722) Caused by: java.lang.RuntimeException: com.bigdata.rwstore.PhysicalAddressResolutionException: Address did not resolve to physical address: -57741 at com.bigdata.journal.AbstractJournal.abort(AbstractJournal.java:2667) at com.bigdata.rdf.store.LocalTripleStore.abort(LocalTripleStore.java:96) at com.bigdata.rdf.sail.BigdataSail$BigdataSailConnection.rollback(BigdataSail.java:2921) at org.openrdf.repository.sail.SailRepositoryConnection.rollback(SailRepositoryConnection.java:97) at com.bigdata.rdf.sail.webapp.BigdataRDFContext$AbstractQueryTask.call(BigdataRDFContext.java:1087) at com.bigdata.rdf.sail.webapp.BigdataRDFContext$AbstractQueryTask.call(BigdataRDFContext.java:1) at 
java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1110) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:603) ... 1 more Caused by: com.bigdata.rwstore.PhysicalAddressResolutionException: Address did not resolve to physical address: -57741 at com.bigdata.rwstore.RWStore.getData(RWStore.java:1853) at com.bigdata.journal.RWStrategy.readFromLocalStore(RWStrategy.java:726) at com.bigdata.journal.RWStrategy.read(RWStrategy.java:153) at com.bigdata.journal.AbstractJournal._getCommitRecord(AbstractJournal.java:3918) at com.bigdata.journal.AbstractJournal._abort(AbstractJournal.java:2780) at com.bigdata.journal.AbstractJournal.doLocalAbort(AbstractJournal.java:5885) at com.bigdata.journal.jini.ha.HAJournal.doLocalAbort(HAJournal.java:636) at com.bigdata.journal.AbstractJournal.abort(AbstractJournal.java:2654) ... 10 more at com.bigdata.rdf.sail.webapp.client.RemoteRepository.checkResponseCode(RemoteRepository.java:1452) at com.bigdata.rdf.sail.webapp.client.RemoteRepository$SparqlUpdate.evaluate(RemoteRepository.java:1096) at com.bigdata.journal.jini.ha.TestHA3JournalServer.testABC_LargeLoad(TestHA3JournalServer.java:387) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:601) at junit.framework.TestCase.runTest(TestCase.java:154) at junit.framework.TestCase.runBare(TestCase.java:127) at junit.framework.TestResult$1.protect(TestResult.java:106) at junit.framework.TestResult.runProtected(TestResult.java:124) at junit.framework.TestResult.run(TestResult.java:109) at junit.framework.TestCase.run(TestCase.java:118) at junit.framework.TestSuite.runTest(TestSuite.java:208) at junit.framework.TestSuite.run(TestSuite.java:203) at junit.framework.TestSuite.runTest(TestSuite.java:208) at junit.framework.TestSuite.run(TestSuite.java:203) at org.eclipse.jdt.internal.junit.runner.junit3.JUnit3TestReference.run(JUnit3TestReference.java:130) at org.eclipse.jdt.internal.junit.runner.TestExecution.run(TestExecution.java:38) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:467) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.runTests(RemoteTestRunner.java:683) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.run(RemoteTestRunner.java:390) at org.eclipse.jdt.internal.junit.runner.RemoteTestRunner.main(RemoteTestRunner.java:197) }}} @see https://sourceforge.net/apps/trac/bigdata/ticket/530#comment (HAJournal) @see https://sourceforge.net/apps/trac/bigdata/ticket/674 (WriteCacheService Compaction causes failures). 
Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java branches/READ_CACHE/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IBTreeManager.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IResourceManager.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rawstore/IStreamStore.java branches/READ_CACHE/bigdata/src/java/com/bigdata/stream/Stream.java branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/TestName2Addr.java branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/TestNamedIndices.java branches/READ_CACHE/bigdata/src/test/com/bigdata/journal/TestTemporaryStore.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis_CanJoin.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ICacheConnection.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/IEvaluationContext.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceOptions.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAll.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/cache/TestAll.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestDescribe.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03.rq branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/update/TestAll.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java 
branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest2.java Added Paths: ----------- branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ISolutionSetManager.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/SolutionSetManager.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03a.rq branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestAll.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestSolutionSetManager.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskRW.java branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskWORM.java Removed Paths: ------------- branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ISolutionSetManager.java branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/SolutionSetManager.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/cache/TestSolutionSetCache.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestAll.java branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestSolutionSetManager.java Property Changed: ---------------- branches/READ_CACHE/ branches/READ_CACHE/bigdata/lib/jetty/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba/ branches/READ_CACHE/bigdata/src/java/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/READ_CACHE/bigdata/src/test/com/bigdata/bop/util/ branches/READ_CACHE/bigdata/src/test/com/bigdata/jsr166/ branches/READ_CACHE/bigdata/src/test/com/bigdata/util/httpd/ branches/READ_CACHE/bigdata-compatibility/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/attr/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/disco/ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/util/config/ branches/READ_CACHE/bigdata-perf/ branches/READ_CACHE/bigdata-perf/btc/ branches/READ_CACHE/bigdata-perf/btc/src/resources/ branches/READ_CACHE/bigdata-perf/lubm/ branches/READ_CACHE/bigdata-perf/uniprot/ branches/READ_CACHE/bigdata-perf/uniprot/src/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/READ_CACHE/bigdata-rdf/src/samples/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ 
branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/READ_CACHE/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/READ_CACHE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/READ_CACHE/dsi-utils/ branches/READ_CACHE/dsi-utils/LEGAL/ branches/READ_CACHE/dsi-utils/lib/ branches/READ_CACHE/dsi-utils/src/ branches/READ_CACHE/dsi-utils/src/java/ branches/READ_CACHE/dsi-utils/src/java/it/ branches/READ_CACHE/dsi-utils/src/java/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/ branches/READ_CACHE/dsi-utils/src/test/it/unimi/dsi/ branches/READ_CACHE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/READ_CACHE/osgi/ branches/READ_CACHE/src/resources/bin/config/ Property changes on: branches/READ_CACHE ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7143 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7173 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 /branches/JOURNAL_HA_BRANCH:2596-4066 /branches/LARGE_LITERALS_REFACTOR:4175-4387 /branches/LEXICON_REFACTOR_BRANCH:2633-3304 /branches/QUADS_QUERY_BRANCH:4525-4531,4550-4584,4586-4609,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH:4814-4836 /branches/bugfix-btm:2594-3237 /branches/dev-btm:2574-2730 /branches/fko:3150-3194 /trunk:3392-3437,3656-4061 Property changes on: branches/READ_CACHE/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7143 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7173 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 
/branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/lib/jetty:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/lib/jetty:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -43,17 +43,13 @@ import com.bigdata.bop.join.BaseJoinStats; import com.bigdata.bop.join.IHashJoinUtility; import com.bigdata.btree.ISimpleIndexAccess; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.ITx; -import com.bigdata.journal.TimestampUtility; +import com.bigdata.journal.IBTreeManager; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.impl.bnode.SidIV; import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.sparql.ast.QueryHints; -import com.bigdata.rdf.sparql.ast.cache.CacheConnectionFactory; -import com.bigdata.rdf.sparql.ast.cache.ICacheConnection; -import com.bigdata.rdf.sparql.ast.cache.ISolutionSetCache; +import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager; +import com.bigdata.rdf.sparql.ast.ssets.SolutionSetManager; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.spo.SPOPredicate; @@ -61,7 +57,6 @@ import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.rwstore.sector.IMemoryManager; -import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.ChunkedFilter; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.CloseableIteratorWrapper; @@ -629,20 +624,20 @@ // Resolve the object which will give us access to the named // solution set. - final ICacheConnection cacheConn = CacheConnectionFactory - .getExistingCacheConnection(getRunningQuery() - .getQueryEngine()); +// final ICacheConnection cacheConn = CacheConnectionFactory +// .getExistingCacheConnection(getRunningQuery() +// .getQueryEngine()); final String namespace = namedSetRef.getNamespace(); final long timestamp = namedSetRef.getTimestamp(); - final ISolutionSetCache sparqlCache = cacheConn == null ? null - : cacheConn.getSparqlCache(namespace, timestamp); - // TODO ClassCastException is possible? 
- final AbstractJournal localIndexManager = (AbstractJournal) getIndexManager(); + final IBTreeManager localIndexManager = (IBTreeManager) getIndexManager(); + final ISolutionSetManager sparqlCache = new SolutionSetManager( + localIndexManager, namespace, timestamp); + return NamedSolutionSetRefUtility.getSolutionSet(// sparqlCache,// localIndexManager,// Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -35,10 +35,11 @@ import com.bigdata.btree.IIndex; import com.bigdata.btree.ISimpleIndexAccess; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IBTreeManager; import com.bigdata.journal.ITx; import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.sparql.ast.ISolutionSetStats; -import com.bigdata.rdf.sparql.ast.cache.ISolutionSetCache; +import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.ICloseableIterator; @@ -144,6 +145,7 @@ } + @SuppressWarnings("rawtypes") final IVariable[] joinVars; { @@ -394,8 +396,8 @@ * the same data. */ public static ISolutionSetStats getSolutionSetStats(// - final ISolutionSetCache sparqlCache,// - final AbstractJournal localIndexManager, // + final ISolutionSetManager sparqlCache,// + final IBTreeManager localIndexManager, // final String namespace,// final long timestamp,// final String localName,// @@ -491,8 +493,8 @@ * {@link IIndex}? */ public static ICloseableIterator<IBindingSet[]> getSolutionSet( - final ISolutionSetCache sparqlCache,// - final AbstractJournal localIndexManager,// + final ISolutionSetManager sparqlCache,// + final IBTreeManager localIndexManager,// final String namespace,// final long timestamp,// final String localName,// @@ -558,6 +560,7 @@ + localName + ", joinVars=" + Arrays.toString(joinVars)); // Iterator visiting the solution set. 
+ @SuppressWarnings("unchecked") final ICloseableIterator<IBindingSet> src = (ICloseableIterator<IBindingSet>) index .scan(); Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7143 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7173 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/aggregate:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4814-4836 Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7143 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7173 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/joinGraph:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -49,7 +49,6 @@ import com.bigdata.rdf.internal.encoder.SolutionSetStreamDecoder; import 
com.bigdata.rdf.internal.encoder.SolutionSetStreamEncoder; import com.bigdata.rdf.sparql.ast.ISolutionSetStats; -import com.bigdata.rdf.sparql.ast.SolutionSetStats; import com.bigdata.stream.Stream; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.ICloseableIterator; @@ -171,7 +170,7 @@ * by {@link Checkpoint#create(IRawStore, IndexMetadata)} since * Stream.create() is being invoked rather than SolutionSetStream.create(). * - * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> */ public static SolutionSetStream create(final IRawStore store, final StreamIndexMetadata metadata) { @@ -202,10 +201,10 @@ } /** - * Return the address of the {@link SolutionSetStats} to be written into the + * Return the address of the {@link ISolutionSetStats} to be written into the * next {@link Checkpoint} record. The caller must have {@link #flush()} the * {@link SolutionSetStream} as a pre-condition (to ensure that the stats - * have been written out). If the {@link SolutionSetStats} are not loaded, + * have been written out). If the {@link ISolutionSetStats} are not loaded, * then the address from the last {@link Checkpoint} record is returned. */ public long getStatsAddr() { Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7143 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7173 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/bop/util:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/bop/util:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -23,6 +23,7 @@ */ package com.bigdata.btree; +import com.bigdata.btree.view.FusedView; import com.bigdata.counters.ICounterSetAccess; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.AbstractTask; @@ -39,8 +40,13 @@ * TODO Try to lift out an abstract implementation of this interface for * HTree, BTree, and Stream. This will be another step towards GIST * support. 
There are protected methods which are used on those classes - * which should be lifted into the abstract base class. - */ + * which should be lifted into the abstract base class. Also, try to + * reconcile this interface with {@link ILocalBTreeView} implementations + * that do not implement {@link ICheckpointProtocol} ({@link FusedView}, + * {@link ReadCommittedView}). + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> + */ public interface ICheckpointProtocol extends ICommitter, ICounterSetAccess, ISimpleIndexAccess { Property changes on: branches/READ_CACHE/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo - /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7143 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 + /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7173 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 /branches/RWSTORE_1_1_0_DEBUG/bigdata/src/java/com/bigdata/htree/raba:5896-5935 /branches/TIDS_PLUS_BLOBS_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4814-4836 Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -550,7 +550,7 @@ * WCS write cache compaction causes errors in RWS postHACommit() * </a> */ - this.compactionEnabled = canCompact() && compactionThreshold < 100; + this.compactionEnabled = false;//canCompact() && compactionThreshold < 100; if (log.isInfoEnabled()) log.info("Compaction Enabled: " + compactionEnabled Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2013-05-30 20:40:36 UTC (rev 7174) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2013-05-30 21:12:22 UTC (rev 7175) @@ -123,16 +123,6 @@ static protected final Logger log = Logger.getLogger(AbstractTask.class); /** - * True iff the {@link #log} level is INFO or less. - */ - final protected boolean INFO = log.isInfoEnabled(); - - /** - * True iff the {@link #log} level is DEBUG or less. - */ - final protected boolean DEBUG = log.isDebugEnabled(); - - /** * Used to protect against re-submission of the same task object. 
*/ private final AtomicBoolean submitted = new AtomicBoolean(false); @@ -463,7 +453,7 @@ if (commitList.put(name, this) != null) { - if (INFO) + if (log.isInfoEnabled()) log.info("Added index to commit list: name=" + name); } @@ -477,7 +467,7 @@ */ private void clearIndexCache() { - if (INFO) + if (log.isInfoEnabled()) log.info("Clearing hard reference cache: " + indexCache.size() + " indices accessed"); @@ -543,6 +533,7 @@ * @todo modify to return <code>null</code> if the index is not * registered? */ + @Override synchronized final public ILocalBTreeView getIndex(final String name) { if (name == null) { @@ -1729,7 +1720,7 @@ MDC.put("timestamp", Long.valueOf(timestamp)); - if(INFO) + if(log.isInfoEnabled()) MDC.put("resources", Arrays.toString(resource)); } @@ -1744,7 +1735,7 @@ MDC.remove("timestamp"); - if(INFO) + if(log.isInfoEnabled()) MDC.remove("resources"); } @@ -1865,7 +1856,7 @@ if (isReadWriteTx) { - if (INFO) + if (log.isInfoEnabled()) log.info("Running read-write tx: timestamp=" + timestamp); // if(tx.isReadOnly()) { @@ -1915,7 +1906,7 @@ clearIndexCache(); - if(INFO) log.info("Reader is done: "+this); + if(log.isInfoEnabled()) log.info("Reader is done: "+this); } @@ -1934,7 +1925,7 @@ } finally { - if(INFO) log.info("done: "+this); + if(log.isInfoEnabled()) log.info("done: "+this); } @@ -1954,7 +1945,7 @@ final Thread t = Thread.currentThread(); - if(INFO) + if(log.isInfoEnabled()) log.info("Unisolated write task: " + this + ", thread=" + t); // // declare resource(s) to lock (exclusive locks are used). @@ -2027,7 +2018,7 @@ // set flag. ran = true; - if (INFO) + if (log.isInfoEnabled()) log.info("Task Ok: class=" + this); /* @@ -2049,7 +2040,7 @@ // Do not re-invoke it afterTask failed above. - if (INFO) + if (log.isInfoEnabled()) log.info("Task failed: class=" + this + " : " + t2); writeService.afterTask(this, t2); @@ -2343,6 +2334,8 @@ class IsolatedActionJournal implements IJournal, IAllocationContext { private final AbstractJournal delegate; + + @SuppressWarnings("rawtypes") private final IResourceLocator resourceLocator; public String toString() { @@ -2376,7 +2369,7 @@ * * @param source */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public IsolatedActionJournal(final AbstractJournal source) { if (source == null) @@ -2416,6 +2409,7 @@ /** * Delegates to the {@link AbstractTask}. */ + @Override public void dropIndex(final String name) { AbstractTask.this.dropIndex(name); @@ -2426,12 +2420,28 @@ * Note: This is the core implementation for registering an index - it * delegates to the {@link AbstractTask}. */ + @Override public IIndex registerIndex(final String name, final BTree btree) { return AbstractTask.this.registerIndex(name, btree); } + @Override + public ICheckpointProtocol register(final String name, final IndexMetadata metadata) { + + /* + * FIXME GIST : Support registration of index types other than BTree + * (HTree, Stream, etc). + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + */ + + throw new UnsupportedOperationException(); + + } + + @Override public void registerIndex(final IndexMetadata indexMetadata) { // delegate to core impl. @@ -2439,6 +2449,7 @@ } + @Override public IIndex registerIndex(final String name, final IndexMetadata indexMetadata) { @@ -2456,6 +2467,31 @@ /** * Note: access to an unisolated index is governed by the AbstractTask. */ + @Override + public ICheckpointProtocol getUnisolatedIndex(String name) { + try { + + /* + * FIXME GIST. 
This will throw a ClassCastException if the + * returned index is an ILocalBTreeView. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + */ + + return (ICheckpointProtocol) AbstractTask.this.getIndex(name); + + } catch(NoSuchIndexException ex) { + + // api conformance. + return null; + + } + } + + /** + * Note: access to an unisolated index is governed by the AbstractTask. + */ + @Override public IIndex getIndex(final String name) { try { @@ -2476,16 +2512,50 @@ * declare a lock - such views will always be read-only and support * concurrent readers. */ - public IIndex getIndex(final String name, final long timestamp) { + @Override + public ICheckpointProtocol getIndexLocal(final String name, + final long commitTime) { - if (timestamp == ITx.UNISOLATED) { + if (commitTime == ITx.UNISOLATED) { + + return getUnisolatedIndex(name); + + } + + /* + * The index view is obtained from the resource manager. + */ + + if (resourceManager instanceof IJournal) { + + /* + * This code path supports any type of index (BTree, HTree, + * etc). + */ + + return ((IJournal) resourceManager).getIndexLocal(name, + commitTime); + + } - return getIndex(name); + /** + * FIXME GIST : This code path only supports BTree + * (ILocalBTreeView). An attempt to resolve an HTree or other + * non-BTree based named index data structure will probably result + * in a ClassCastException. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/585" + * > GIST </a> + */ + return (ICheckpointProtocol) resourceManager.getIndex(name, commitTime); - } + } - // the index view is obtained from the resource manager. - return resourceManager.getIndex(name, timestamp); + @Override + public IIndex getIndex(final String name, final long timestamp) { + + return (IIndex) getIndexLocal(name, timestamp); } @@ -2495,6 +2565,7 @@ * the name of the backing index as one of the resources for which it * acquired a lock. */ + @Override public SparseRowStore getGlobalRowStore() { // did the task declare the resource name? @@ -2510,6 +2581,7 @@ } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { if (!TimestampUtility.isReadOnly(timestamp)) { @@ -2547,6 +2619,7 @@ * declared the names of the backing indices as resources for which it * acquired a lock. */ + @Override public BigdataFileSystem getGlobalFileSystem() { // did the task declare the resource name? @@ -2583,6 +2656,7 @@ * and will break semantics when the task is isolated by a transaction * rather than unisolated. */ + @Override public TemporaryStore getTempStore() { return tempStoreFactory.getTempStore(); @@ -2590,24 +2664,28 @@ } private TemporaryStoreFactory tempStoreFactory = new TemporaryStoreFactory(); - public IResourceLocator getResourceLocator() { + @Override + public IResourceLocator<?> getResourceLocator() { return resourceLocator; } + @Override public ILocalTransactionManager getLocalTransactionManager() { return delegate.getLocalTransactionManager(); } + @Override public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); @@ -2618,34 +2696,42 @@ * Disallowed methods (commit protocol and shutdown protocol). 
*/ + @Override public void abort() { throw new UnsupportedOperationException(); } + @Override public void close() { throw new UnsupportedOperationException(); } + @Override public void destroy() { throw new UnsupportedOperationException(); } + @Override public void deleteResources() { throw new UnsupportedOperationException(); } + @Override public long commit() { throw new UnsupportedOperationException(); } + @Override public void setCommitter(int index, ICommitter committer) { throw new UnsupportedOperationException(); } + @Override public void shutdown() { throw new UnsupportedOperationException(); } + @Override public void shutdownNow() { throw new UnsupportedOperationException(); } @@ -2658,70 +2744,87 @@ // return delegate.getKeyBuilder(); // } + @Override public void force(final boolean metadata) { delegate.force(metadata); } + @Override public int getByteCount(final long addr) { return delegate.getByteCount(addr); } + @Override public ICommitRecord getCommitRecord(final long timestamp) { return delegate.getCommitRecord(timestamp); } + @Override public CounterSet getCounters() { return delegate.getCounters(); } + @Override public File getFile() { return delegate.getFile(); } + @Override public long getOffset(final long addr) { return delegate.getOffset(addr); } + @Override public long getPhysicalAddress(final long addr) { return delegate.getPhysicalAddress(addr); } + @Override public Properties getProperties() { return delegate.getProperties(); } + @Override public UUID getUUID() { return delegate.getUUID(); } + @Override public IResourceMetadata getResourceMetadata() { return delegate.getResourceMetadata(); } + @Override public long getRootAddr(final int index) { return delegate.getRootAddr(index); } + @Override public long getLastCommitTime() { return delegate.getLastCommitTime(); } + @Override public IRootBlockView getRootBlockView() { return delegate.getRootBlockView(); } + @Override public boolean isFullyBuffered() { return delegate.isFullyBuffered(); } + @Override public boolean isOpen() { return delegate.isOpen(); } + @Override public boolean isReadOnly() { return delegate.isReadOnly(); } + @Override public boolean isStable() { return delegate.isStable(); } @@ -2730,22 +2833,27 @@ // delegate.packAddr(out, addr); // } + @Override public ByteBuffer read(final long addr) { return delegate.read(addr); } + @Override public long size() { return delegate.size(); } + @Override public long toAddr(final int nbytes, final long offset) { return delegate.toAddr(nbytes, offset); } + @Override public String toString(final long addr) { return delegate.toString(addr); } + // @Override // public IRootBlockView getRootBlock(final long commitTime) { // return delegate.getRootBlock(commitTime); // } @@ -2762,6 +2870,7 @@ * allocations to be scoped to the AbstractTask. 
*/ + @Override public long write(final ByteBuffer data) { return delegate.write(data, this); } @@ -2782,6 +2891,7 @@ return delegate.getInputStream(addr); } + @Override public void delete(final long addr) { delegate.delete(addr, this); } @@ -2808,19 +2918,23 @@ completeTask(); } + @Override public ScheduledFuture<?> addScheduledTask(final Runnable task, final long initialDelay, final long delay, final TimeUnit unit) { return delegate.addScheduledTask(task, initialDelay, delay, unit); } + @Override public boolean getCollectPlatformStatistics() { return delegate.getCollectPlatformStatistics(); } + @Override public boolean getCollectQueueStatistics() { return delegate.getCollectQueueStatistics(); } + @Override public int getHttpdPort() { return delegate.getHttpdPort(); } @@ -2849,6 +2963,8 @@ private class ReadOnlyJournal implements IJournal { private final IJournal delegate; + + @SuppressWarnings("rawtypes") private final DefaultResourceLocator resourceLocator; public String toString() { @@ -2857,7 +2973,7 @@ } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public ReadOnlyJournal(final AbstractJournal source) { if (source == null) @@ -2885,12 +3001,41 @@ * do). */ + @Override + public IIndex getIndex(final String name, final long timestamp) { + + if (timestamp == ITx.UNISOLATED) + throw new UnsupportedOperationException(); + + if (timestamp == AbstractTask.this.timestamp) { + + // to the AbstractTask + try { + + return AbstractTask.this.getIndex(name); + + } catch(NoSuchIndexException ex) { + + // api conformance. + return null; + + } + + } + + // to the backing journal. + return (IIndex) delegate.getIndexLocal(name, timestamp); + + } + /** * {@inheritDoc} * <p> * Note: Does not allow access to {@link ITx#UNISOLATED} indices. */ - public IIndex getIndex(final String name, final long timestamp) { + @Override + public ICheckpointProtocol getIndexLocal(final String name, + final long commitTime) { if (timestamp == ITx.UNISOLATED) throw new UnsupportedOperationException(); @@ -2900,7 +3045,12 @@ // to the AbstractTask try { - return AbstractTask.this.getIndex(name); + /* + * FIXME GIST : This will throw a ClassCastException if the + * index type is ReadCommittedIndex or FusedView. + */ + return (ICheckpointProtocol) AbstractTask.this + .getIndex(name); } catch(NoSuchIndexException ex) { @@ -2912,7 +3062,7 @@ } // to the backing journal. - return delegate.getIndex(name, timestamp); + return delegate.getIndexLocal(name, timestamp); } @@ -2937,30 +3087,53 @@ * Note: Not supported since this method returns the * {@link ITx#UNISOLATED} index. */ + @Override + public ICheckpointProtocol getUnisolatedIndex(String name) { + + throw new UnsupportedOperationException(); + + } + + /** + * Note: Not supported since this method returns the + * {@link ITx#UNISOLATED} index. 
+ */ + @Override public IIndex getIndex(String name) { throw new UnsupportedOperationException(); } + @Override public void dropIndex(String name) { throw new UnsupportedOperationException(); } + @Override + public ICheckpointProtocol register(String name, IndexMetadata metadata) { + + throw new UnsupportedOperationException(); + + } + + @Override public void registerIndex(IndexMetadata indexMetadata) { throw new UnsupportedOperationException(); } + @Override public IIndex registerIndex(String name, BTree btree) { throw new UnsupportedOperationException(); } + @Override public IIndex registerIndex(String name, IndexMetadata indexMetadata) { throw new UnsupportedOperationException(); @@ -2971,6 +3144,7 @@ * Returns an {@link ITx#READ_COMMITTED} view iff the index exists and * <code>null</code> otherwise. */ + @Override public SparseRowStore getGlobalRowStore() { /* @@ -3000,6 +3174,7 @@ } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { /* @@ -3036,6 +3211,7 @@ * Returns an {@link ITx#READ_COMMITTED} view iff the file system exists * and <code>null</code> otherwise. */ + @Override public BigdataFileSystem getGlobalFileSystem() { /* @@ -3085,6 +3261,7 @@ * and will break semantics when the task is isolated by a transaction * rather than unisolated. */ + @Override public TemporaryStore getTempStore() { return tempStoreFactory.getTempStore(); @@ -3092,24 +3269,28 @@ } private TemporaryStoreFactory tempStoreFactory = new TemporaryStoreFactory(); - public DefaultResourceLocator getResourceLocator() { + @Override + public DefaultResourceLocator<?> getResourceLocator() { return resourceLocator; } + @Override public ILocalTransactionManager getLocalTransactionManager() { return delegate.getLocalTransactionManager(); } + @Override public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); @@ -3120,34 +3301,42 @@ * Disallowed methods (commit and shutdown protocols). */ + @Override public void abort() { throw new UnsupportedOperationException(); } + @Override public void close() { throw new UnsupportedOperationException(); } + @Override public void destroy() { throw new UnsupportedOperationException(); } + @Override public long commit() { throw new UnsupportedOperationException(); } + @Override public void deleteResources() { throw new UnsupportedOperationException(); } + @Override public void setCommitter(int index, ICommitter committer) { throw new UnsupportedOperationException(); } + @Override public void shutdown() { throw new UnsupportedOperationException(); } + @Override public void shutdownNow() { throw new UnsupportedOperationException(); } @@ -3156,10 +3345,12 @@ * Disallowed methods (methods that write on the store). */ + @Override public void force(boolean metadata) { throw new UnsupportedOperationException(); } + @Override public long write(ByteBuffer data) { throw new UnsupportedOperationException(); } @@ -3169,6 +3360,7 @@ // throw new UnsupportedOperationException(); // } + @Override public void delete(long addr) { throw new UnsupportedOperationException(); } @@ -3177,86 +3369,107 @@ * Methods that delegate directly to the backing journal. 
*/ + @Override public int getByteCount(long addr) { return delegate.getByteCount(addr); } + @Override public ICommitRecord getCommitRecord(long timestamp) { return delegate.getCommitRecord(timestamp); } + @Override public CounterSet getCounters() { return delegate.getCounters(); } + @Override public File getFile() { return delegate.getFile(); } + @Override public long getOffset(long addr) { return delegate.getOffset(addr); } + @Override public long getPhysicalAddress(final long addr) { return delegate.getPhysicalAddress(addr); } + @Override public Properties getProperties() { return delegate.getProperties(); } + @Override public UUID getUUID() { return delegate.getUUID(); } + @Override public IResourceMetadata getResourceMetadata() { return delegate.getResourceMetadata(); } + @Override public long getRootAddr(int index) { return delegate.getRootAddr(index); } + @Override public long getLastCommitTime() { return delegate.getLastCommitTime(); } + @Override public IRootBlockView getRootBlockView() { return delegate.getRootBlockView(); } + @Override public boolean isFullyBuffered() { return delegate.isFullyBuffered(); } + @Override public boolean isOpen() { return delegate.isOpen(); } + @Override public boolean isReadOnly() { return delegate.isReadOnly(); } + @Override public boolean isStable() { return delegate.isStable(); } + @Override public ByteBuffer read(long addr) { return delegate.read(addr); } + @Override public long size() { return delegate.size(); } + @Override public long toAddr(int nbytes, long offset) { return delegate.toAddr(nbytes, offset); } + @Override public String toString(long addr) { return delegate.toString(addr); } +// @Override // public IRootBlockView getRootBlock(long commitTime) { // return delegate.getRootBlock(commitTime); // } @@ -3265,19 +3478,23 @@ // return delegate.getRootBlocks(startTime); // } + @Override public ScheduledFuture<?> addScheduledTask(Runnable task, long initialDelay, long delay, TimeUnit unit) { return delegate.addScheduledTask(task, initialDelay, delay, unit); } + @Override public boolean getCollectPlatformStatistics() { return delegate.getCollectPlatformStatistics(); } + @Override public boolean getCollectQueueStatistics() { return delegate.getCollectQueueStatistics(); } + @Override public int getHttpdPort() { return delegate.getHttpdPort(); } @@ -3307,71 +3524,87 @@ private IIndexManager delegate; - public DelegateIndexManager(IIndexManager delegate) { + public DelegateIndexManager(final IIndexManager delegate) { this.delegate = delegate; } + @Override public void dropIndex(String name) { delegate.dropIndex(name); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); } + @Override public BigdataFileSystem getGlobalFileSystem() { return delegate.getGlobalFileSystem(); } + @Override public SparseRowStore getGlobalRowStore() { return delegate.getGlobalRowStore(); } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { return delegate.getGlobalRowStore(timestamp); } + @Ove... [truncated message content] |
From: <tho...@us...> - 2013-05-30 20:40:43
Revision: 7174 http://bigdata.svn.sourceforge.net/bigdata/?rev=7174&view=rev Author: thompsonbry Date: 2013-05-30 20:40:36 +0000 (Thu, 30 May 2013) Log Message: ----------- Added release notes for 1.2.3. Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt Added: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/releases/RELEASE_1_2_3.txt 2013-05-30 20:40:36 UTC (rev 7174) @@ -0,0 +1,294 @@ +This is a minor release of bigdata(R). + +Bigdata is a horizontally-scaled, open-source architecture for indexed data with an emphasis on RDF capable of loading 1B triples in under one hour on a 15 node cluster. Bigdata operates in both a single machine mode (Journal) and a cluster mode (Federation). The Journal provides fast scalable ACID indexed storage for very large data sets, up to 50 billion triples / quads. The federation provides fast scalable shard-wise parallel indexed storage using dynamic sharding and shard-wise ACID updates and incremental cluster size growth. Both platforms support fully concurrent readers with snapshot isolation. + +Distributed processing offers greater throughput but does not reduce query or update latency. Choose the Journal when the anticipated scale and throughput requirements permit. Choose the Federation when the administrative and machine overhead associated with operating a cluster is an acceptable tradeoff to have essentially unlimited data scaling and throughput. + +See [1,2,8] for instructions on installing bigdata(R), [4] for the javadoc, and [3,5,6] for news, questions, and the latest developments. For more information about SYSTAP, LLC and bigdata, see [7]. + +Starting with the 1.0.0 release, we offer a WAR artifact [8] for easy installation of the single machine RDF database. For custom development and cluster installations we recommend checking out the code from SVN using the tag for this release. The code will build automatically under eclipse. You can also build the code using the ant script. The cluster installer requires the use of the ant script. + +You can download the WAR from: + +http://sourceforge.net/projects/bigdata/ + +You can checkout this release from: + +https://bigdata.svn.sourceforge.net/svnroot/bigdata/tags/BIGDATA_RELEASE_1_2_3 + +New features: + +- SPARQL 1.1 Update Extensions (SPARQL UPDATE for named solution sets). +- SPARQL 1.1 Property Paths. +- Remote Java client for Multi-Tenancy extensions NanoSparqlServer +- Sesame 2.6.10 dependency +- Plus numerous other bug fixes and performance enhancements. + +Feature summary: + +- Single machine data storage to ~50B triples/quads (RWStore); +- Clustered data storage is essentially unlimited; +- Simple embedded and/or webapp deployment (NanoSparqlServer); +- Triples, quads, or triples with provenance (SIDs); +- Fast RDFS+ inference and truth maintenance; +- Fast 100% native SPARQL 1.1 evaluation; +- Integrated "analytic" query package; +- %100 Java memory manager leverages the JVM native heap (no GC); + +Road map [3]: + +- High availability for the journal and the cluster. +- Runtime Query Optimizer for Analytic Query mode; and +- Simplified deployment, configuration, and administration for clusters. + +Change log: + + Note: Versions with (*) MAY require data migration. For details, see [9]. 
+ +1.2.3: + +- http://sourceforge.net/apps/trac/bigdata/ticket/168 (Maven Build) +- http://sourceforge.net/apps/trac/bigdata/ticket/196 (Journal leaks memory). +- http://sourceforge.net/apps/trac/bigdata/ticket/235 (Occasional deadlock in CI runs in com.bigdata.io.writecache.TestAll) +- http://sourceforge.net/apps/trac/bigdata/ticket/312 (CI (mock) quorums deadlock) +- http://sourceforge.net/apps/trac/bigdata/ticket/405 (Optimize hash join for subgroups with no incoming bound vars.) +- http://sourceforge.net/apps/trac/bigdata/ticket/412 (StaticAnalysis#getDefinitelyBound() ignores exogenous variables.) +- http://sourceforge.net/apps/trac/bigdata/ticket/485 (RDFS Plus Profile) +- http://sourceforge.net/apps/trac/bigdata/ticket/495 (SPARQL 1.1 Property Paths) +- http://sourceforge.net/apps/trac/bigdata/ticket/519 (Negative parser tests) +- http://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for SOLUTION SETS) +- http://sourceforge.net/apps/trac/bigdata/ticket/535 (Optimize JOIN VARS for Sub-Selects) +- http://sourceforge.net/apps/trac/bigdata/ticket/555 (Support PSOutputStream/InputStream at IRawStore) +- http://sourceforge.net/apps/trac/bigdata/ticket/559 (Use RDFFormat.NQUADS as the format identifier for the NQuads parser) +- http://sourceforge.net/apps/trac/bigdata/ticket/570 (MemoryManager Journal does not implement all methods). +- http://sourceforge.net/apps/trac/bigdata/ticket/575 (NSS Admin API) +- http://sourceforge.net/apps/trac/bigdata/ticket/577 (DESCRIBE with OFFSET/LIMIT needs to use sub-select) +- http://sourceforge.net/apps/trac/bigdata/ticket/578 (Concise Bounded Description (CBD)) +- http://sourceforge.net/apps/trac/bigdata/ticket/579 (CONSTRUCT should use distinct SPO filter) +- http://sourceforge.net/apps/trac/bigdata/ticket/583 (VoID in ServiceDescription) +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) +- http://sourceforge.net/apps/trac/bigdata/ticket/590 (nxparser fails with uppercase language tag) +- http://sourceforge.net/apps/trac/bigdata/ticket/592 (Optimize RWStore allocator sizes) +- http://sourceforge.net/apps/trac/bigdata/ticket/593 (Ugrade to Sesame 2.6.10) +- http://sourceforge.net/apps/trac/bigdata/ticket/594 (WAR was deployed using TRIPLES rather than QUADS by default) +- http://sourceforge.net/apps/trac/bigdata/ticket/596 (Change web.xml parameter names to be consistent with Jini/River) +- http://sourceforge.net/apps/trac/bigdata/ticket/597 (SPARQL UPDATE LISTENER) +- http://sourceforge.net/apps/trac/bigdata/ticket/598 (B+Tree branching factor and HTree addressBits are confused in their NodeSerializer implementations) +- http://sourceforge.net/apps/trac/bigdata/ticket/599 (BlobIV for blank node : NotMaterializedException) +- http://sourceforge.net/apps/trac/bigdata/ticket/600 (BlobIV collision counter hits false limit.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/601 (Log uncaught exceptions) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/607 (History service / index) +- http://sourceforge.net/apps/trac/bigdata/ticket/608 (LOG BlockingBuffer not progressing at INFO or lower level) +- http://sourceforge.net/apps/trac/bigdata/ticket/609 (bigdata-ganglia is required dependency for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/611 (The code that processes SPARQL Update has a typo) +- http://sourceforge.net/apps/trac/bigdata/ticket/612 (Bigdata scale-up depends on zookeper) +- http://sourceforge.net/apps/trac/bigdata/ticket/613 (SPARQL UPDATE response inlines large DELETE or INSERT triple graphs) +- http://sourceforge.net/apps/trac/bigdata/ticket/614 (static join optimizer does not get ordering right when multiple tails share vars with ancestry) +- http://sourceforge.net/apps/trac/bigdata/ticket/615 (AST2BOpUtility wraps UNION with an unnecessary hash join) +- http://sourceforge.net/apps/trac/bigdata/ticket/616 (Row store read/update not isolated on Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/617 (Concurrent KB create fails with "No axioms defined?") +- http://sourceforge.net/apps/trac/bigdata/ticket/618 (DirectBufferPool.poolCapacity maximum of 2GB) +- http://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests) +- http://sourceforge.net/apps/trac/bigdata/ticket/620 (UpdateServlet fails to parse MIMEType when doing conneg.) +- http://sourceforge.net/apps/trac/bigdata/ticket/626 (Expose performance counters for read-only indices) +- http://sourceforge.net/apps/trac/bigdata/ticket/627 (Environment variable override for NSS properties file) +- http://sourceforge.net/apps/trac/bigdata/ticket/628 (Create a bigdata-client jar for the NSS REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/631 (ClassCastException in SIDs mode query) +- http://sourceforge.net/apps/trac/bigdata/ticket/632 (NotMaterializedException when a SERVICE call needs variables that are provided as query input bindings) +- http://sourceforge.net/apps/trac/bigdata/ticket/633 (ClassCastException when binding non-uri values to a variable that occurs in predicate position) +- http://sourceforge.net/apps/trac/bigdata/ticket/638 (Change DEFAULT_MIN_RELEASE_AGE to 1ms) +- http://sourceforge.net/apps/trac/bigdata/ticket/640 (Conditionally rollback() BigdataSailConnection if dirty) +- http://sourceforge.net/apps/trac/bigdata/ticket/642 (Property paths do not work inside of exists/not exists filters) +- http://sourceforge.net/apps/trac/bigdata/ticket/643 (Add web.xml parameters to lock down public NSS end points) +- http://sourceforge.net/apps/trac/bigdata/ticket/644 (Bigdata2Sesame2BindingSetIterator can fail to notice asynchronous close()) +- http://sourceforge.net/apps/trac/bigdata/ticket/650 (Can not POST RDF to a graph using REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/654 (Rare AssertionError in WriteCache.clearAddrMap()) +- http://sourceforge.net/apps/trac/bigdata/ticket/655 (SPARQL REGEX operator does not perform case-folding correctly for Unicode data) +- http://sourceforge.net/apps/trac/bigdata/ticket/656 (InFactory bug when IN args consist of a single literal) +- http://sourceforge.net/apps/trac/bigdata/ticket/647 (SIDs mode creates unnecessary hash join for GRAPH group patterns) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/667 (Provide NanoSparqlServer initialization hook) +- http://sourceforge.net/apps/trac/bigdata/ticket/669 (Doubly nested subqueries yield no results with LIMIT) +- http://sourceforge.net/apps/trac/bigdata/ticket/675 (Flush indices in parallel during checkpoint to reduce IO latency) + +1.2.2: + +- http://sourceforge.net/apps/trac/bigdata/ticket/586 (RWStore immedateFree() not removing Checkpoint addresses from the historical index cache.) +- http://sourceforge.net/apps/trac/bigdata/ticket/602 (RWStore does not discard logged deletes on reset()) +- http://sourceforge.net/apps/trac/bigdata/ticket/603 (Prepare critical maintenance release as branch of 1.2.1) + +1.2.1: + +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/539 (NotMaterializedException with REGEX and Vocab) +- http://sourceforge.net/apps/trac/bigdata/ticket/540 (SPARQL UPDATE using NSS via index.html) +- http://sourceforge.net/apps/trac/bigdata/ticket/541 (MemoryManaged backed Journal mode) +- http://sourceforge.net/apps/trac/bigdata/ticket/546 (Index cache for Journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/549 (BTree can not be cast to Name2Addr (MemStore recycler)) +- http://sourceforge.net/apps/trac/bigdata/ticket/550 (NPE in Leaf.getKey() : root cause was user error) +- http://sourceforge.net/apps/trac/bigdata/ticket/558 (SPARQL INSERT not working in same request after INSERT DATA) +- http://sourceforge.net/apps/trac/bigdata/ticket/562 (Sub-select in INSERT cause NPE in UpdateExprBuilder) +- http://sourceforge.net/apps/trac/bigdata/ticket/563 (DISTINCT ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/567 (Failure to set cached value on IV results in incorrect behavior for complex UPDATE operation) +- http://sourceforge.net/apps/trac/bigdata/ticket/568 (DELETE WHERE fails with Java AssertionError) +- http://sourceforge.net/apps/trac/bigdata/ticket/569 (LOAD-CREATE-LOAD using virgin journal fails with "Graph exists" exception) +- http://sourceforge.net/apps/trac/bigdata/ticket/571 (DELETE/INSERT WHERE handling of blank nodes) +- http://sourceforge.net/apps/trac/bigdata/ticket/573 (NullPointerException when attempting to INSERT DATA containing a blank node) + +1.2.0: (*) + +- http://sourceforge.net/apps/trac/bigdata/ticket/92 (Monitoring webapp) +- http://sourceforge.net/apps/trac/bigdata/ticket/267 (Support evaluation of 3rd party operators) +- http://sourceforge.net/apps/trac/bigdata/ticket/337 (Compact and efficient movement of binding sets between nodes.) 
+- http://sourceforge.net/apps/trac/bigdata/ticket/433 (Cluster leaks threads under read-only index operations: DGC thread leak) +- http://sourceforge.net/apps/trac/bigdata/ticket/437 (Thread-local cache combined with unbounded thread pools causes effective memory leak: termCache memory leak & thread-local buffers) +- http://sourceforge.net/apps/trac/bigdata/ticket/438 (KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/439 (Class loader problem) +- http://sourceforge.net/apps/trac/bigdata/ticket/441 (Ganglia integration) +- http://sourceforge.net/apps/trac/bigdata/ticket/443 (Logger for RWStore transaction service and recycler) +- http://sourceforge.net/apps/trac/bigdata/ticket/444 (SPARQL query can fail to notice when IRunningQuery.isDone() on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/445 (RWStore does not track tx release correctly) +- http://sourceforge.net/apps/trac/bigdata/ticket/446 (HTTP Repostory broken with bigdata 1.1.0) +- http://sourceforge.net/apps/trac/bigdata/ticket/448 (SPARQL 1.1 UPDATE) +- http://sourceforge.net/apps/trac/bigdata/ticket/449 (SPARQL 1.1 Federation extension) +- http://sourceforge.net/apps/trac/bigdata/ticket/451 (Serialization error in SIDs mode on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/454 (Global Row Store Read on Cluster uses Tx) +- http://sourceforge.net/apps/trac/bigdata/ticket/456 (IExtension implementations do point lookups on lexicon) +- http://sourceforge.net/apps/trac/bigdata/ticket/457 ("No such index" on cluster under concurrent query workload) +- http://sourceforge.net/apps/trac/bigdata/ticket/458 (Java level deadlock in DS) +- http://sourceforge.net/apps/trac/bigdata/ticket/460 (Uncaught interrupt resolving RDF terms) +- http://sourceforge.net/apps/trac/bigdata/ticket/461 (KeyAfterPartitionException / KeyBeforePartitionException on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/463 (NoSuchVocabularyItem with LUBMVocabulary for DerivedNumericsExtension) +- http://sourceforge.net/apps/trac/bigdata/ticket/464 (Query statistics do not update correctly on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/465 (Too many GRS reads on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/469 (Sail does not flush assertion buffers before query) +- http://sourceforge.net/apps/trac/bigdata/ticket/472 (acceptTaskService pool size on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/475 (Optimize serialization for query messages on cluster) +- http://sourceforge.net/apps/trac/bigdata/ticket/476 (Test suite for writeCheckpoint() and recycling for BTree/HTree) +- http://sourceforge.net/apps/trac/bigdata/ticket/478 (Cluster does not map input solution(s) across shards) +- http://sourceforge.net/apps/trac/bigdata/ticket/480 (Error releasing deferred frees using 1.0.6 against a 1.0.4 journal) +- http://sourceforge.net/apps/trac/bigdata/ticket/481 (PhysicalAddressResolutionException against 1.0.6) +- http://sourceforge.net/apps/trac/bigdata/ticket/482 (RWStore reset() should be thread-safe for concurrent readers) +- http://sourceforge.net/apps/trac/bigdata/ticket/484 (Java API for NanoSparqlServer REST API) +- http://sourceforge.net/apps/trac/bigdata/ticket/491 (AbstractTripleStore.destroy() does not clear the locator cache) +- http://sourceforge.net/apps/trac/bigdata/ticket/492 (Empty chunk in ThickChunkMessage (cluster)) +- http://sourceforge.net/apps/trac/bigdata/ticket/493 (Virtual Graphs) +- 
http://sourceforge.net/apps/trac/bigdata/ticket/496 (Sesame 2.6.3) +- http://sourceforge.net/apps/trac/bigdata/ticket/497 (Implement STRBEFORE, STRAFTER, and REPLACE) +- http://sourceforge.net/apps/trac/bigdata/ticket/498 (Bring bigdata RDF/XML parser up to openrdf 2.6.3.) +- http://sourceforge.net/apps/trac/bigdata/ticket/500 (SPARQL 1.1 Service Description) +- http://www.openrdf.org/issues/browse/SES-884 (Aggregation with an solution set as input should produce an empty solution as output) +- http://www.openrdf.org/issues/browse/SES-862 (Incorrect error handling for SPARQL aggregation; fix in 2.6.1) +- http://www.openrdf.org/issues/browse/SES-873 (Order the same Blank Nodes together in ORDER BY) +- http://sourceforge.net/apps/trac/bigdata/ticket/501 (SPARQL 1.1 BINDINGS are ignored) +- http://sourceforge.net/apps/trac/bigdata/ticket/503 (Bigdata2Sesame2BindingSetIterator throws QueryEvaluationException were it should throw NoSuchElementException) +- http://sourceforge.net/apps/trac/bigdata/ticket/504 (UNION with Empty Group Pattern) +- http://sourceforge.net/apps/trac/bigdata/ticket/505 (Exception when using SPARQL sort & statement identifiers) +- http://sourceforge.net/apps/trac/bigdata/ticket/506 (Load, closure and query performance in 1.1.x versus 1.0.x) +- http://sourceforge.net/apps/trac/bigdata/ticket/508 (LIMIT causes hash join utility to log errors) +- http://sourceforge.net/apps/trac/bigdata/ticket/513 (Expose the LexiconConfiguration to Function BOPs) +- http://sourceforge.net/apps/trac/bigdata/ticket/515 (Query with two "FILTER NOT EXISTS" expressions returns no results) +- http://sourceforge.net/apps/trac/bigdata/ticket/516 (REGEXBOp should cache the Pattern when it is a constant) +- http://sourceforge.net/apps/trac/bigdata/ticket/517 (Java 7 Compiler Compatibility) +- http://sourceforge.net/apps/trac/bigdata/ticket/518 (Review function bop subclass hierarchy, optimize datatype bop, etc.) +- http://sourceforge.net/apps/trac/bigdata/ticket/520 (CONSTRUCT WHERE shortcut) +- http://sourceforge.net/apps/trac/bigdata/ticket/521 (Incremental materialization of Tuple and Graph query results) +- http://sourceforge.net/apps/trac/bigdata/ticket/525 (Modify the IChangeLog interface to support multiple agents) +- http://sourceforge.net/apps/trac/bigdata/ticket/527 (Expose timestamp of LexiconRelation to function bops) +- http://sourceforge.net/apps/trac/bigdata/ticket/532 (ClassCastException during hash join (can not be cast to TermId)) +- http://sourceforge.net/apps/trac/bigdata/ticket/533 (Review materialization for inline IVs) +- http://sourceforge.net/apps/trac/bigdata/ticket/534 (BSBM BI Q5 error using MERGE JOIN) + +1.1.0 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/23 (Lexicon joins) + - http://sourceforge.net/apps/trac/bigdata/ticket/109 (Store large literals as "blobs") + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/203 (Implement an persistence capable hash table to support analytic query) + - http://sourceforge.net/apps/trac/bigdata/ticket/209 (AccessPath should visit binding sets rather than elements for high level query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/227 (SliceOp appears to be necessary when operator plan should suffice without) + - http://sourceforge.net/apps/trac/bigdata/ticket/232 (Bottom-up evaluation semantics). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/246 (Derived xsd numeric data types must be inlined as extension types.) + - http://sourceforge.net/apps/trac/bigdata/ticket/254 (Revisit pruning of intermediate variable bindings during query execution) + - http://sourceforge.net/apps/trac/bigdata/ticket/261 (Lift conditions out of subqueries.) + - http://sourceforge.net/apps/trac/bigdata/ticket/300 (Native ORDER BY) + - http://sourceforge.net/apps/trac/bigdata/ticket/324 (Inline predeclared URIs and namespaces in 2-3 bytes) + - http://sourceforge.net/apps/trac/bigdata/ticket/330 (NanoSparqlServer does not locate "html" resources when run from jar) + - http://sourceforge.net/apps/trac/bigdata/ticket/334 (Support inlining of unicode data in the statement indices.) + - http://sourceforge.net/apps/trac/bigdata/ticket/364 (Scalable default graph evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/368 (Prune variable bindings during query evaluation) + - http://sourceforge.net/apps/trac/bigdata/ticket/370 (Direct translation of openrdf AST to bigdata AST) + - http://sourceforge.net/apps/trac/bigdata/ticket/373 (Fix StrBOp and other IValueExpressions) + - http://sourceforge.net/apps/trac/bigdata/ticket/377 (Optimize OPTIONALs with multiple statement patterns.) + - http://sourceforge.net/apps/trac/bigdata/ticket/380 (Native SPARQL evaluation on cluster) + - http://sourceforge.net/apps/trac/bigdata/ticket/387 (Cluster does not compute closure) + - http://sourceforge.net/apps/trac/bigdata/ticket/395 (HTree hash join performance) + - http://sourceforge.net/apps/trac/bigdata/ticket/401 (inline xsd:unsigned datatypes) + - http://sourceforge.net/apps/trac/bigdata/ticket/408 (xsd:string cast fails for non-numeric data) + - http://sourceforge.net/apps/trac/bigdata/ticket/421 (New query hints model.) + - http://sourceforge.net/apps/trac/bigdata/ticket/431 (Use of read-only tx per query defeats cache on cluster) + +1.0.3 + + - http://sourceforge.net/apps/trac/bigdata/ticket/217 (BTreeCounters does not track bytes released) + - http://sourceforge.net/apps/trac/bigdata/ticket/269 (Refactor performance counters using accessor interface) + - http://sourceforge.net/apps/trac/bigdata/ticket/329 (B+Tree should delete bloom filter when it is disabled.) 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/372 (RWStore does not prune the CommitRecordIndex) + - http://sourceforge.net/apps/trac/bigdata/ticket/375 (Persistent memory leaks (RWStore/DISK)) + - http://sourceforge.net/apps/trac/bigdata/ticket/385 (FastRDFValueCoder2: ArrayIndexOutOfBoundsException) + - http://sourceforge.net/apps/trac/bigdata/ticket/391 (Release age advanced on WORM mode journal) + - http://sourceforge.net/apps/trac/bigdata/ticket/392 (Add a DELETE by access path method to the NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/393 (Add "context-uri" request parameter to specify the default context for INSERT in the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/394 (log4j configuration error message in WAR deployment) + - http://sourceforge.net/apps/trac/bigdata/ticket/399 (Add a fast range count method to the REST API) + - http://sourceforge.net/apps/trac/bigdata/ticket/422 (Support temp triple store wrapped by a BigdataSail) + - http://sourceforge.net/apps/trac/bigdata/ticket/424 (NQuads support for NanoSparqlServer) + - http://sourceforge.net/apps/trac/bigdata/ticket/425 (Bug fix to DEFAULT_RDF_FORMAT for bulk data loader in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/426 (Support either lockfile (procmail) and dotlockfile (liblockfile1) in scale-out) + - http://sourceforge.net/apps/trac/bigdata/ticket/427 (BigdataSail#getReadOnlyConnection() race condition with concurrent commit) + - http://sourceforge.net/apps/trac/bigdata/ticket/435 (Address is 0L) + - http://sourceforge.net/apps/trac/bigdata/ticket/436 (TestMROWTransactions failure in CI) + +1.0.2 + + - http://sourceforge.net/apps/trac/bigdata/ticket/32 (Query time expansion of (foo rdf:type rdfs:Resource) drags in SPORelation for scale-out.) + - http://sourceforge.net/apps/trac/bigdata/ticket/181 (Scale-out LUBM "how to" in wiki and build.xml are out of date.) + - http://sourceforge.net/apps/trac/bigdata/ticket/356 (Query not terminated by error.) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/361 (IRunningQuery not closed promptly.) + - http://sourceforge.net/apps/trac/bigdata/ticket/371 (DataLoader fails to load resources available from the classpath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/376 (Support for the streaming of bigdata IBindingSets into a sparql query.) + - http://sourceforge.net/apps/trac/bigdata/ticket/378 (ClosedByInterruptException during heavy query mix.) + - http://sourceforge.net/apps/trac/bigdata/ticket/379 (NotSerializableException for SPOAccessPath.) + - http://sourceforge.net/apps/trac/bigdata/ticket/382 (Change dependencies to Apache River 2.2.0) + +1.0.1 (*) + + - http://sourceforge.net/apps/trac/bigdata/ticket/107 (Unicode clean schema names in the sparse row store). + - http://sourceforge.net/apps/trac/bigdata/ticket/124 (TermIdEncoder should use more bits for scale-out). + - http://sourceforge.net/apps/trac/bigdata/ticket/225 (OSX requires specialized performance counter collection classes). + - http://sourceforge.net/apps/trac/bigdata/ticket/348 (BigdataValueFactory.asValue() must return new instance when DummyIV is used). + - http://sourceforge.net/apps/trac/bigdata/ticket/349 (TermIdEncoder limits Journal to 2B distinct RDF Values per triple/quad store instance). 
+ - http://sourceforge.net/apps/trac/bigdata/ticket/351 (SPO not Serializable exception in SIDS mode (scale-out)). + - http://sourceforge.net/apps/trac/bigdata/ticket/352 (ClassCastException when querying with binding-values that are not known to the database). + - http://sourceforge.net/apps/trac/bigdata/ticket/353 (UnsupportedOperatorException for some SPARQL queries). + - http://sourceforge.net/apps/trac/bigdata/ticket/355 (Query failure when comparing with non materialized value). + - http://sourceforge.net/apps/trac/bigdata/ticket/357 (RWStore reports "FixedAllocator returning null address, with freeBits".) + - http://sourceforge.net/apps/trac/bigdata/ticket/359 (NamedGraph pattern fails to bind graph variable if only one binding exists.) + - http://sourceforge.net/apps/trac/bigdata/ticket/362 (log4j - slf4j bridge.) + +For more information about bigdata(R), please see the following links: + +[1] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Main_Page +[2] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=GettingStarted +[3] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=Roadmap +[4] http://www.bigdata.com/bigdata/docs/api/ +[5] http://sourceforge.net/projects/bigdata/ +[6] http://www.bigdata.com/blog +[7] http://www.systap.com/bigdata.htm +[8] http://sourceforge.net/projects/bigdata/files/bigdata/ +[9] http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=DataMigration + +About bigdata: + +Bigdata(R) is a horizontally-scaled, general purpose storage and computing fabric for ordered data (B+Trees), designed to operate on either a single server or a cluster of commodity hardware. Bigdata(R) uses dynamically partitioned key-range shards in order to remove any realistic scaling limits - in principle, bigdata(R) may be deployed on 10s, 100s, or even thousands of machines and new capacity may be added incrementally without requiring the full reload of all data. The bigdata(R) RDF database supports RDFS and OWL Lite reasoning, high-level query (SPARQL), and datum level provenance. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
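For readers who want to see what the SPARQL 1.1 Property Paths feature listed in these release notes enables, the following minimal Java sketch prepares a transitive foaf:knows query through the remote client. Only the prepareTupleQuery(String) method is taken from the RemoteRepository diff in r7173 below; the class name, the foaf example data, and the choice to stop at query preparation are illustrative assumptions, not part of the release.

import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

public class PropertyPathExample {

    /**
     * SPARQL 1.1 property path: the names of everyone reachable from alice
     * over one or more foaf:knows edges (the "+" modifier is the new feature).
     */
    static final String QUERY =
          "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"
        + "SELECT DISTINCT ?name\n"
        + "WHERE {\n"
        + "  <http://example.org/alice> foaf:knows+ ?person .\n"
        + "  ?person foaf:name ?name .\n"
        + "}";

    /**
     * Prepare (but do not evaluate) the query against a NanoSparqlServer end
     * point; evaluation of the returned prepared query is left to the caller.
     */
    public static void prepare(final RemoteRepository repo) throws Exception {

        repo.prepareTupleQuery(QUERY);

    }

}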
From: <tho...@us...> - 2013-05-30 17:34:29
Revision: 7173 http://bigdata.svn.sourceforge.net/bigdata/?rev=7173&view=rev Author: thompsonbry Date: 2013-05-30 17:34:20 +0000 (Thu, 30 May 2013) Log Message: ----------- - RemoteServiceOptions: Modified the default for isGet() to be false. This supports use cases where HTTP caching must be defeated in order to obtain a then-current view of the status of the remote resource. - RemoteRepository: Added setQueryMethod() and getQueryMethod() so you can control whether or not http caching will be used for queries. Added setMaxRequestURLLength() to permit control of when a POST or a GET with a long requestURL is converted into a POST with a ''application/x-www-form-urlencoded'' request body. Differentiated between idempotent and non-idempotent methods. - QueryServlet, RESTServlet: modified to support POST for more of the REST API. TODO Add unit tests for ESTCARD and CONTEXTS that use GET / POST. @see https://sourceforge.net/apps/trac/bigdata/ticket/619 (RemoteRepository class should use application/x-www-form-urlencoded for large POST requests) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceOptions.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceOptions.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceOptions.java 2013-05-30 16:00:14 UTC (rev 7172) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/RemoteServiceOptions.java 2013-05-30 17:34:20 UTC (rev 7173) @@ -60,8 +60,18 @@ // // } + /** + * Note: The default is <code>false</code>. This supports use cases where + * the end points are read/write databases and http caching must be defeated + * in order to gain access to the most recent committed state of the end + * point. + * + * @see #isGET() + */ + private final static boolean DEFAULT_IS_GET = false; + private boolean isSparql11 = true; - private boolean isGET = true; + private boolean isGET = DEFAULT_IS_GET; private String acceptStr = null; public RemoteServiceOptions() { @@ -94,9 +104,14 @@ } /** - * When <code>true</code>, use GET for query. Otherwise use POST. Note that - * POST can often handle larger queries than GET due to limits at the HTTP - * client layer, but HTTP caching only works for GET. + * When <code>true</code>, use GET for query and otherwise use POST (default + * {@value #DEFAULT_IS_GET}). POST can often handle larger queries than GET + * due to limits at the HTTP client layer and will defeat http caching and + * thus provide a current view of the committed state of the SPARQL end + * point when the end point is a read/write database. However, GET supports + * HTTP caching and can scale much better when the SPARQL end point is a + * read-only resource or a read-mostly resource where stale reads are + * acceptable. 
*/ public boolean isGET() { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-05-30 16:00:14 UTC (rev 7172) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java 2013-05-30 17:34:20 UTC (rev 7173) @@ -150,6 +150,21 @@ // SPARQL 1.1 UPDATE. doUpdate(req, resp); + } else if (req.getParameter(ATTR_UUID) != null) { + + // UUID with caching defeated. + doUUID(req, resp); + + } else if (req.getParameter(ATTR_ESTCARD) != null) { + + // ESTCARD with caching defeated. + doEstCard(req, resp); + + } else if (req.getParameter(ATTR_CONTEXTS) != null) { + + // CONTEXTS with caching defeated. + doContexts(req, resp); + } else { // SPARQL Query. Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2013-05-30 16:00:14 UTC (rev 7172) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java 2013-05-30 17:34:20 UTC (rev 7173) @@ -190,8 +190,12 @@ protected void doPost(final HttpServletRequest req, final HttpServletResponse resp) throws IOException { - if (req.getParameter("query") != null - || req.getParameter("update") != null) { + if (req.getParameter(QueryServlet.ATTR_QUERY) != null + || req.getParameter(QueryServlet.ATTR_UPDATE) != null + || req.getParameter(QueryServlet.ATTR_UUID) != null + || req.getParameter(QueryServlet.ATTR_ESTCARD) != null + || req.getParameter(QueryServlet.ATTR_CONTEXTS) != null + ) { // SPARQL QUERY -or- SPARQL UPDATE via POST m_queryServlet.doPost(req, resp); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-05-30 16:00:14 UTC (rev 7172) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java 2013-05-30 17:34:20 UTC (rev 7173) @@ -97,7 +97,9 @@ import org.xml.sax.Attributes; import org.xml.sax.ext.DefaultHandler2; +import com.bigdata.rdf.sparql.ast.service.RemoteServiceOptions; + /** * Java API to the Nano Sparql Server. * <p> @@ -135,6 +137,29 @@ static protected final String UTF8 = "UTF-8"; /** + * Note: The default is <code>false</code>. This supports use cases where + * the end points are read/write databases and http caching must be defeated + * in order to gain access to the most recent committed state of the end + * point. + * + * @see #getQueryMethod() + * @see #setQueryMethod(String) + */ + static private final String DEFAULT_QUERY_METHOD = "POST"; + + /** + * The default maximum limit on a requestURL before the request is converted + * into a POST using a <code>application/x-www-form-urlencoded</code> + * request entity. + * <p> + * Note: I suspect that 2000 might be a better default limit. If the limit + * is 4096 bytes on the target, then, even with UTF encoding, most queries + * having a request URL that is 2000 characters long should go through with + * a GET. 
1000 is a safe value but it could reduce http caching. + */ + static private final int DEFAULT_MAX_REQUEST_URL_LENGTH = 1000; + + /** * The service end point for the default data set. */ protected final String sparqlEndpointURL; @@ -148,38 +173,86 @@ * Thread pool for processing HTTP responses in background. */ protected final Executor executor; + + /** + * The maximum requestURL length before the request is converted into a POST + * using a <code>application/x-www-form-urlencoded</code> request entity. + */ + private volatile int maxRequestURLLength = DEFAULT_MAX_REQUEST_URL_LENGTH; -// /** -// * Create a connection to a remote repository using a shared -// * {@link ClientConnectionManager} and a {@link DefaultHttpClient}. -// * -// * @param serviceURL -// * The SPARQL http end point. -// * -// * @see ClientConnectionManagerFactory#getInstance() -// */ -// public RemoteRepository(final String serviceURL) { -// -// this(serviceURL, new DefaultHttpClient( -// ClientConnectionManagerFactory.getInstance())); -// -// } -// -// /** -// * Create a connection to a remote repository. -// * -// * @param serviceURL -// * The SPARQL http end point. -// * @param httpClient -// * The {@link HttpClient}. -// */ -// public RemoteRepository(final String serviceURL, final HttpClient httpClient) { -// -// this(serviceURL, httpClient, Executors.newCachedThreadPool()); -// -// } + /** + * The HTTP verb that will be used for a QUERY (versus a UPDATE or other + * mutation operation). + */ + private volatile String queryMethod = DEFAULT_QUERY_METHOD; + + /** + * Return the maximum requestURL length before the request is converted into + * a POST using a <code>application/x-www-form-urlencoded</code> request + * entity. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619"> + * RemoteRepository class should use application/x-www-form-urlencoded + * for large POST requests </a> + */ + public int getMaxRequestURLLength() { + + return maxRequestURLLength; + + } + + public void setMaxRequestURLLength(final int newVal) { + + if (newVal <= 0) + throw new IllegalArgumentException(); + + this.maxRequestURLLength = newVal; + + } /** + * Return the HTTP verb that will be used for a QUERY (versus an UPDATE or + * other mutation operations) (default {@value #DEFAULT_IS_GET}). POST can + * often handle larger queries than GET due to limits at the HTTP client + * layer and will defeat http caching and thus provide a current view of the + * committed state of the SPARQL end point when the end point is a + * read/write database. However, GET supports HTTP caching and can scale + * much better when the SPARQL end point is a read-only resource or a + * read-mostly resource where stale reads are acceptable. + * + * @see #setQueryMethod(String) + */ + public String getQueryMethod() { + + return queryMethod; + + } + + /** + * Set the default HTTP verb for QUERY and other idempotant operations. + * + * @param method + * The method which may be "POST" or "GET". + * + * @see #getQueryMethod() + * + * @see RemoteServiceOptions#setGET(boolean) + */ + public void setQueryMethod(final String method) { + + if ("POST".equalsIgnoreCase(method) || "GET".equalsIgnoreCase(method)) { + + this.queryMethod = method.toUpperCase(); + + } else { + + throw new IllegalArgumentException(); + + } + + } + + /** * Create a connection to a remote repository. 
A typical invocation looks * like: * @@ -272,7 +345,7 @@ public IPreparedTupleQuery prepareTupleQuery(final String query) throws Exception { - return new TupleQuery(newConnectOptions(), UUID.randomUUID(), query); + return new TupleQuery(newQueryConnectOptions(), UUID.randomUUID(), query); } @@ -287,7 +360,7 @@ public IPreparedGraphQuery prepareGraphQuery(final String query) throws Exception { - return new GraphQuery(newConnectOptions(), UUID.randomUUID(), query); + return new GraphQuery(newQueryConnectOptions(), UUID.randomUUID(), query); } @@ -302,7 +375,7 @@ public IPreparedBooleanQuery prepareBooleanQuery(final String query) throws Exception { - return new BooleanQuery(newConnectOptions(), UUID.randomUUID(), query); + return new BooleanQuery(newQueryConnectOptions(), UUID.randomUUID(), query); } @@ -319,7 +392,7 @@ public IPreparedSparqlUpdate prepareUpdate(final String updateStr) throws Exception { - return new SparqlUpdate(newConnectOptions(), UUID.randomUUID(), + return new SparqlUpdate(newUpdateConnectOptions(), UUID.randomUUID(), updateStr); } @@ -454,7 +527,7 @@ */ public void cancel(final UUID queryId) throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newUpdateConnectOptions(); opts.addRequestParam("cancelQuery"); @@ -482,10 +555,8 @@ public long rangeCount(final Resource s, final URI p, final Value o, final Resource... c) throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newQueryConnectOptions(); - opts.method = "GET"; - opts.addRequestParam("ESTCARD"); if (s != null) { opts.addRequestParam("s", EncodeDecodeValue.encodeValue(s)); @@ -549,10 +620,8 @@ */ public Collection<Resource> getContexts() throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newQueryConnectOptions(); - opts.method = "GET"; - opts.addRequestParam("CONTEXTS"); HttpResponse resp = null; @@ -590,10 +659,8 @@ */ public long add(final AddOp add) throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newUpdateConnectOptions(); - opts.method = "POST"; - add.prepareForWire(); if (add.format != null) { @@ -650,7 +717,7 @@ */ public long remove(final RemoveOp remove) throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newUpdateConnectOptions(); remove.prepareForWire(); @@ -738,7 +805,7 @@ */ public long update(final RemoveOp remove, final AddOp add) throws Exception { - final ConnectOptions opts = newConnectOptions(); + final ConnectOptions opts = newUpdateConnectOptions(); remove.prepareForWire(); add.prepareForWire(); @@ -1204,6 +1271,10 @@ * The connection options. * * @return The connection. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/619"> + * RemoteRepository class should use application/x-www-form-urlencoded + * for large POST requests </a> */ public HttpResponse doConnect(final ConnectOptions opts) throws Exception { @@ -1214,21 +1285,41 @@ final StringBuilder urlString = new StringBuilder(opts.serviceURL); ConnectOptions.addQueryParams(urlString, opts.requestParams); - - /* - * URL is too long. Reset the URL to just the service endpoint - * and use application/x-www-form-urlencoded entity instead. Only in - * cases where there is not already a request entity (SPARQL query and - * SPARQL update). 
- */ - if (urlString.length() > 1000 && - opts.method.equals("POST") && opts.entity == null) { - - urlString.setLength(0); - urlString.append(opts.serviceURL); - opts.entity = ConnectOptions.getFormEntity(opts.requestParams); - + final boolean isLongRequestURL = urlString.length() > getMaxRequestURLLength(); + + if (isLongRequestURL && opts.method.equals("POST") + && opts.entity == null) { + + /* + * URL is too long. Reset the URL to just the service endpoint and + * use application/x-www-form-urlencoded entity instead. Only in + * cases where there is not already a request entity (SPARQL query + * and SPARQL update). + */ + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + + } else if (isLongRequestURL && opts.method.equals("GET") + && opts.entity == null) { + + /* + * Convert automatically to a POST if the request URL is too long. + * + * Note: [opts.entity == null] should always be true for a GET so + * this bit is a paranoia check. + */ + + opts.method = "POST"; + + urlString.setLength(0); + urlString.append(opts.serviceURL); + + opts.entity = ConnectOptions.getFormEntity(opts.requestParams); + } if (log.isDebugEnabled()) { @@ -1930,6 +2021,34 @@ /** * Return the {@link ConnectOptions} which will be used by default for the + * SPARQL end point for a QUERY or other idempotent operation. + */ + final protected ConnectOptions newQueryConnectOptions() { + + final ConnectOptions opts = newConnectOptions(sparqlEndpointURL); + + opts.method = getQueryMethod(); + + return opts; + + } + + /** + * Return the {@link ConnectOptions} which will be used by default for the + * SPARQL end point for an UPDATE or other non-idempotant operation. + */ + final protected ConnectOptions newUpdateConnectOptions() { + + final ConnectOptions opts = newConnectOptions(sparqlEndpointURL); + + opts.method = "POST"; + + return opts; + + } + + /** + * Return the {@link ConnectOptions} which will be used by default for the * SPARQL end point. */ final protected ConnectOptions newConnectOptions() { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
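A brief, hedged sketch of how a caller might use the knobs added in this revision. Only setQueryMethod(), getQueryMethod() and setMaxRequestURLLength() are taken from the diff above; the class name, the helper method name, and the 2000 character threshold (mentioned in the javadoc only as a possible better default) are assumptions for illustration.

import com.bigdata.rdf.sail.webapp.client.RemoteRepository;

public class QueryMethodConfigExample {

    /**
     * Switch idempotent operations (query, ESTCARD, CONTEXTS) back to GET so
     * that http caching can be exploited against a read-mostly end point,
     * while still converting request URLs longer than 2000 characters into a
     * POST with an application/x-www-form-urlencoded request body.
     */
    public static void configureForReadMostlyEndPoint(final RemoteRepository repo) {

        // The default after this change is "POST", which defeats http caching.
        repo.setQueryMethod("GET");

        // Raise the limit at which a long request URL is converted to a POST.
        repo.setMaxRequestURLLength(2000);

        // Non-idempotent operations (SPARQL UPDATE, add, remove, cancel) are
        // always issued as POST regardless of this setting.
        assert "GET".equals(repo.getQueryMethod());

    }

}

The design point is that GET is only attractive where stale reads are acceptable; the new POST default exists so that read/write end points always observe the current committed state.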
From: <tho...@us...> - 2013-05-30 16:00:30
Revision: 7172 http://bigdata.svn.sourceforge.net/bigdata/?rev=7172&view=rev Author: thompsonbry Date: 2013-05-30 16:00:14 +0000 (Thu, 30 May 2013) Log Message: ----------- - SolutionSetStream : javadoc and imports only. - ICheckpointProtocol : javadoc and imports only. - IBTreeManager: add methods to support GIST (register(name,metadata)::ICheckpointProtocol and getUnisolatedIndex(name)::ICheckpointProtocol). - IResourceManager: javadoc. - Stream: Javadoc. - AbstractTask: Implementations of new IBTreeManager methods. Attempted GIST refactoring. Hit blocking issues regarding the lack of a base class for ICheckpointProtocol and ILocalIndexView. This causes conflicts with IResourceManager as well. - JournalDelegate: added new IBTreeManager methods. - TemporaryStore: added new IBTreeManager methods; added @Override annotations; made the name2Addr field final; added unit tests for the new GIST methods on IBTreeManager. - TestDumpJournal, TestName2Addr, TestNamedIndices: removed use of getHTree() on AbstractJournal in favor of getUnisolatedIndex(). - CacheConnectionImpl: decoupled the SolutionSetCache from the CacheConnectionImpl prior to breaking the association between the DESCRIBE cache and durable named SOLUTION SETS. removed getSparqlCache(). This completely decouples the concept of the named solution set manager from the concept of a cache. - CacheConnectionFactory: Rewrote to the IJournal interface rather than AbstractJournal (might be able to rewrite to IBTreeManager). - SolutionSetCache: Refactored to use IJournal as the backing store and the new GIST methods on IBTreeManager. - SolutionSetCache => SolutionSetManager - ISolutionSetCache => ISolutionSetManager - BOpContext: modified to access the SolutionSetManager via its flyweight constructor. - NamedSolutionSetRefUtility: modified to use IBTreeManager rather than AbstractJournal (GIST). - QueryHints: Removed the SOLUTION_SET_CACHE query hint. This feature is always enabled (but is not yet supported for read/write transactions). 
@see https://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for NAMED SOLUTION SETS) @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/BOpContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IBTreeManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IResourceManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/JournalDelegate.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/TemporaryStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestDumpJournal.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestName2Addr.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestNamedIndices.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/journal/TestTemporaryStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/QueryHints.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysisBase.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/StaticAnalysis_CanJoin.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ICacheConnection.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUpdate.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/IEvaluationContext.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/cache/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestDescribe.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/TestBigdataSailWithQuads.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/ISolutionSetManager.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/ssets/SolutionSetManager.java 
branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/ssets/TestSolutionSetManager.java Removed Paths: ------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/SolutionSetCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/cache/TestSolutionSetCache.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/BOpContext.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/BOpContext.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -43,17 +43,13 @@ import com.bigdata.bop.join.BaseJoinStats; import com.bigdata.bop.join.IHashJoinUtility; import com.bigdata.btree.ISimpleIndexAccess; -import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.IIndexManager; -import com.bigdata.journal.ITx; -import com.bigdata.journal.TimestampUtility; +import com.bigdata.journal.IBTreeManager; import com.bigdata.rdf.internal.IV; import com.bigdata.rdf.internal.impl.bnode.SidIV; import com.bigdata.rdf.model.BigdataBNode; import com.bigdata.rdf.sparql.ast.QueryHints; -import com.bigdata.rdf.sparql.ast.cache.CacheConnectionFactory; -import com.bigdata.rdf.sparql.ast.cache.ICacheConnection; -import com.bigdata.rdf.sparql.ast.cache.ISolutionSetCache; +import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager; +import com.bigdata.rdf.sparql.ast.ssets.SolutionSetManager; import com.bigdata.rdf.spo.ISPO; import com.bigdata.rdf.spo.SPO; import com.bigdata.rdf.spo.SPOPredicate; @@ -61,7 +57,6 @@ import com.bigdata.relation.accesspath.IAccessPath; import com.bigdata.relation.accesspath.IBlockingBuffer; import com.bigdata.rwstore.sector.IMemoryManager; -import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.ChunkedFilter; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.CloseableIteratorWrapper; @@ -629,20 +624,20 @@ // Resolve the object which will give us access to the named // solution set. - final ICacheConnection cacheConn = CacheConnectionFactory - .getExistingCacheConnection(getRunningQuery() - .getQueryEngine()); +// final ICacheConnection cacheConn = CacheConnectionFactory +// .getExistingCacheConnection(getRunningQuery() +// .getQueryEngine()); final String namespace = namedSetRef.getNamespace(); final long timestamp = namedSetRef.getTimestamp(); - final ISolutionSetCache sparqlCache = cacheConn == null ? null - : cacheConn.getSparqlCache(namespace, timestamp); - // TODO ClassCastException is possible? 
- final AbstractJournal localIndexManager = (AbstractJournal) getIndexManager(); + final IBTreeManager localIndexManager = (IBTreeManager) getIndexManager(); + final ISolutionSetManager sparqlCache = new SolutionSetManager( + localIndexManager, namespace, timestamp); + return NamedSolutionSetRefUtility.getSolutionSet(// sparqlCache,// localIndexManager,// Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/NamedSolutionSetRefUtility.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -35,10 +35,11 @@ import com.bigdata.btree.IIndex; import com.bigdata.btree.ISimpleIndexAccess; import com.bigdata.journal.AbstractJournal; +import com.bigdata.journal.IBTreeManager; import com.bigdata.journal.ITx; import com.bigdata.journal.TimestampUtility; import com.bigdata.rdf.sparql.ast.ISolutionSetStats; -import com.bigdata.rdf.sparql.ast.cache.ISolutionSetCache; +import com.bigdata.rdf.sparql.ast.ssets.ISolutionSetManager; import com.bigdata.rdf.store.AbstractTripleStore; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.ICloseableIterator; @@ -144,6 +145,7 @@ } + @SuppressWarnings("rawtypes") final IVariable[] joinVars; { @@ -394,8 +396,8 @@ * the same data. */ public static ISolutionSetStats getSolutionSetStats(// - final ISolutionSetCache sparqlCache,// - final AbstractJournal localIndexManager, // + final ISolutionSetManager sparqlCache,// + final IBTreeManager localIndexManager, // final String namespace,// final long timestamp,// final String localName,// @@ -491,8 +493,8 @@ * {@link IIndex}? */ public static ICloseableIterator<IBindingSet[]> getSolutionSet( - final ISolutionSetCache sparqlCache,// - final AbstractJournal localIndexManager,// + final ISolutionSetManager sparqlCache,// + final IBTreeManager localIndexManager,// final String namespace,// final long timestamp,// final String localName,// @@ -558,6 +560,7 @@ + localName + ", joinVars=" + Arrays.toString(joinVars)); // Iterator visiting the solution set. + @SuppressWarnings("unchecked") final ICloseableIterator<IBindingSet> src = (ICloseableIterator<IBindingSet>) index .scan(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/solutions/SolutionSetStream.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -49,7 +49,6 @@ import com.bigdata.rdf.internal.encoder.SolutionSetStreamDecoder; import com.bigdata.rdf.internal.encoder.SolutionSetStreamEncoder; import com.bigdata.rdf.sparql.ast.ISolutionSetStats; -import com.bigdata.rdf.sparql.ast.SolutionSetStats; import com.bigdata.stream.Stream; import com.bigdata.striterator.Chunkerator; import com.bigdata.striterator.ICloseableIterator; @@ -171,7 +170,7 @@ * by {@link Checkpoint#create(IRawStore, IndexMetadata)} since * Stream.create() is being invoked rather than SolutionSetStream.create(). 
* - * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> */ public static SolutionSetStream create(final IRawStore store, final StreamIndexMetadata metadata) { @@ -202,10 +201,10 @@ } /** - * Return the address of the {@link SolutionSetStats} to be written into the + * Return the address of the {@link ISolutionSetStats} to be written into the * next {@link Checkpoint} record. The caller must have {@link #flush()} the * {@link SolutionSetStream} as a pre-condition (to ensure that the stats - * have been written out). If the {@link SolutionSetStats} are not loaded, + * have been written out). If the {@link ISolutionSetStats} are not loaded, * then the address from the last {@link Checkpoint} record is returned. */ public long getStatsAddr() { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/btree/ICheckpointProtocol.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -23,6 +23,7 @@ */ package com.bigdata.btree; +import com.bigdata.btree.view.FusedView; import com.bigdata.counters.ICounterSetAccess; import com.bigdata.journal.AbstractJournal; import com.bigdata.journal.AbstractTask; @@ -39,8 +40,13 @@ * TODO Try to lift out an abstract implementation of this interface for * HTree, BTree, and Stream. This will be another step towards GIST * support. There are protected methods which are used on those classes - * which should be lifted into the abstract base class. - */ + * which should be lifted into the abstract base class. Also, try to + * reconcile this interface with {@link ILocalBTreeView} implementations + * that do not implement {@link ICheckpointProtocol} ({@link FusedView}, + * {@link ReadCommittedView}). + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> + */ public interface ICheckpointProtocol extends ICommitter, ICounterSetAccess, ISimpleIndexAccess { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/AbstractTask.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -123,16 +123,6 @@ static protected final Logger log = Logger.getLogger(AbstractTask.class); /** - * True iff the {@link #log} level is INFO or less. - */ - final protected boolean INFO = log.isInfoEnabled(); - - /** - * True iff the {@link #log} level is DEBUG or less. - */ - final protected boolean DEBUG = log.isDebugEnabled(); - - /** * Used to protect against re-submission of the same task object. 
*/ private final AtomicBoolean submitted = new AtomicBoolean(false); @@ -463,7 +453,7 @@ if (commitList.put(name, this) != null) { - if (INFO) + if (log.isInfoEnabled()) log.info("Added index to commit list: name=" + name); } @@ -477,7 +467,7 @@ */ private void clearIndexCache() { - if (INFO) + if (log.isInfoEnabled()) log.info("Clearing hard reference cache: " + indexCache.size() + " indices accessed"); @@ -543,8 +533,9 @@ * @todo modify to return <code>null</code> if the index is not * registered? */ + @Override synchronized final public ILocalBTreeView getIndex(final String name) { - + if (name == null) { // @todo change to IllegalArgumentException for API consistency? @@ -1729,7 +1720,7 @@ MDC.put("timestamp", Long.valueOf(timestamp)); - if(INFO) + if(log.isInfoEnabled()) MDC.put("resources", Arrays.toString(resource)); } @@ -1744,7 +1735,7 @@ MDC.remove("timestamp"); - if(INFO) + if(log.isInfoEnabled()) MDC.remove("resources"); } @@ -1865,7 +1856,7 @@ if (isReadWriteTx) { - if (INFO) + if (log.isInfoEnabled()) log.info("Running read-write tx: timestamp=" + timestamp); // if(tx.isReadOnly()) { @@ -1915,7 +1906,7 @@ clearIndexCache(); - if(INFO) log.info("Reader is done: "+this); + if(log.isInfoEnabled()) log.info("Reader is done: "+this); } @@ -1934,7 +1925,7 @@ } finally { - if(INFO) log.info("done: "+this); + if(log.isInfoEnabled()) log.info("done: "+this); } @@ -1954,7 +1945,7 @@ final Thread t = Thread.currentThread(); - if(INFO) + if(log.isInfoEnabled()) log.info("Unisolated write task: " + this + ", thread=" + t); // // declare resource(s) to lock (exclusive locks are used). @@ -2027,7 +2018,7 @@ // set flag. ran = true; - if (INFO) + if (log.isInfoEnabled()) log.info("Task Ok: class=" + this); /* @@ -2049,7 +2040,7 @@ // Do not re-invoke it afterTask failed above. - if (INFO) + if (log.isInfoEnabled()) log.info("Task failed: class=" + this + " : " + t2); writeService.afterTask(this, t2); @@ -2343,6 +2334,8 @@ class IsolatedActionJournal implements IJournal, IAllocationContext { private final AbstractJournal delegate; + + @SuppressWarnings("rawtypes") private final IResourceLocator resourceLocator; public String toString() { @@ -2376,7 +2369,7 @@ * * @param source */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public IsolatedActionJournal(final AbstractJournal source) { if (source == null) @@ -2416,6 +2409,7 @@ /** * Delegates to the {@link AbstractTask}. */ + @Override public void dropIndex(final String name) { AbstractTask.this.dropIndex(name); @@ -2426,12 +2420,28 @@ * Note: This is the core implementation for registering an index - it * delegates to the {@link AbstractTask}. */ + @Override public IIndex registerIndex(final String name, final BTree btree) { return AbstractTask.this.registerIndex(name, btree); } + @Override + public ICheckpointProtocol register(final String name, final IndexMetadata metadata) { + + /* + * FIXME GIST : Support registration of index types other than BTree + * (HTree, Stream, etc). + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + */ + + throw new UnsupportedOperationException(); + + } + + @Override public void registerIndex(final IndexMetadata indexMetadata) { // delegate to core impl. @@ -2439,6 +2449,7 @@ } + @Override public IIndex registerIndex(final String name, final IndexMetadata indexMetadata) { @@ -2456,6 +2467,31 @@ /** * Note: access to an unisolated index is governed by the AbstractTask. 
*/ + @Override + public ICheckpointProtocol getUnisolatedIndex(String name) { + try { + + /* + * FIXME GIST. This will throw a ClassCastException if the + * returned index is an ILocalBTreeView. + * + * @see https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) + */ + + return (ICheckpointProtocol) AbstractTask.this.getIndex(name); + + } catch(NoSuchIndexException ex) { + + // api conformance. + return null; + + } + } + + /** + * Note: access to an unisolated index is governed by the AbstractTask. + */ + @Override public IIndex getIndex(final String name) { try { @@ -2476,25 +2512,60 @@ * declare a lock - such views will always be read-only and support * concurrent readers. */ - public IIndex getIndex(final String name, final long timestamp) { + @Override + public ICheckpointProtocol getIndexLocal(final String name, + final long commitTime) { - if (timestamp == ITx.UNISOLATED) { - - return getIndex(name); - + if (commitTime == ITx.UNISOLATED) { + + return getUnisolatedIndex(name); + } + + /* + * The index view is obtained from the resource manager. + */ + + if (resourceManager instanceof IJournal) { + + /* + * This code path supports any type of index (BTree, HTree, + * etc). + */ + + return ((IJournal) resourceManager).getIndexLocal(name, + commitTime); + + } + + /** + * FIXME GIST : This code path only supports BTree + * (ILocalBTreeView). An attempt to resolve an HTree or other + * non-BTree based named index data structure will probably result + * in a ClassCastException. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/585" + * > GIST </a> + */ + return (ICheckpointProtocol) resourceManager.getIndex(name, commitTime); - // the index view is obtained from the resource manager. - return resourceManager.getIndex(name, timestamp); + } + + @Override + public IIndex getIndex(final String name, final long timestamp) { + + return (IIndex) getIndexLocal(name, timestamp); } - + /** * Returns an {@link ITx#READ_COMMITTED} view if the index exists -or- * an {@link ITx#UNISOLATED} view IFF the {@link AbstractTask} declared * the name of the backing index as one of the resources for which it * acquired a lock. */ + @Override public SparseRowStore getGlobalRowStore() { // did the task declare the resource name? @@ -2510,6 +2581,7 @@ } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { if (!TimestampUtility.isReadOnly(timestamp)) { @@ -2547,6 +2619,7 @@ * declared the names of the backing indices as resources for which it * acquired a lock. */ + @Override public BigdataFileSystem getGlobalFileSystem() { // did the task declare the resource name? @@ -2583,6 +2656,7 @@ * and will break semantics when the task is isolated by a transaction * rather than unisolated. */ + @Override public TemporaryStore getTempStore() { return tempStoreFactory.getTempStore(); @@ -2590,24 +2664,28 @@ } private TemporaryStoreFactory tempStoreFactory = new TemporaryStoreFactory(); - public IResourceLocator getResourceLocator() { + @Override + public IResourceLocator<?> getResourceLocator() { return resourceLocator; } + @Override public ILocalTransactionManager getLocalTransactionManager() { return delegate.getLocalTransactionManager(); } + @Override public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); @@ -2618,34 +2696,42 @@ * Disallowed methods (commit protocol and shutdown protocol). 
*/ + @Override public void abort() { throw new UnsupportedOperationException(); } + @Override public void close() { throw new UnsupportedOperationException(); } + @Override public void destroy() { throw new UnsupportedOperationException(); } + @Override public void deleteResources() { throw new UnsupportedOperationException(); } + @Override public long commit() { throw new UnsupportedOperationException(); } + @Override public void setCommitter(int index, ICommitter committer) { throw new UnsupportedOperationException(); } + @Override public void shutdown() { throw new UnsupportedOperationException(); } + @Override public void shutdownNow() { throw new UnsupportedOperationException(); } @@ -2658,70 +2744,87 @@ // return delegate.getKeyBuilder(); // } + @Override public void force(final boolean metadata) { delegate.force(metadata); } + @Override public int getByteCount(final long addr) { return delegate.getByteCount(addr); } + @Override public ICommitRecord getCommitRecord(final long timestamp) { return delegate.getCommitRecord(timestamp); } + @Override public CounterSet getCounters() { return delegate.getCounters(); } + @Override public File getFile() { return delegate.getFile(); } - + + @Override public long getOffset(final long addr) { return delegate.getOffset(addr); } + @Override public long getPhysicalAddress(final long addr) { return delegate.getPhysicalAddress(addr); } + @Override public Properties getProperties() { return delegate.getProperties(); } + @Override public UUID getUUID() { return delegate.getUUID(); } + @Override public IResourceMetadata getResourceMetadata() { return delegate.getResourceMetadata(); } + @Override public long getRootAddr(final int index) { return delegate.getRootAddr(index); } + @Override public long getLastCommitTime() { return delegate.getLastCommitTime(); } + @Override public IRootBlockView getRootBlockView() { return delegate.getRootBlockView(); } + @Override public boolean isFullyBuffered() { return delegate.isFullyBuffered(); } + @Override public boolean isOpen() { return delegate.isOpen(); } + @Override public boolean isReadOnly() { return delegate.isReadOnly(); } + @Override public boolean isStable() { return delegate.isStable(); } @@ -2730,26 +2833,32 @@ // delegate.packAddr(out, addr); // } + @Override public ByteBuffer read(final long addr) { return delegate.read(addr); } + @Override public long size() { return delegate.size(); } + @Override public long toAddr(final int nbytes, final long offset) { return delegate.toAddr(nbytes, offset); } + @Override public String toString(final long addr) { return delegate.toString(addr); } + @Override public IRootBlockView getRootBlock(final long commitTime) { return delegate.getRootBlock(commitTime); } + @Override public Iterator<IRootBlockView> getRootBlocks(final long startTime) { return delegate.getRootBlocks(startTime); } @@ -2762,6 +2871,7 @@ * allocations to be scoped to the AbstractTask. 
*/ + @Override public long write(final ByteBuffer data) { return delegate.write(data, this); } @@ -2782,6 +2892,7 @@ return delegate.getInputStream(addr); } + @Override public void delete(final long addr) { delegate.delete(addr, this); } @@ -2808,19 +2919,23 @@ completeTask(); } + @Override public ScheduledFuture<?> addScheduledTask(final Runnable task, final long initialDelay, final long delay, final TimeUnit unit) { return delegate.addScheduledTask(task, initialDelay, delay, unit); } + @Override public boolean getCollectPlatformStatistics() { return delegate.getCollectPlatformStatistics(); } + @Override public boolean getCollectQueueStatistics() { return delegate.getCollectQueueStatistics(); } + @Override public int getHttpdPort() { return delegate.getHttpdPort(); } @@ -2849,6 +2964,8 @@ private class ReadOnlyJournal implements IJournal { private final IJournal delegate; + + @SuppressWarnings("rawtypes") private final DefaultResourceLocator resourceLocator; public String toString() { @@ -2857,7 +2974,7 @@ } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) public ReadOnlyJournal(final AbstractJournal source) { if (source == null) @@ -2885,17 +3002,13 @@ * do). */ - /** - * {@inheritDoc} - * <p> - * Note: Does not allow access to {@link ITx#UNISOLATED} indices. - */ + @Override public IIndex getIndex(final String name, final long timestamp) { - + if (timestamp == ITx.UNISOLATED) throw new UnsupportedOperationException(); - if(timestamp == AbstractTask.this.timestamp) { + if (timestamp == AbstractTask.this.timestamp) { // to the AbstractTask try { @@ -2912,10 +3025,48 @@ } // to the backing journal. - return delegate.getIndex(name, timestamp); + return (IIndex) delegate.getIndexLocal(name, timestamp); } + + /** + * {@inheritDoc} + * <p> + * Note: Does not allow access to {@link ITx#UNISOLATED} indices. + */ + @Override + public ICheckpointProtocol getIndexLocal(final String name, + final long commitTime) { + if (timestamp == ITx.UNISOLATED) + throw new UnsupportedOperationException(); + + if (timestamp == AbstractTask.this.timestamp) { + + // to the AbstractTask + try { + + /* + * FIXME GIST : This will throw a ClassCastException if the + * index type is ReadCommittedIndex or FusedView. + */ + return (ICheckpointProtocol) AbstractTask.this + .getIndex(name); + + } catch (NoSuchIndexException ex) { + + // api conformance. + return null; + + } + + } + + // to the backing journal. + return delegate.getIndexLocal(name, timestamp); + + } + /** * {@inheritDoc} * <p> @@ -2937,30 +3088,53 @@ * Note: Not supported since this method returns the * {@link ITx#UNISOLATED} index. */ + @Override + public ICheckpointProtocol getUnisolatedIndex(String name) { + + throw new UnsupportedOperationException(); + + } + + /** + * Note: Not supported since this method returns the + * {@link ITx#UNISOLATED} index. 
+ */ + @Override public IIndex getIndex(String name) { throw new UnsupportedOperationException(); } + @Override public void dropIndex(String name) { throw new UnsupportedOperationException(); } + @Override + public ICheckpointProtocol register(String name, IndexMetadata metadata) { + + throw new UnsupportedOperationException(); + + } + + @Override public void registerIndex(IndexMetadata indexMetadata) { throw new UnsupportedOperationException(); } + @Override public IIndex registerIndex(String name, BTree btree) { throw new UnsupportedOperationException(); } + @Override public IIndex registerIndex(String name, IndexMetadata indexMetadata) { throw new UnsupportedOperationException(); @@ -2971,6 +3145,7 @@ * Returns an {@link ITx#READ_COMMITTED} view iff the index exists and * <code>null</code> otherwise. */ + @Override public SparseRowStore getGlobalRowStore() { /* @@ -3000,6 +3175,7 @@ } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { /* @@ -3036,6 +3212,7 @@ * Returns an {@link ITx#READ_COMMITTED} view iff the file system exists * and <code>null</code> otherwise. */ + @Override public BigdataFileSystem getGlobalFileSystem() { /* @@ -3085,6 +3262,7 @@ * and will break semantics when the task is isolated by a transaction * rather than unisolated. */ + @Override public TemporaryStore getTempStore() { return tempStoreFactory.getTempStore(); @@ -3092,24 +3270,28 @@ } private TemporaryStoreFactory tempStoreFactory = new TemporaryStoreFactory(); - public DefaultResourceLocator getResourceLocator() { + @Override + public DefaultResourceLocator<?> getResourceLocator() { return resourceLocator; } + @Override public ILocalTransactionManager getLocalTransactionManager() { return delegate.getLocalTransactionManager(); } + @Override public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); @@ -3120,34 +3302,42 @@ * Disallowed methods (commit and shutdown protocols). */ + @Override public void abort() { throw new UnsupportedOperationException(); } + @Override public void close() { throw new UnsupportedOperationException(); } + @Override public void destroy() { throw new UnsupportedOperationException(); } + @Override public long commit() { throw new UnsupportedOperationException(); } + @Override public void deleteResources() { throw new UnsupportedOperationException(); } + @Override public void setCommitter(int index, ICommitter committer) { throw new UnsupportedOperationException(); } + @Override public void shutdown() { throw new UnsupportedOperationException(); } + @Override public void shutdownNow() { throw new UnsupportedOperationException(); } @@ -3156,10 +3346,12 @@ * Disallowed methods (methods that write on the store). */ + @Override public void force(boolean metadata) { throw new UnsupportedOperationException(); } + @Override public long write(ByteBuffer data) { throw new UnsupportedOperationException(); } @@ -3169,6 +3361,7 @@ // throw new UnsupportedOperationException(); // } + @Override public void delete(long addr) { throw new UnsupportedOperationException(); } @@ -3177,107 +3370,133 @@ * Methods that delegate directly to the backing journal. 
*/ + @Override public int getByteCount(long addr) { return delegate.getByteCount(addr); } + @Override public ICommitRecord getCommitRecord(long timestamp) { return delegate.getCommitRecord(timestamp); } + @Override public CounterSet getCounters() { return delegate.getCounters(); } + @Override public File getFile() { return delegate.getFile(); } + @Override public long getOffset(long addr) { return delegate.getOffset(addr); } + @Override public long getPhysicalAddress(final long addr) { return delegate.getPhysicalAddress(addr); } + @Override public Properties getProperties() { return delegate.getProperties(); } + @Override public UUID getUUID() { return delegate.getUUID(); } + @Override public IResourceMetadata getResourceMetadata() { return delegate.getResourceMetadata(); } + @Override public long getRootAddr(int index) { return delegate.getRootAddr(index); } + @Override public long getLastCommitTime() { return delegate.getLastCommitTime(); } + @Override public IRootBlockView getRootBlockView() { return delegate.getRootBlockView(); } + @Override public boolean isFullyBuffered() { return delegate.isFullyBuffered(); } + @Override public boolean isOpen() { return delegate.isOpen(); } + @Override public boolean isReadOnly() { return delegate.isReadOnly(); } + @Override public boolean isStable() { return delegate.isStable(); } + @Override public ByteBuffer read(long addr) { return delegate.read(addr); } + @Override public long size() { return delegate.size(); } + @Override public long toAddr(int nbytes, long offset) { return delegate.toAddr(nbytes, offset); } + @Override public String toString(long addr) { return delegate.toString(addr); } + @Override public IRootBlockView getRootBlock(long commitTime) { return delegate.getRootBlock(commitTime); } + @Override public Iterator<IRootBlockView> getRootBlocks(long startTime) { return delegate.getRootBlocks(startTime); } + @Override public ScheduledFuture<?> addScheduledTask(Runnable task, long initialDelay, long delay, TimeUnit unit) { return delegate.addScheduledTask(task, initialDelay, delay, unit); } + @Override public boolean getCollectPlatformStatistics() { return delegate.getCollectPlatformStatistics(); } + @Override public boolean getCollectQueueStatistics() { return delegate.getCollectQueueStatistics(); } + @Override public int getHttpdPort() { return delegate.getHttpdPort(); } @@ -3307,71 +3526,87 @@ private IIndexManager delegate; - public DelegateIndexManager(IIndexManager delegate) { + public DelegateIndexManager(final IIndexManager delegate) { this.delegate = delegate; } + @Override public void dropIndex(String name) { delegate.dropIndex(name); } + @Override public ExecutorService getExecutorService() { return delegate.getExecutorService(); } + @Override public BigdataFileSystem getGlobalFileSystem() { return delegate.getGlobalFileSystem(); } + @Override public SparseRowStore getGlobalRowStore() { return delegate.getGlobalRowStore(); } + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { return delegate.getGlobalRowStore(timestamp); } + @Override public IIndex getIndex(String name, long timestamp) { return delegate.getIndex(name, timestamp); } + @Override public long getLastCommitTime() { return delegate.getLastCommitTime(); } - public IResourceLocator getResourceLocator() { + @Override + public IResourceLocator<?> getResourceLocator() { return delegate.getResourceLocator(); } + @Override public IResourceLockService getResourceLockService() { return delegate.getResourceLockService(); } + @Override public void 
registerIndex(IndexMetadata indexMetadata) { delegate.registerIndex(indexMetadata); } + @Override public void destroy() { delegate.destroy(); } + @Override public TemporaryStore getTempStore() { return delegate.getTempStore(); } + @Override public ScheduledFuture<?> addScheduledTask(Runnable task, long initialDelay, long delay, TimeUnit unit) { return delegate.addScheduledTask(task, initialDelay, delay, unit); } + @Override public boolean getCollectPlatformStatistics() { return delegate.getCollectPlatformStatistics(); } + @Override public boolean getCollectQueueStatistics() { return delegate.getCollectQueueStatistics(); } + @Override public int getHttpdPort() { return delegate.getHttpdPort(); } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IBTreeManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IBTreeManager.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IBTreeManager.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -29,17 +29,21 @@ package com.bigdata.journal; import com.bigdata.btree.BTree; +import com.bigdata.btree.Checkpoint; import com.bigdata.btree.ICheckpointProtocol; import com.bigdata.btree.IIndex; +import com.bigdata.btree.ILocalBTreeView; import com.bigdata.btree.IndexMetadata; import com.bigdata.btree.view.FusedView; import com.bigdata.htree.HTree; +import com.bigdata.rawstore.IRawStore; import com.bigdata.service.IDataService; import com.bigdata.service.IMetadataService; import com.bigdata.service.ndx.IClientIndex; /** - * Extended to allow direct registration of a named {@link BTree}. + * Interface for management of local index resources such as {@link BTree}, + * {@link HTree}, etc. * * @todo change registerIndex() methods to return void and have people use * {@link #getIndex(String)} to obtain the view after they have registered @@ -50,6 +54,9 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST + * </a> */ public interface IBTreeManager extends IIndexManager { @@ -99,7 +106,7 @@ * * @return The object that would be returned by {@link #getIndex(String)}. * - * @see #register(String, IndexMetadata) + * @see #registerIndex(String, IndexMetadata) * * @exception IndexExistsException * if there is an index already registered under that name. @@ -125,6 +132,23 @@ public IIndex registerIndex(String name, IndexMetadata indexMetadata); /** + * Variant method creates and registered a named persistence capable data + * structure but does not assume that the data structure will be a + * {@link BTree}. + * + * @param store + * The backing store. + * @param metadata + * The metadata that describes the data structure to be created. + * + * @return The persistence capable data structure. + * + * @see Checkpoint#create(IRawStore, IndexMetadata) + */ + public ICheckpointProtocol register(final String name, + final IndexMetadata metadata); + + /** * Return the unisolated view of the named index (the mutable view of the * live index object). * @@ -139,4 +163,54 @@ */ public IIndex getIndex(String name); + /** + * Return the mutable view of the named persistence capable data structure + * (aka the "live" or {@link ITx#UNISOLATED} view). + * <p> + * Note: {@link #getIndex(String)} delegates to this method and then casts + * the result to an {@link IIndex}. 
This is the core implementation to + * access an existing named index. + * + * @return The mutable view of the persistence capable data structure. + * + * @see #getIndex(String) + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > + * GIST </a> + */ + public ICheckpointProtocol getUnisolatedIndex(final String name); + + /** + * Core implementation for access to historical index views. + * <p> + * Note: Transactions should pass in the timestamp against which they are + * reading rather than the transaction identifier (aka startTime). By + * providing the timestamp of the commit point, the transaction will hit the + * {@link #indexCache}. If the transaction passes the startTime instead, + * then all startTimes will be different and the cache will be defeated. + * + * @throws UnsupportedOperationException + * If you pass in {@link ITx#UNISOLATED}, + * {@link ITx#READ_COMMITTED}, or a timestamp that corresponds + * to a read-write transaction since those are not "commit + * times". + * + * @see IIndexStore#getIndex(String, long) + * + * @see <a href="http://sourceforge.net/apps/trac/bigdata/ticket/546" > Add + * cache for access to historical index views on the Journal by name + * and commitTime. </a> + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > + * GIST </a> + * + * FIXME GIST : Reconcile with + * {@link IResourceManager#getIndex(String, long)}. They are returning + * types that do not overlap ({@link ICheckpointProtocol} and + * {@link ILocalBTreeView}). This is blocking the support of GIST in + * {@link AbstractTask}. + */ + public ICheckpointProtocol getIndexLocal(final String name, + final long commitTime); + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IResourceManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IResourceManager.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/IResourceManager.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -41,6 +41,7 @@ import com.bigdata.btree.IndexSegmentStore; import com.bigdata.btree.view.FusedView; import com.bigdata.counters.CounterSet; +import com.bigdata.htree.HTree; import com.bigdata.rawstore.IRawStore; import com.bigdata.resources.ResourceManager; import com.bigdata.resources.StaleLocatorException; @@ -163,9 +164,9 @@ * @param timestamp * A transaction identifier, {@link ITx#UNISOLATED} for the * unisolated index view, {@link ITx#READ_COMMITTED}, or - * <code>timestamp</code> for a historical view no later than - * the specified timestamp. - * + * <code>timestamp</code> for a historical view no later than the + * specified timestamp. + * * @return The index or <code>null</code> iff there is no index registered * with that name for that <i>timestamp</i>, including if the * timestamp is a transaction identifier and the transaction is @@ -181,6 +182,14 @@ * been split, joined or moved. * * @see IIndexStore#getIndex(String, long) + * + * FIXME GIST - this only supports {@link ILocalBTreeView}. We need to + * also support {@link HTree}, etc. See + * {@link IBTreeManager#getIndexLocal(String, long)} which is the + * corresponding method for local stores. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > + * GIST </a> */ public ILocalBTreeView getIndex(String name, long timestamp); @@ -286,7 +295,7 @@ * if the {@link IResourceManager} is not part of an * {@link IBigdataFederation}. */ - public IBigdataFederation getFederation(); + public IBigdataFederation<?> getFederation(); // /** // * Return the ordered {@link UUID}[] of the physical {@link IDataService} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/JournalDelegate.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/JournalDelegate.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -36,11 +36,11 @@ import com.bigdata.bfs.BigdataFileSystem; import com.bigdata.btree.BTree; +import com.bigdata.btree.ICheckpointProtocol; import com.bigdata.btree.IIndex; import com.bigdata.btree.IndexMetadata; import com.bigdata.counters.CounterSet; import com.bigdata.mdi.IResourceMetadata; -import com.bigdata.rawstore.IAllocationContext; import com.bigdata.rawstore.IPSOutputStream; import com.bigdata.relation.locator.IResourceLocator; import com.bigdata.sparse.SparseRowStore; @@ -287,4 +287,20 @@ public boolean isDirty() { return delegate.isDirty(); } + + @Override + public ICheckpointProtocol register(String name, IndexMetadata metadata) { + return delegate.register(name, metadata); + } + + @Override + public ICheckpointProtocol getIndexLocal(String name, long commitTime) { + return delegate.getIndexLocal(name, commitTime); + } + + @Override + public ICheckpointProtocol getUnisolatedIndex(String name) { + return delegate.getUnisolatedIndex(name); + } + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/TemporaryStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2013-05-30 15:40:39 UTC (rev 7171) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/journal/TemporaryStore.java 2013-05-30 16:00:14 UTC (rev 7172) @@ -45,7 +45,6 @@ import com.bigdata.btree.Checkpoint; import com.bigdata.btree.ICheckpointProtocol; import com.bigdata.btree.IndexMetadata; -import com.bigdata.htree.HTree; import com.bigdata.journal.Name2Addr.Entry; import com.bigdata.rawstore.IRawStore; import com.bigdata.rawstore.WormAddressManager; @@ -67,8 +66,7 @@ * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ * - * FIXME GIST This should support generalized indices (HTree, Stream, etc) not just - * BTree. + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/585" > GIST </a> */ //* {@link #checkpoint()} may be used to checkpoint the indices and //* {@link #restoreLastCheckpoint()} may be used to revert to the last @@ -101,6 +99,16 @@ .parseLong(Options.DEFAULT_LIVE_INDEX_CACHE_TIMEOUT); /** + * BTree mapping index names to the last metadata record committed for the + * named index. The keys are index names (unicode strings). The values are + * the last known address of the named btree. + * <p> + * Note: This is a mutable {@link BTree} so it is NOT thread-safe. We always + * synchronize on this object before accessing it. + */ + private final Name2Addr name2Addr; + + /** * A {@link TemporaryStore} that can scale-up. 
The backing file will be * created using the Java temporary file mechanism. * @@ -139,12 +147,15 @@ * @param file * The backing file (may exist, but must be empty if it exists). */ + @SuppressWarnings({ "unchecked", "rawtypes" }) public TemporaryStore(final int offsetBits, final File file) { super(0L/* maximumExtent */, offsetBits, file); - setupName2AddrBTree(); + name2Addr = Name2Addr.create(this); + name2Addr.setupCache(liveIndexCacheCapacity, liveIndexCacheTimeout); + executorService = Executors.newCachedThreadPool(new DaemonThreadFactory (getClass().getName()+".executorService")); @@ -155,28 +166,18 @@ } - /** - * BTree mapping index names to the last metadata record committed for the - * named index. The keys are index names (unicode strings). The values are - * the last known address of the named btree. - * <p> - * Note: This is a mutable {@link BTree} so it is NOT thread-safe. We always - * synchronize on this object before accessing it. - */ - private Name2Addr name2Addr; - - /** - * Setup the btree that resolved named btrees. - */ - private void setupName2AddrBTree() { - - assert name2Addr == null; - - name2Addr = Name2Addr.create(this); - - name2Addr.setupCache(liveIndexCacheCapacity, liveIndexCacheTimeout); - - } +// /** +// * Setup the btree that resolved named btrees. +// */ +// private void setupName2AddrBTree() { +// +// assert name2Addr == null; +// +// name2Addr = Name2Addr.create(this); +// +// name2Addr.setupCache(liveIndexCacheCapacity, liveIndexCacheTimeout); +// +// } // /** // * The address of the last checkpoint written. When ZERO(0L) no checkpoint @@ -252,12 +253,14 @@ // // } + @Override public void registerIndex(final IndexMetadata metadata) { registerIndex(metadata.getName(), metadata); } + @Override public BTree registerIndex(final String name, final IndexMetadata metadata) { return (BTree) register(name, metadata); @@ -278,6 +281,7 @@ * * @see Checkpoint#create(IRawStore, IndexMetadata) */ + @Override public ICheckpointProtocol register(final String name, final IndexMetadata metadata) { @@ -289,6 +293,7 @@ } + @Override final public BTree registerIndex(final String name, final BTree btree) { registerIndex(name, btree); @@ -305,7 +310,7 @@ * @param ndx * The data structure. */ - final public void register(final String name, final ICheckpointProtocol ndx) { + private final void register(final String name, final ICheckpointProtocol ndx) { synchronized (name2Addr) { @@ -318,6 +323,7 @@ } + @Override public void dropIndex(final String name) { synchronized(name2Addr) { @@ -331,6 +337,7 @@ } + @Override public Iterator<String> indexNameScan(final String prefix, final long timestampIsIgnored) { @@ -353,58 +360,68 @@ } - /** - * Return an {@link ITx#UNISOLATED} view of the named index -or- - * <code>null</code> if there is no registered index by that name. - */ - public BTree getIndex(final String name) { + @Override + public ICheckpointProtocol getUnisolatedIndex(final String name) { synchronized(name2Addr) { assertOpen(); - return (BTree) name2Addr.getIndex(name); + return name2Addr.getIndex(name); } - + } + +// /** +// * Return an {@link ITx#UNISOLATED} view of the named index -or- +// * <code>null</code> if there is no registered index by that name. +// */ + @Override + public BTree getIndex(final String name) { - /** - * Return an {@link ITx#UNISOLATED} view of the named index -or- - * <code>null</code> if there is no registered index by that name. 
- */ - public HTree getHTree(final String name) { + return (BTree) getUnisolatedIndex(name); - synchronized(name2Addr) { + } - assertOpen(); +// /** +// * Return an {@link ITx#UNISOLATED} view of the named index -or- +// * <code>null</code> if there is no registered index by that name. +// */ +// public HTree getHTree(final String name) { +// +// return (HTree) getUnisolatedIndex(name); +// +// } - return (HTree) name2Addr.getIndex(name); - - } + @Override + public BTree getIndex(final String name, final long timestamp) { + + return (BTree) getIndexLocal(name, timestamp); } /** - * Historical reads and transa... [truncated message content] |
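The IBTreeManager additions in the diff above split index access into two GIST-aware entry points: getUnisolatedIndex(name) for the live mutable view and getIndexLocal(name, commitTime) for read-only historical views. The following is a minimal sketch of how a caller might dispatch between the two; the helper class and method are hypothetical and not part of the commit, and the timestamp handling follows the javadoc above (only ITx.UNISOLATED or an actual commit time is accepted by getIndexLocal).

import com.bigdata.btree.ICheckpointProtocol;
import com.bigdata.journal.IBTreeManager;
import com.bigdata.journal.ITx;

// Hypothetical helper, for illustration only.
public class GistViewExample {

    /**
     * Resolve a named index view: the live (unisolated) view for
     * ITx.UNISOLATED, otherwise a read-only view as of the given commit time.
     * Per the javadoc above, the timestamp must be a commit time here, not
     * READ_COMMITTED and not a read-write transaction identifier.
     */
    static ICheckpointProtocol resolve(final IBTreeManager indexManager,
            final String name, final long timestamp) {

        if (timestamp == ITx.UNISOLATED) {

            // Mutable "live" view - NOT thread-safe; the caller must hold the
            // appropriate exclusive lock before writing on it.
            return indexManager.getUnisolatedIndex(name);

        }

        // Historical view as of the given commit time (may be null if the
        // index was not registered at that commit point).
        return indexManager.getIndexLocal(name, timestamp);

    }

}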
From: <tho...@us...> - 2013-05-30 15:40:46
Revision: 7171 http://bigdata.svn.sourceforge.net/bigdata/?rev=7171&view=rev Author: thompsonbry Date: 2013-05-30 15:40:39 +0000 (Thu, 30 May 2013) Log Message: ----------- Removing some methods, and commenting other methods for removal, once we merge in the delta from the development branch. These methods have to do with the GIST API. The changes in the development branch were made in furtherment of the GIST API [1] and the SPARQL UPDATE for NAMED SOLUTION SETs[2]. [1] https://sourceforge.net/apps/trac/bigdata/ticket/585 (GIST) [2] https://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for NAMED SOLUTION SETS) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-29 14:47:12 UTC (rev 7170) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-30 15:40:39 UTC (rev 7171) @@ -165,7 +165,6 @@ import com.bigdata.service.AbstractHATransactionService; import com.bigdata.service.AbstractTransactionService; import com.bigdata.service.IBigdataFederation; -import com.bigdata.stream.Stream; import com.bigdata.util.ChecksumUtility; import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.NT; @@ -4498,6 +4497,7 @@ * cache for access to historical index views on the Journal by name * and commitTime. </a> */ +// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. final public ICheckpointProtocol getIndexLocal(final String name, final long commitTime) { @@ -4975,6 +4975,7 @@ * * @see Checkpoint#create(IRawStore, IndexMetadata) */ +// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. public ICheckpointProtocol register(final String name, final IndexMetadata metadata) { @@ -5186,30 +5187,34 @@ * * @return The mutable view of the index. * - * @see #getLiveView(String, long) + * @see #getUnisolatedIndex(String) + * + * @deprecated Use {@link #getUnisolatedIndex(String)} */ +// TODO Remove method once change in IBTreeManager merged into READ_CACHE branch. + @Deprecated final public HTree getHTree(final String name) { return (HTree) getUnisolatedIndex(name); } - /** - * Return the mutable view of the named index (aka the "live" or - * {@link ITx#UNISOLATED} index). This object is NOT thread-safe. You MUST - * NOT write on this index unless you KNOW that you are the only writer. See - * {@link ConcurrencyManager}, which handles exclusive locks for - * {@link ITx#UNISOLATED} indices. - * - * @return The mutable view of the index. - * - * @see #getLiveView(String, long) - */ - final public Stream getStream(final String name) { - - return (Stream) getUnisolatedIndex(name); - - } +// /** +// * Return the mutable view of the named index (aka the "live" or +// * {@link ITx#UNISOLATED} index). This object is NOT thread-safe. You MUST +// * NOT write on this index unless you KNOW that you are the only writer. See +// * {@link ConcurrencyManager}, which handles exclusive locks for +// * {@link ITx#UNISOLATED} indices. +// * +// * @return The mutable view of the index. 
+// * +// * @see #getLiveView(String, long) +// */ +// final public Stream getStream(final String name) { +// +// return (Stream) getUnisolatedIndex(name); +// +// } /** * Return the mutable view of the named persistence capable data structure @@ -5217,6 +5222,7 @@ * * @return The mutable view of the persistence capable data structure. */ +// @Override TODO Add @Override once change in IBTreeManager merged into READ_CACHE branch. final public ICheckpointProtocol getUnisolatedIndex(final String name) { final ReadLock lock = _fieldReadWriteLock.readLock(); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
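With revision 7171, the type-specific accessor getHTree() is deprecated (and getStream() is commented out) in favor of the generalized register(name, metadata) / getUnisolatedIndex(name) pair. The sketch below shows the intended calling pattern against a Journal; the helper method, the Journal variable, and the index name are assumptions for illustration, and the cast to BTree reflects that a plain IndexMetadata still creates a BTree while HTree, Stream, etc. are meant to use the same entry point once the GIST work lands.

import java.util.UUID;

import com.bigdata.btree.BTree;
import com.bigdata.btree.IndexMetadata;
import com.bigdata.journal.Journal;

// Hypothetical usage; the Journal instance and index name are illustrative.
public class RegisterExample {

    static BTree registerAndGet(final Journal journal, final String name) {

        // GIST entry point: creates and registers the data structure described
        // by the metadata (a BTree for a plain IndexMetadata).
        journal.register(name, new IndexMetadata(name, UUID.randomUUID()));

        // Later lookups go through getUnisolatedIndex(); the caller casts to
        // the concrete type instead of using type-specific methods such as the
        // now-deprecated getHTree().
        return (BTree) journal.getUnisolatedIndex(name);

    }

}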
From: <tho...@us...> - 2013-05-29 14:47:20
Revision: 7170 http://bigdata.svn.sourceforge.net/bigdata/?rev=7170&view=rev Author: thompsonbry Date: 2013-05-29 14:47:12 +0000 (Wed, 29 May 2013) Log Message: ----------- We have identified a concurrency hole in the IRWStrategy.commit() / IRWStrategy.postCommit() logic. The Journal is MRMW (multiple readers, multiple writers). However, the RWStore allocation write lock needs to be held across those two method calls. The same problem exists for the MemStore. A new getCommitLock() method was added to the IRWStrategy and IStore interfaces. The postCommit() methods now assert that the caller is holding the appropriate lock. This forces the caller to ensure that they have acquired the commitLock before invoking IRWStrategy.commit(). Removed WCS.isFlush() since the assumptions implied for the method were not valid when it was invoked and added some asserts for the same criteria into WCS.flush(). Took out code path in HAJournalServer.handleReplicatedWrite() where it was rethrowing the root cause after entering the error state. In fact, we do not want to propagate the root cause back to the leader since that can cause an uncurable error where an update might otherwise complete with a majority of the services if one service enters an error state. WCS compaction is now enabled by default. HALog compression is now enabled by default. See https://sourceforge.net/apps/trac/bigdata/ticket/674 (WCS write cache compaction causes errors in RWS postHACommit()) See https://sourceforge.net/apps/trac/bigdata/ticket/557 (StressTestConcurrentTx may ignore real errors) See https://sourceforge.net/apps/trac/bigdata/ticket/652 (Compress write cache blocks for replication and in HALogs) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IRWStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IStore.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/AllocationContext.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/sector/TestMemStore.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -550,7 +550,7 @@ * WCS write cache compaction causes errors in RWS postHACommit() * </a> */ - this.compactionEnabled = false; //canCompact() && compactionThreshold < 100; + this.compactionEnabled = canCompact() && compactionThreshold < 100; if (log.isInfoEnabled()) log.info("Compaction Enabled: " + compactionEnabled @@ -2196,10 +2196,14 @@ try { if(!halt) { /* - * Can not check assertion if there is an existing + * Check assertions for clean WCS after flush(). 
+ * + * Note: Can not check assertion if there is an existing * exception. */ + assert dirtyList.size() == 0; assert compactingCacheRef.get() == null; + assert current.get() == null; } } finally { dirtyListLock.unlock(); @@ -3841,22 +3845,22 @@ } } - /** - * Debug method to verify that the {@link WriteCacheService} has flushed all - * {@link WriteCache} buffers. - * - * @return whether there are no outstanding writes buffered - */ - public boolean isFlushed() { - - final boolean clear = - dirtyList.size() == 0 - && compactingCacheRef.get() == null - && (current.get() == null || current.get().isEmpty()); - - return clear; - - } +// /** +// * Debug method to verify that the {@link WriteCacheService} has flushed all +// * {@link WriteCache} buffers. +// * +// * @return whether there are no outstanding writes buffered +// */ +// public boolean isFlushed() { +// +// final boolean clear = +// dirtyList.size() == 0 +// && compactingCacheRef.get() == null +// && (current.get() == null || current.get().isEmpty()); +// +// return clear; +// +// } /** * An array of writeCache actions is maintained that can be used Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -114,7 +114,6 @@ import com.bigdata.ha.msg.IHADigestRequest; import com.bigdata.ha.msg.IHADigestResponse; import com.bigdata.ha.msg.IHAGatherReleaseTimeRequest; -import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; import com.bigdata.ha.msg.IHALogDigestRequest; import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHALogRequest; @@ -3192,7 +3191,9 @@ * retained. */ if (_bufferStrategy instanceof IHistoryManager) { + ((IHistoryManager) _bufferStrategy).checkDeferredFrees(this); + } /* @@ -3244,6 +3245,22 @@ } /* + * Conditionally obtain a lock that will protect the + * commit()/postCommit() protocol. + */ + final long nextOffset; + final Lock commitLock; + if (_bufferStrategy instanceof IRWStrategy) { + commitLock = ((IRWStrategy) _bufferStrategy).getCommitLock(); + } else { + commitLock = null; + } + if (commitLock != null) { + // Take the commit lock. + commitLock.lock(); + } + try { + /* * Call commit on buffer strategy prior to retrieving root block, * required for RWStore since the metaBits allocations are not made * until commit, leading to invalid addresses for recent store @@ -3264,15 +3281,14 @@ * does not create much latency because the WriteCacheService drains * the dirtyList in a seperate thread. */ - _bufferStrategy.commit(); + _bufferStrategy.commit(); + + /* + * The next offset at which user data would be written. + * Calculated, after commit! + */ + nextOffset = _bufferStrategy.getNextOffset(); - /* - * next offset at which user data would be written. - * Calculated, after commit! 
- */ - - final long nextOffset = _bufferStrategy.getNextOffset(); - final long blockSequence; if (_bufferStrategy instanceof IHABufferStrategy) { @@ -3281,20 +3297,6 @@ blockSequence = ((IHABufferStrategy) _bufferStrategy) .getBlockSequence(); - if (!((IHABufferStrategy) _bufferStrategy) - .getWriteCacheService().isFlushed()) { - - /** - * @see <a - * href="https://sourceforge.net/apps/trac/bigdata/ticket/674" - * > WCS write cache compaction causes errors in RWS - * postHACommit() </a> - */ - - throw new AssertionError(); - - } - } else { blockSequence = old.getBlockSequence(); @@ -3381,11 +3383,16 @@ // write the root block on to the backing store. _bufferStrategy.writeRootBlock(newRootBlock, forceOnCommit); - // Now the root blocks are down we can commit any - // transient state - if (_bufferStrategy instanceof IRWStrategy) { - ((IRWStrategy) _bufferStrategy).postCommit(); - } + if (_bufferStrategy instanceof IRWStrategy) { + + /* + * Now the root blocks are down we can commit any transient + * state. + */ + + ((IRWStrategy) _bufferStrategy).postCommit(); + + } // set the new root block. _rootBlock = newRootBlock; @@ -3482,6 +3489,15 @@ throw new RuntimeException(e); } + } // else HA mode + + } finally { + if(commitLock != null) { + /* + * Release the [commitLock] iff one was taken above. + */ + commitLock.unlock(); + } } final long elapsedNanos = System.nanoTime() - beginNanos; @@ -5901,6 +5917,38 @@ try { + /* + * Note: flush() is done by prepare2Phase(). The only conditions + * under which it is not done already is (a) HARestore (when + * localService is null) and (b) during RESTORE or RESYNC for the + * HAJournalServer (when haStatus will be NotReady). + */ + final boolean shouldFlush = localService == null + || (haStatus == null || haStatus == HAStatusEnum.NotReady); + + /* + * Force application data to stable storage _before_ we update the + * root blocks. This option guarantees that the application data is + * stable on the disk before the atomic commit. Some operating + * systems and/or file systems may otherwise choose an ordered write + * with the consequence that the root blocks are laid down on the + * disk before the application data and a hard failure could result + * in the loss of application data addressed by the new root blocks + * (data loss on restart). + * + * Note: We do not force the file metadata to disk. If that is done, + * it will be done by a force() after we write the root block on the + * disk. + * + * Note: [shouldFlush] is probably sufficient. This test uses + * [shouldFlush||true] to err on the side of safety. + */ + if ((shouldFlush || true) && doubleSync) { + + _bufferStrategy.force(false/* metadata */); + + } + // The timestamp for this commit point. final long commitTime = rootBlock.getLastCommitTime(); @@ -5914,11 +5962,18 @@ .isLeader(rootBlock.getQuorumToken()); if (leader) { - // Now the root blocks are down we can commit any - // transient state - if (_bufferStrategy instanceof IRWStrategy) { - ((IRWStrategy) _bufferStrategy).postCommit(); - } + + if (_bufferStrategy instanceof IRWStrategy) { + + /* + * Now the root blocks are down we can commit any transient + * state. 
+ */ + + ((IRWStrategy) _bufferStrategy).postCommit(); + + } + } else { /* Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -346,7 +346,7 @@ * Compress write cache blocks for replication and in HALogs </a> */ String HALOG_COMPRESSOR = "HALogCompressor"; - String DEFAULT_HALOG_COMPRESSOR = null;//FIXME Change default: CompressorRegistry.DEFLATE_BEST_SPEED; + String DEFAULT_HALOG_COMPRESSOR = CompressorRegistry.DEFLATE_BEST_SPEED; /** * The initial extent of the journal (bytes). When the journal is backed by Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -34,6 +34,7 @@ import java.security.MessageDigest; import java.util.UUID; import java.util.concurrent.Future; +import java.util.concurrent.locks.Lock; import org.apache.log4j.Logger; @@ -886,6 +887,11 @@ } @Override + public Lock getCommitLock() { + return m_store.getCommitLock(); + } + + @Override public void postCommit() { m_store.postCommit(); } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IRWStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IRWStrategy.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IRWStrategy.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -1,5 +1,7 @@ package com.bigdata.rwstore; +import java.util.concurrent.locks.Lock; + import com.bigdata.journal.IBufferStrategy; import com.bigdata.journal.RWStrategy; import com.bigdata.rawstore.IAllocationManagerStore; @@ -31,10 +33,12 @@ */ public boolean isCommitted(long addr); -// /** -// * Resets allocators from current rootblock -// */ -// void resetFromHARootBlock(IRootBlockView rootBlock); + /** + * Optionally return a {@link Lock} that must be used (when non- + * <code>null</code>) to make the {@link IBufferStrategy#commit()} / + * {@link #postCommit()} strategy atomic. + */ + public Lock getCommitLock(); /** * Called post commit to dispose any transient commit state retained to Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IStore.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IStore.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/IStore.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -25,6 +25,7 @@ package com.bigdata.rwstore; import java.io.File; +import java.util.concurrent.locks.Lock; import com.bigdata.rawstore.IAllocationContext; import com.bigdata.rawstore.IStreamStore; @@ -58,6 +59,13 @@ public void free(long addr, int size); /** + * Optionally return a {@link Lock} that must be used (when non- + * <code>null</code>) to make the {@link #commit()} / {@link #postCommit()} + * strategy atomic. + */ + public Lock getCommitLock(); + + /** * Global commit on the backing store. 
Previously committed data which has * been marked as {@link #free(long, int)} is now available for recycling. * However, recycling can not occur if session protection is active. Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -3137,19 +3137,32 @@ } /** + * {@inheritDoc} + */ + public Lock getCommitLock() { + + return m_allocationWriteLock; + + } + + /** + * {@inheritDoc} + * <p> * Commits the FixedAllocator bits */ public void postCommit() { - m_allocationWriteLock.lock(); - try { - for (FixedAllocator fa : m_commitList) { - fa.postCommit(); - } + + if (!m_allocationWriteLock.isHeldByCurrentThread()) + throw new IllegalMonitorStateException(); + + for (FixedAllocator fa : m_commitList) { + + fa.postCommit(); - m_commitList.clear(); - } finally { - m_allocationWriteLock.unlock(); } + + m_commitList.clear(); + } public int checkDeferredFrees(final AbstractJournal journal) { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/AllocationContext.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/AllocationContext.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/AllocationContext.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -29,6 +29,7 @@ import java.nio.ByteBuffer; import java.util.LinkedHashSet; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import org.apache.log4j.Logger; @@ -367,6 +368,11 @@ m_root.commit(); } + @Override + public Lock getCommitLock() { + return m_root.getCommitLock(); + } + @Override public void postCommit() { m_root.postCommit(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -29,6 +29,7 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.util.UUID; +import java.util.concurrent.locks.Lock; import com.bigdata.cache.ConcurrentWeakValueCache; import com.bigdata.counters.CounterSet; @@ -159,6 +160,13 @@ } @Override + public Lock getCommitLock() { + + return m_mmgr.getCommitLock(); + + } + + @Override public void postCommit() { m_mmgr.postCommit(); m_dirty = false; Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemoryManager.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import 
org.apache.log4j.Logger; @@ -104,6 +105,9 @@ /** * The lock used to serialize all allocation/deallocation requests. This is * shared across all allocation contexts to avoid lock ordering problems. + * + * FIXME This should be a read/write lock as per RWStore. That will provide + * better concurrency. */ final /*private*/ ReentrantLock m_allocationLock = new ReentrantLock(); @@ -1196,16 +1200,22 @@ } @Override + public Lock getCommitLock() { + return m_allocationLock; + } + + @Override public void postCommit() { - m_allocationLock.lock(); - try { - final Iterator<SectorAllocator> sectors = m_sectors.iterator(); - while (sectors.hasNext()) { - sectors.next().commit(); - } - } finally { - m_allocationLock.unlock(); - } + if(!m_allocationLock.isHeldByCurrentThread()) + throw new IllegalMonitorStateException(); +// try { + final Iterator<SectorAllocator> sectors = m_sectors.iterator(); + while (sectors.hasNext()) { + sectors.next().commit(); + } +// } finally { +// m_allocationLock.unlock(); +// } } private ConcurrentWeakValueCache<Long, ICommitter> m_externalCache = null; Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/sector/TestMemStore.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/sector/TestMemStore.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/sector/TestMemStore.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -111,7 +111,7 @@ * Use a proxy test suite and specify the delegate. */ - ProxyTestSuite suite = new ProxyTestSuite(delegate, + final ProxyTestSuite suite = new ProxyTestSuite(delegate, "MemStore Test Suite"); /* Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-29 12:22:08 UTC (rev 7169) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-29 14:47:12 UTC (rev 7170) @@ -1144,6 +1144,16 @@ } // RunStateCallable + /** + * Transition to {@link RunStateEnum#Error}. + * <p> + * Note: if the current {@link Thread} is a {@link Thread} executing one + * of the {@link RunStateCallable#doRun()} methods, then it will be + * <strong>interrupted</strong> when entering the new run state. Thus, + * the caller MAY observe an {@link InterruptedException} in their + * thread, but only if they are being run out of + * {@link RunStateCallable}. + */ void enterErrorState() { /* @@ -1178,7 +1188,11 @@ /* * Transition into the error state. + * + * Note: This can cause the current Thread to be interrupted if it + * is the Thread executing one of the RunStateCallable classes. */ + enterRunState(new ErrorTask()); } @@ -3011,11 +3025,34 @@ try { enterErrorState(); } catch (RuntimeException e) { - // log and ignore. - log.error(e, e); + if (InnerCause.isInnerCause(e, + InterruptedException.class)) { + /* + * Propagate the interrupt. + * + * Note: This probably does not occur in this + * context since we are not running in the + * Thread for any doRun() method. + */ + Thread.interrupted(); + } else { + // log and ignore. + log.error(e, e); + } } - // rethrow exception. - throw new RuntimeException(t); + /* + * Note: DO NOT rethrow the exception. This service will + * leave the met quorum. 
If we rethrow the exception, + * the the update operation that that generated the live + * replicated write will be failed with the rethrown + * exception as the root cause. However, we want the + * update operation to complete successfully as long as + * we can retain an met quorum (and the same leader) for + * the duration of the update operation. + */ +// // rethrow exception. +// throw new RuntimeException(t); + return; } // /* This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
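The core of revision 7170 is that IRWStrategy.commit() and postCommit() must now be bracketed by the Lock returned from getCommitLock(), since postCommit() asserts that the current thread holds that lock. The sketch below condenses the pattern the diff installs in AbstractJournal.commitNow(); the class and method names in the sketch itself are illustrative, not part of the commit, and the root block write is elided to a comment.

import java.util.concurrent.locks.Lock;

import com.bigdata.journal.IBufferStrategy;
import com.bigdata.rwstore.IRWStrategy;

// Illustrative only: bufferStrategy stands in for the journal's backing store.
public class CommitLockExample {

    static void atomicCommit(final IBufferStrategy bufferStrategy) {

        // Conditionally obtain the lock that makes commit()/postCommit() atomic.
        final Lock commitLock = (bufferStrategy instanceof IRWStrategy)
                ? ((IRWStrategy) bufferStrategy).getCommitLock()
                : null;

        if (commitLock != null)
            commitLock.lock();

        try {

            // Flush the write set and lay down the commit point.
            bufferStrategy.commit();

            // ... write the new root block here ...

            if (bufferStrategy instanceof IRWStrategy) {

                // Dispose transient commit state; throws
                // IllegalMonitorStateException if the commit lock is not held.
                ((IRWStrategy) bufferStrategy).postCommit();

            }

        } finally {

            if (commitLock != null)
                commitLock.unlock();

        }

    }

}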
From: <tho...@us...> - 2013-05-29 12:22:19
Revision: 7169 http://bigdata.svn.sourceforge.net/bigdata/?rev=7169&view=rev Author: thompsonbry Date: 2013-05-29 12:22:08 +0000 (Wed, 29 May 2013) Log Message: ----------- Refactored Stream to support IRawStore which now extends IStreamStore (previously, only IRWStrategy was supported). Added coverage to the named solution set update test suite for WORMStrategy and RWStore in addition to the MemStrategy. This provides confirmation that we support all three backends (again, now that IRawStore implements IStreamStore). @see https://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for NAMED SOLUTION SETS) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rawstore/IStreamStore.java branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/update/TestAll.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskRW.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskWORM.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rawstore/IStreamStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rawstore/IStreamStore.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/rawstore/IStreamStore.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -25,10 +25,12 @@ import java.io.InputStream; - /** * Interface for reading and writing streams on a persistence store. * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > Support + * PSOutputStream/InputStream at IRawStore </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ public interface IStreamStore { Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/stream/Stream.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -63,7 +63,7 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.rawstore.IPSOutputStream; import com.bigdata.rawstore.IRawStore; -import com.bigdata.rwstore.IRWStrategy; +import com.bigdata.rawstore.IStreamStore; import com.bigdata.service.IBigdataFederation; import com.bigdata.striterator.ICloseableIterator; @@ -111,7 +111,7 @@ /** * The backing store. */ - private final IRWStrategy store; + private final IRawStore store; /** * <code>true</code> iff the view is read-only. @@ -137,15 +137,15 @@ protected long rootAddr; /** - * FIXME There is a reliance on the {@link IRWStrategy} right now because - * the {@link IPSOutputStream} API has not yet been lifted onto the - * {@link IRawStore} or a similar API. 
+ * {@inheritDoc} + * <p> + * Note: There is a reliance on the {@link IStreamStore} API. * * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > * Support PSOutputStream/InputStream at IRawStore </a> */ @Override - public IRWStrategy getStore() { + public IRawStore getStore() { return store; @@ -198,7 +198,7 @@ // save a reference to the immutable metadata record. this.metadata = (StreamIndexMetadata) metadata; - this.store = (IRWStrategy) ((store instanceof AbstractJournal) ? ((AbstractJournal) store) + this.store = (IRawStore) ((store instanceof AbstractJournal) ? ((AbstractJournal) store) .getBufferStrategy() : store); this.readOnly = readOnly; Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/CacheConnectionImpl.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -53,10 +53,10 @@ import com.bigdata.journal.TemporaryStore; import com.bigdata.journal.TimestampUtility; import com.bigdata.rawstore.Bytes; +import com.bigdata.rawstore.IStreamStore; import com.bigdata.rdf.sparql.ast.QueryHints; import com.bigdata.relation.locator.DefaultResourceLocator; import com.bigdata.resources.IndexManager; -import com.bigdata.rwstore.IRWStrategy; import com.bigdata.rwstore.RWStore; import com.bigdata.service.IDataService; import com.bigdata.sparse.SparseRowStore; @@ -211,18 +211,18 @@ } - /* - * TODO Hack enables the SOLUTIONS cache. + /** + * Conditionally enable the SOLUTIONS cache. * * Note: The SolutionSetStream has a dependency on the IPSOutputStream * so the solutions cache can not be enabled when that interface is not * available. * * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > - * Support PSOutputStream/InputStream at IRawStore </a> + * Support PSOutputStream/InputStream at IRawStore </a> */ this.enableSolutionsCache = QueryHints.DEFAULT_SOLUTION_SET_CACHE - && cacheStore.getBufferStrategy() instanceof IRWStrategy; + && cacheStore.getBufferStrategy() instanceof IStreamStore; /* * TODO Hack enables the DESCRIBE cache. Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/cache/ISolutionSetCache.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -35,7 +35,15 @@ /** * A SPARQL solution set cache or a connection to a remote SPARQL cache or cache * fabric. + * <p> + * Note: This is an internal interface that may evolve substantially. 
* + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> SPARQL + * UPDATE Extensions (Trac) </a> + * @see <a + * href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update"> + * SPARQL Update Extensions (Wiki) </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -327,7 +327,6 @@ } - /** * A unit test for an INCLUDE with another JOIN. For this test, the INCLUDE * will run first: Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/update/TestAll.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/update/TestAll.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/update/TestAll.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -29,9 +29,10 @@ import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTest; import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTest2; +import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTest2DiskRW; +import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTest2DiskWORM; import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTxTest; import com.bigdata.rdf.sail.tck.BigdataSPARQLUpdateTxTest2; -import com.bigdata.rdf.sparql.ast.QueryHints; /** * Aggregates test suites into increasing dependency order. @@ -85,27 +86,37 @@ // Fully isolated read/write operations. suite.addTestSuite(BigdataSPARQLUpdateTxTest.class); - /* - * TODO We should always run this test suite, not just when the solution - * set cache is enabled. + /** + * The bigdata extensions to SPARQL UPDATE to support solution sets as + * well as graphs. + * + * Note: We need to run a few different IRawStore backends to confirm + * support for the IStreamStore interface and to confirm that the store + * correctly supports SPARQL UPDATE on NAMED SOLUTION SETS using that + * IStreamStore interface. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> + * SPARQL UPDATE Extensions (Trac) </a> + * @see <a + * href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update"> + * SPARQL Update Extensions (Wiki) </a> + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > + * Support PSOutputStream/InputStream at IRawStore </a> */ - if(QueryHints.DEFAULT_SOLUTION_SET_CACHE) { + { - /* - * The bigdata extensions to SPARQL UPDATE to support solution sets - * as well as graphs. - */ - - // Unisolated operations. - suite.addTestSuite(BigdataSPARQLUpdateTest2.class); + // Unisolated operations + suite.addTestSuite(BigdataSPARQLUpdateTest2.class); // MemStore. + suite.addTestSuite(BigdataSPARQLUpdateTest2DiskRW.class); + suite.addTestSuite(BigdataSPARQLUpdateTest2DiskWORM.class); // Fully isolated read/write operations. 
- suite.addTestSuite(BigdataSPARQLUpdateTxTest2.class); - + suite.addTestSuite(BigdataSPARQLUpdateTxTest2.class); // MemStore + } - + return suite; - + } - + } Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-05-28 21:30:38 UTC (rev 7168) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -66,6 +66,7 @@ import com.bigdata.rdf.sail.BigdataSail; import com.bigdata.rdf.sail.BigdataSail.Options; import com.bigdata.rdf.sail.BigdataSailRepository; +import com.bigdata.rdf.sparql.ast.QueryHints; /** * Test suite for BIGDATA extension to SPARQL UPDATE for NAMED SOLUTION SETS. @@ -330,11 +331,8 @@ public Properties getProperties() { final Properties props = new Properties(super.getProperties()); - -// final File journal = BigdataStoreTest.createTempFile(); -// -// props.setProperty(BigdataSail.Options.FILE, journal.getAbsolutePath()); + // Base version of the test uses the MemStore. props.setProperty(Options.BUFFER_MODE, BufferMode.MemStore.toString()); // quads mode: quads=true, sids=false, axioms=NoAxioms, vocab=NoVocabulary @@ -418,6 +416,22 @@ } /** + * Return <code>true</code> iff the SPARQL UPDATE for NAMED SOLUTION SETS + * feature is enabled. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> + * SPARQL UPDATE Extensions (Trac) </a> + * @see <a + * href="https://sourceforge.net/apps/mediawiki/bigdata/index.php?title=SPARQL_Update"> + * SPARQL Update Extensions (Wiki) </a> + */ + protected boolean isSolutionSetUpdateEnabled() { + + return QueryHints.DEFAULT_SOLUTION_SET_CACHE; + + } + + /** * Unit test for <code>INSERT INTO ... SELECT</code>. This loads some data * into the end point, creates a named solution set, then verifies that the * solutions are present using a query and an INCLUDE join against the named @@ -425,6 +439,13 @@ */ public void test_insertIntoSolutions_01() throws Exception { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + loadDataset(packagePath + "dataset-01.trig"); // Build the solution set. @@ -482,6 +503,13 @@ */ public void test_deleteFromSolutions_01() throws Exception { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + loadDataset(packagePath + "dataset-01.trig"); // Build the solution set. @@ -555,6 +583,13 @@ */ public void test_deleteFromSolutions_02() throws Exception { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + loadDataset(packagePath + "dataset-01.trig"); // Build the solution set. @@ -628,6 +663,13 @@ */ public void test_deleteFromSolutions_03() throws Exception { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + loadDataset(packagePath + "dataset-01.trig"); // Build the solution set. @@ -710,6 +752,13 @@ */ public void test_deleteInsertSolutions_01() throws Exception { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + loadDataset(packagePath + "dataset-01.trig"); // Build the solution set. 
@@ -816,6 +865,13 @@ */ public void test_isolation_insertIntoSolutionsWithIncludeFromSolutions() { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + fail("write test"); } @@ -831,6 +887,13 @@ public void test_createSolutionSet_01() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + // Should fail since solution set does not exist. try { con.prepareUpdate(QueryLanguage.SPARQL, "drop solutions %namedSet1") @@ -859,6 +922,13 @@ public void test_createSolutionSet_02() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + // Should succeed. con.prepareUpdate(QueryLanguage.SPARQL, "create solutions %namedSet1") .execute(); @@ -886,6 +956,13 @@ public void test_dropSolutionSet_01() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + try { con.prepareUpdate(QueryLanguage.SPARQL, "drop solutions %namedSet1") .execute(); @@ -904,6 +981,13 @@ public void test_dropSolutionSet_02() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + con.prepareUpdate(QueryLanguage.SPARQL, "drop silent solutions %namedSet1").execute(); @@ -916,6 +1000,13 @@ public void test_clearSolutionSet_01() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + try { con.prepareUpdate(QueryLanguage.SPARQL, "clear solutions %namedSet1") .execute(); @@ -934,6 +1025,13 @@ public void test_clearSolutionSet_02() throws UpdateExecutionException, RepositoryException, MalformedQueryException { + if (!isSolutionSetUpdateEnabled()) { + /* + * Test requires this feature. + */ + return; + } + con.prepareUpdate(QueryLanguage.SPARQL, "clear silent solutions %namedSet1").execute(); Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskRW.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskRW.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskRW.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -0,0 +1,74 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Mar 18, 2012 + */ + +package com.bigdata.rdf.sail.tck; + +import java.io.File; +import java.util.Properties; + +import com.bigdata.journal.BufferMode; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSail.Options; + +/** + * A variant of the test suite using {@link BufferMode#DiskRW}. + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> SPARQL + * UPDATE Extensions (Trac) </a> + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > Support + * PSOutputStream/InputStream at IRawStore </a> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: BigdataSPARQLUpdateTxTest2.java 7168 2013-05-28 21:30:38Z + * thompsonbry $ + */ +public class BigdataSPARQLUpdateTest2DiskRW extends BigdataSPARQLUpdateTest2 { + + /** + * + */ + public BigdataSPARQLUpdateTest2DiskRW() { + } + + @Override + public Properties getProperties() { + + final Properties props = new Properties(super.getProperties()); + + final File journal = BigdataStoreTest.createTempFile(); + + props.setProperty(BigdataSail.Options.FILE, journal.getAbsolutePath()); + + props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString()); +// props.setProperty(Options.BUFFER_MODE, BufferMode.DiskWORM.toString()); + + return props; + + } + +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskWORM.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskWORM.java (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest2DiskWORM.java 2013-05-29 12:22:08 UTC (rev 7169) @@ -0,0 +1,74 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2012. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on Mar 18, 2012 + */ + +package com.bigdata.rdf.sail.tck; + +import java.io.File; +import java.util.Properties; + +import com.bigdata.journal.BufferMode; +import com.bigdata.rdf.sail.BigdataSail; +import com.bigdata.rdf.sail.BigdataSail.Options; + +/** + * A variant of the test suite using {@link BufferMode#DiskWORM}. 
+ * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> SPARQL + * UPDATE Extensions (Trac) </a> + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/555" > Support + * PSOutputStream/InputStream at IRawStore </a> + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @version $Id: BigdataSPARQLUpdateTxTest2.java 7168 2013-05-28 21:30:38Z + * thompsonbry $ + */ +public class BigdataSPARQLUpdateTest2DiskWORM extends BigdataSPARQLUpdateTest2 { + + /** + * + */ + public BigdataSPARQLUpdateTest2DiskWORM() { + } + + @Override + public Properties getProperties() { + + final Properties props = new Properties(super.getProperties()); + + final File journal = BigdataStoreTest.createTempFile(); + + props.setProperty(BigdataSail.Options.FILE, journal.getAbsolutePath()); + +// props.setProperty(Options.BUFFER_MODE, BufferMode.DiskRW.toString()); + props.setProperty(Options.BUFFER_MODE, BufferMode.DiskWORM.toString()); + + return props; + + } + +} This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
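For orientation, the practical effect of r7169 above is that the streaming API needed by the named solution set machinery is now available on any IRawStore backend, not just the IRWStrategy ones. A minimal usage sketch follows. It assumes that IStreamStore exposes getOutputStream() and getInputStream(long addr), that IPSOutputStream behaves as a java.io.OutputStream, and that its storage address is reported by getAddr() once the stream is closed; those signatures are inferred from the ticket 555 description and the imports in the diff rather than shown in it.

    import java.io.InputStream;

    import com.bigdata.rawstore.IPSOutputStream;
    import com.bigdata.rawstore.IStreamStore;

    public class StreamStoreSketch {

        /**
         * Write a payload as a stream on the store and read it back.
         * Sketch only - method names assumed as noted above.
         */
        static byte[] roundTrip(final IStreamStore store, final byte[] payload)
                throws Exception {

            // Stream the payload onto the store.
            final IPSOutputStream out = store.getOutputStream();
            try {
                out.write(payload);
            } finally {
                out.close(); // seals the stream
            }

            // Address of the persisted stream (assumed accessor).
            final long addr = out.getAddr();

            // Read the stream back from that address.
            final InputStream in = store.getInputStream(addr);
            try {
                final byte[] b = new byte[payload.length];
                int off = 0, n;
                while (off < b.length && (n = in.read(b, off, b.length - off)) > 0) {
                    off += n;
                }
                return b;
            } finally {
                in.close();
            }
        }
    }

This is what lets the same SPARQL UPDATE named solution set tests run unchanged against MemStore, DiskRW and DiskWORM, which is exactly what the added BigdataSPARQLUpdateTest2DiskRW and BigdataSPARQLUpdateTest2DiskWORM variants exercise.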
From: <tho...@us...> - 2013-05-28 21:30:45
Revision: 7168 http://bigdata.svn.sourceforge.net/bigdata/?rev=7168&view=rev Author: thompsonbry Date: 2013-05-28 21:30:38 +0000 (Tue, 28 May 2013) Log Message: ----------- javadoc Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest2.java Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest2.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest2.java 2013-05-28 21:21:16 UTC (rev 7167) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest2.java 2013-05-28 21:30:38 UTC (rev 7168) @@ -34,6 +34,9 @@ /** * A variant of the test suite using full read/write transactions. * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531"> SPARQL + * UPDATE Extensions (Trac) </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-05-28 21:21:24
Revision: 7167 http://bigdata.svn.sourceforge.net/bigdata/?rev=7167&view=rev Author: thompsonbry Date: 2013-05-28 21:21:16 +0000 (Tue, 28 May 2013) Log Message: ----------- AST2BOpUtility - javadoc only. TestInclude - added a 2nd test case so we have coverage both for the code path where we use a SCAN (INCLUDE runs first) as well as the case where we run a JOIN first and then join in the data from the INCLUDE. include_03.rq - SPARQL comment changes only. include03a.rq - the new version of the test for TestInclude. BigdataSPARQLUpdateTest - javadoc only. BigdataSPARQLUpdateTestTx - javadoc only. @see https://sourceforge.net/apps/trac/bigdata/ticket/531 (SPARQL UPDATE for NAMED SOLUTION SETS) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03.rq branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03a.rq Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-05-28 15:07:45 UTC (rev 7166) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/AST2BOpUtility.java 2013-05-28 21:21:16 UTC (rev 7167) @@ -1191,7 +1191,7 @@ } else { - /* + /** * Attempt to resolve a pre-existing named solution set. * * If we find the named solution set, then we will handle it in @@ -1214,6 +1214,10 @@ * operator and what is known bound in the named solution set * itself. We will then do a hash join against the generated * hash index. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/531" + * > SPARQL UPDATE for NAMED SOLUTION SETS </a> */ final ISolutionSetStats stats = ctx.sa Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-05-28 15:07:45 UTC (rev 7166) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestInclude.java 2013-05-28 21:21:16 UTC (rev 7167) @@ -327,7 +327,157 @@ } + /** + * A unit test for an INCLUDE with another JOIN. For this test, the INCLUDE + * will run first: + * + * <pre> + * %solutionSet1:: + * {x=:Mike, y=2} + * {x=:Bryan, y=4} + * {x=:DC, y=1} + * </pre> + * + * <pre> + * prefix : <http://www.bigdata.com/> + * prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> + * prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> + * prefix foaf: <http://xmlns.com/foaf/0.1/> + * + * SELECT ?x ?y WHERE { + * + * # Turn off the join order optimizer. + * hint:Query hint:optimizer "None" . + * + * # Run joins in the given order (INCLUDE is 1st). + * + * # SCAN => {(x=Mike,y=2);(x=Bryan;y=4);(x=DC,y=1)} + * INCLUDE %solutionSet1 . 
+ * + * # JOIN on (x) => {(x=Mike,y=2);(x=Bryan,y=4)} + * ?x rdf:type foaf:Person . + * + * } + * </pre> + * + * Note: This excercises the code path in {@link AST2BOpUtility} where we do + * a SCAN on the named solution set for the INCLUDE and then join with the + * access path. + * + * @see #test_include_03() + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531" > + * SPARQL UPDATE for NAMED SOLUTION SETS </a> + */ + public void test_include_03a() throws Exception { + + final TestHelper testHelper = new TestHelper( + "include_03a",// name + "include_03a.rq",// query URL + "include_03.trig",// data URL + "include_03.srx",// results URL + false,// lax cardinality + false // check order + ); + + final AbstractTripleStore tripleStore = testHelper.getTripleStore(); + + final BigdataValueFactory vf = tripleStore.getValueFactory(); + + final QueryEngine queryEngine = QueryEngineFactory + .getQueryController(tripleStore.getIndexManager()); + + final ICacheConnection cacheConn = CacheConnectionFactory + .getCacheConnection(queryEngine); + + final ISolutionSetCache sparqlCache = cacheConn.getSparqlCache( + tripleStore.getNamespace(), tripleStore.getTimestamp()); + + final String solutionSet = "%solutionSet1"; + + final IVariable<?> x = Var.var("x"); + final IVariable<?> y = Var.var("y"); + + // Resolve terms pre-loaded into the kb. + final BigdataURI Mike = vf.createURI("http://www.bigdata.com/Mike"); + final BigdataURI Bryan = vf.createURI("http://www.bigdata.com/Bryan"); + final BigdataURI DC = vf.createURI("http://www.bigdata.com/DC"); + { + tripleStore.addTerms(new BigdataValue[] { Mike, Bryan, DC }); + assertNotNull(Mike.getIV()); + assertNotNull(Bryan.getIV()); + assertNotNull(DC.getIV()); + } + + final XSDNumericIV<BigdataLiteral> one = new XSDNumericIV<BigdataLiteral>( + 1); + one.setValue(vf.createLiteral(1)); + + final XSDNumericIV<BigdataLiteral> two = new XSDNumericIV<BigdataLiteral>( + 2); + two.setValue(vf.createLiteral(2)); + +// final XSDNumericIV<BigdataLiteral> three = new XSDNumericIV<BigdataLiteral>( +// 3); +// three.setValue(vf.createLiteral(3)); + + final XSDNumericIV<BigdataLiteral> four = new XSDNumericIV<BigdataLiteral>( + 4); + four.setValue(vf.createLiteral(4)); + +// final XSDNumericIV<BigdataLiteral> five = new XSDNumericIV<BigdataLiteral>( +// 5); +// five.setValue(vf.createLiteral(5)); + + final List<IBindingSet> bsets = new LinkedList<IBindingSet>(); + { + final IBindingSet bset = new ListBindingSet(); + bset.set(x, asConst(Mike.getIV())); + bset.set(y, asConst(two)); + bsets.add(bset); + } + { + final IBindingSet bset = new ListBindingSet(); + bset.set(x, asConst(Bryan.getIV())); + bset.set(y, asConst(four)); + bsets.add(bset); + } + { + final IBindingSet bset = new ListBindingSet(); + bset.set(x, asConst(DC.getIV())); + bset.set(y, asConst(one)); + bsets.add(bset); + } + + final IBindingSet[] bindingSets = bsets.toArray(new IBindingSet[]{}); + + sparqlCache.putSolutions(solutionSet, + BOpUtility.asIterator(bindingSets)); + + final ASTContainer astContainer = testHelper.runTest(); + + final PipelineOp queryPlan = astContainer.getQueryPlan(); + + // top level should be the PROJECTION operator. + final PipelineOp projectionOp = (PipelineOp) queryPlan; + assertTrue(projectionOp instanceof ProjectionOp); + + // sole argument should be the PIPELINE JOIN operator. 
+ final PipelineOp joinOp = (PipelineOp) projectionOp.get(0); + assertTrue(joinOp instanceof PipelineJoin); + + /* + * The sole argument of JOIN should be the INCLUDE operator, which + * should be evaluated using a solution set SCAN. This is where we start + * evaluation for this query. + */ + final PipelineOp includeOp = (PipelineOp) joinOp.get(0); + assertTrue(includeOp instanceof NestedLoopJoinOp); + + } + + /** * A unit test for an INCLUDE which is NOT the first JOIN in the WHERE * clause. This condition is enforced by turning off the join order * optimizer for this query. @@ -336,6 +486,13 @@ * order guarantee for the resulting solutions. * * <pre> + * %solutionSet1:: + * {x=:Mike, y=2} + * {x=:Bryan, y=4} + * {x=:DC, y=1} + * </pre> + * + * <pre> * prefix : <http://www.bigdata.com/> * prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> * prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> @@ -356,6 +513,11 @@ * * } * </pre> + * + * @see #test_include_03a() + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531" > + * SPARQL UPDATE for NAMED SOLUTION SETS </a> */ public void test_include_03() throws Exception { @@ -417,6 +579,14 @@ // 5); // five.setValue(vf.createLiteral(5)); + /** + * <pre> + * %solutionSet1:: + * {x=:Mike, y=2} + * {x=:Bryan, y=4} + * {x=:DC, y=1} + * </pre> + */ final List<IBindingSet> bsets = new LinkedList<IBindingSet>(); { final IBindingSet bset = new ListBindingSet(); Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03.rq =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03.rq 2013-05-28 15:07:45 UTC (rev 7166) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03.rq 2013-05-28 21:21:16 UTC (rev 7167) @@ -1,19 +1,19 @@ -prefix : <http://www.bigdata.com/> -prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> -prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> -prefix foaf: <http://xmlns.com/foaf/0.1/> - -SELECT ?x ?y WHERE { - - # Turn off the join order optimizer. - hint:Query hint:optimizer "None" . - - # Run joins in the given order (INCLUDE is 2nd). - - # bind x => {Mike;Bryan} - ?x rdf:type foaf:Person . - - # join on (x) => {(x=Mike,y=2);(x=Bryan;y=4)} - INCLUDE %solutionSet1 . - -} +prefix : <http://www.bigdata.com/> +prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> +prefix foaf: <http://xmlns.com/foaf/0.1/> + +SELECT ?x ?y WHERE { + + # Turn off the join order optimizer. + hint:Query hint:optimizer "None" . + + # Run joins in the given order (INCLUDE is 2nd). + + # RANGE SCAN x => {(x=Mike);(x=Bryan)} + ?x rdf:type foaf:Person . + + # JOIN on (x) => {(x=Mike,y=2);(x=Bryan;y=4)} + INCLUDE %solutionSet1 . 
+ +} Added: branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03a.rq =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03a.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/include_03a.rq 2013-05-28 21:21:16 UTC (rev 7167) @@ -0,0 +1,19 @@ +prefix : <http://www.bigdata.com/> +prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> +prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> +prefix foaf: <http://xmlns.com/foaf/0.1/> + +SELECT ?x ?y WHERE { + + # Turn off the join order optimizer. + hint:Query hint:optimizer "None" . + + # Run joins in the given order (INCLUDE is 1st). + + # SCAN => {(x=Mike,y=2);(x=Bryan;y=4);(x=DC,y=1)} + INCLUDE %solutionSet1 . + + # JOIN on (x) => {(x=Mike,y=2);(x=Bryan,y=4)} + ?x rdf:type foaf:Person . + +} Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java 2013-05-28 15:07:45 UTC (rev 7166) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java 2013-05-28 21:21:16 UTC (rev 7167) @@ -53,6 +53,9 @@ /** * Integration with the openrdf SPARQL 1.1 update test suite. * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531" > SPARQL + * UPDATE for NAMED SOLUTION SETS </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ Modified: branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest.java =================================================================== --- branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest.java 2013-05-28 15:07:45 UTC (rev 7166) +++ branches/BIGDATA_RELEASE_1_2_0/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTxTest.java 2013-05-28 21:21:16 UTC (rev 7167) @@ -34,6 +34,9 @@ /** * A variant of the test suite using full read/write transactions. * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/531" > SPARQL + * UPDATE for NAMED SOLUTION SETS </a> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * @version $Id$ */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
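Seen from the application side, the pieces exercised by r7167 fit together through the ordinary Sesame connection API, just as the tests above do. The sketch below is illustrative only: the "create solutions" / "drop solutions" / INCLUDE forms are taken verbatim from the tests, while the INSERT INTO ... SELECT spelling is inferred from the test name test_insertIntoSolutions_01 and the wiki page, so treat it as an assumption.

    import org.openrdf.query.QueryLanguage;
    import org.openrdf.query.TupleQueryResult;
    import org.openrdf.repository.RepositoryConnection;

    public class NamedSolutionSetSketch {

        static void demo(final RepositoryConnection con) throws Exception {

            // Create an (empty) named solution set.
            con.prepareUpdate(QueryLanguage.SPARQL,
                    "create solutions %namedSet1").execute();

            // Populate it from the data (INSERT INTO ... SELECT form assumed).
            con.prepareUpdate(QueryLanguage.SPARQL,
                    "prefix foaf: <http://xmlns.com/foaf/0.1/>\n"
                            + "INSERT INTO %namedSet1\n"
                            + "SELECT ?x WHERE { ?x a foaf:Person }").execute();

            // Join against the named solution set with INCLUDE.
            final TupleQueryResult res = con.prepareTupleQuery(
                    QueryLanguage.SPARQL,
                    "prefix foaf: <http://xmlns.com/foaf/0.1/>\n"
                            + "SELECT ?x WHERE {\n"
                            + "  INCLUDE %namedSet1 .\n"
                            + "  ?x a foaf:Person .\n"
                            + "}").evaluate();
            try {
                while (res.hasNext()) {
                    System.out.println(res.next());
                }
            } finally {
                res.close();
            }

            // Discard the solution set when done.
            con.prepareUpdate(QueryLanguage.SPARQL,
                    "drop solutions %namedSet1").execute();
        }
    }

Whether the INCLUDE above is evaluated as a SCAN followed by a join (the include_03a case) or as a join keyed by the preceding access path (the include_03 case) is the join order optimizer's call, which is why the tests pin the order down with hint:Query hint:optimizer "None".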
From: <tho...@us...> - 2013-05-28 15:07:56
Revision: 7166 http://bigdata.svn.sourceforge.net/bigdata/?rev=7166&view=rev Author: thompsonbry Date: 2013-05-28 15:07:45 +0000 (Tue, 28 May 2013) Log Message: ----------- - Bug fix to recent commit (r7162) where I broke RESTORE. - Significant changes to handleReplicatedWrite() and enterErrorState() in order to address the failure to identify in a timely fashion live writes that violate the expectation of a joined service and to ensure that a joined service whose expectations are violated for a live write will: (a) do a serviceLeave(); (b) call setQuorumToken() on the journal with the then current quorum token in order to clear the haReadyToken and haStatus fields; (c) disable the open HALog. These changes to handleReplicatedWrite are green for all HA CI tests *except* the "overrides" test suite. I am committing this to sync to Martyn while I work on those overrides. Revision Links: -------------- http://bigdata.svn.sourceforge.net/bigdata/?rev=7162&view=rev Modified Paths: -------------- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-28 15:04:14 UTC (rev 7165) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-28 15:07:45 UTC (rev 7166) @@ -1146,6 +1146,39 @@ void enterErrorState() { + /* + * Do synchronous service leave. + */ + serviceLeave(); + + /* + * Update the haReadyTokena and haStatus regardless of whether the + * quorum token has changed since this service is no longer joined + * with a met quorum. + */ + journal.setQuorumToken(getQuorum().token()); + + logLock.lock(); + try { + if (journal.getHALogNexus().isHALogOpen()) { + /* + * Note: Closing the HALog is necessary for us to be able to + * re-enter SeekConsensus without violating a pre-condition + * for that run state. + */ + try { + journal.getHALogNexus().disableHALog(); + } catch (IOException e) { + log.error(e, e); + } + } + } finally { + logLock.unlock(); + } + + /* + * Transition into the error state. + */ enterRunState(new ErrorTask()); } @@ -1566,26 +1599,26 @@ * yet. */ // server.haGlueService.bounceZookeeperConnection(); - /* - * Note: Try moving to doRejectedCommit() so this will be - * synchronous. - */ - logLock.lock(); - try { - if (journal.getHALogNexus().isHALogOpen()) { - /* - * Note: Closing the HALog is necessary for us to be - * able to re-enter SeekConsensus without violating a - * pre-condition for that run state. - */ - journal.getHALogNexus().disableHALog(); - } - } finally { - logLock.unlock(); - } +// /* +// * Note: Try moving to doRejectedCommit() so this will be +// * synchronous. +// */ +// logLock.lock(); +// try { +// if (journal.getHALogNexus().isHALogOpen()) { +// /* +// * Note: Closing the HALog is necessary for us to be +// * able to re-enter SeekConsensus without violating a +// * pre-condition for that run state. +// */ +// journal.getHALogNexus().disableHALog(); +// } +// } finally { +// logLock.unlock(); +// } - // Force a service leave. - getQuorum().getActor().serviceLeave(); +// // Force a service leave. +// getQuorum().getActor().serviceLeave(); // /* // * Set token. 
Journal will notice that it is no longer @@ -1909,11 +1942,13 @@ final long commitCounter = journal.getRootBlockView() .getCommitCounter(); - final IHALogReader r = journal.getHALogNexus().getReader( - commitCounter + 1); + IHALogReader r = null; try { + r = journal.getHALogNexus() + .getReader(commitCounter + 1); + if (r.isEmpty()) { /* @@ -1956,7 +1991,11 @@ } finally { - r.close(); + if (r != null) { + + r.close(); + + } } @@ -2791,6 +2830,7 @@ protected void handleReplicatedWrite(final IHASyncRequest req, final IHAWriteMessage msg, final ByteBuffer data) throws Exception { + if (req == null //&& journal.getQuorumToken() == Quorum.NO_QUORUM && journal.getRootBlockView().getCommitCounter() == 0L && (msg.getUUID() != null && !journal.getUUID().equals(msg.getUUID()))) { @@ -2816,6 +2856,7 @@ */ return; } + pipelineSetup(); logLock.lock(); @@ -2829,9 +2870,9 @@ // Save off reference to most recent *live* message. journal.getHALogNexus().lastLiveHAWriteMessage = msg; - } + } else - if (req != null && req instanceof IHARebuildRequest) { + if (/*req != null &&*/ req instanceof IHARebuildRequest) { /* * This message and payload are part of a ground up service @@ -2906,37 +2947,86 @@ handleResyncMessage((IHALogRequest) req, msg, data); - } else if (req == null // Note: MUST be a live message! - && journal.getRootBlockView().getCommitCounter() == msg - .getCommitCounter() - && isJoinedMember(msg.getQuorumToken())) { + return; + } else if (req != null) { + /* - * We are not resynchronizing this service. This is a - * message for the current write set. The service is joined - * with the quorum. + * A historical message that is being ignored on this node. */ + + dropMessage(req, msg, data); - // write on the log and the local store. - acceptHAWriteMessage(msg, data); + return; + + } else { - } else { + assert req == null; // Note: MUST be a live message! + + if (!isJoinedMember(msg.getQuorumToken())) { + + /* + * If we are not joined, we can not do anything with a + * live write. + */ + + dropMessage(req, msg, data); + + return; + + } + + try { + + /* + * We are not resynchronizing this service. + * + * The service is joined with the quorum. + * + * The message SHOULD be for the current commit counter + * and the expected next write cache block sequence. If + * it is not, then we will enter error handling logic + * below. + */ + + // write on the log and the local store. + acceptHAWriteMessage(msg, data); + + return; + + } catch(Throwable t) { + if (InnerCause.isInnerCause(t, + InterruptedException.class)) { + // propagate interrupt + Thread.currentThread().interrupt(); + return; + } + /* + * Error handler. + * + * Live write is not for expected commit counter and + * write cache block sequence. + */ + log.error(t, t); + try { + enterErrorState(); + } catch (RuntimeException e) { + // log and ignore. + log.error(e, e); + } + // rethrow exception. + throw new RuntimeException(t); + } - if (log.isInfoEnabled()) - log.info("Ignoring message: " + msg); - - /* - * Drop the pipeline message. - * - * Note: There are two cases here. - * - * (A) It is a historical message that is being ignored on - * this node; - * - * (B) It is a live message, but this node is not caught up - * and therefore can not log the message yet. - */ - +// /* +// * Drop the pipeline message. +// * +// * Note: It is a live message, but this node is not caught +// * up and therefore can not log the message yet. 
+// */ +// +// dropMessage(req, msg, data); + } } finally { @@ -2947,6 +3037,14 @@ } + private void dropMessage(final IHASyncRequest req, + final IHAWriteMessage msg, final ByteBuffer data) { + + if (log.isInfoEnabled()) + log.info("Ignoring message: req=" + req + ", msg=" + msg); + + } + /** * Adjust the size on the disk of the local store to that given in the * message. @@ -3217,12 +3315,21 @@ private void acceptHAWriteMessage(final IHAWriteMessage msg, final ByteBuffer data) throws IOException, InterruptedException { - if (msg.getCommitCounter() != journal.getHALogNexus() - .getCommitCounter()) { + // Note: Caller must be holding the logLock! + + final long expectedCommitCounter = journal.getHALogNexus() + .getCommitCounter(); - throw new AssertionError(); + final long expectedBlockSequence = journal.getHALogNexus() + .getSequence(); - } + if (msg.getCommitCounter() != expectedCommitCounter) + throw new IllegalStateException("expectedCommitCounter=" + + expectedCommitCounter+ ", but msg=" + msg); + + if (msg.getSequence() != expectedBlockSequence) + throw new IllegalStateException("expectedBlockSequence=" + + expectedBlockSequence + ", but msg=" + msg); /* * Log the message and write cache block. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
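Because the control flow in the patch above is spread across a fairly noisy diff, here is the new shape of handleReplicatedWrite() in one place. This is a condensed paraphrase only: the abstract helpers stand in for the real journal/quorum plumbing, the req/msg/data types are simplified to Object, the rebuild path is handled earlier in the real method, and the InterruptedException special case (which just re-asserts the interrupt and returns) is omitted.

    abstract class ReplicatedWriteFlowSketch {

        abstract boolean isResyncRequest(Object req);        // resynchronization traffic (IHALogRequest)
        abstract boolean isJoinedWithMetQuorum(Object msg);  // isJoinedMember(msg.getQuorumToken())
        abstract void handleResyncMessage(Object req, Object msg, Object data);
        abstract void dropMessage(Object req, Object msg, Object data);
        abstract void acceptHAWriteMessage(Object msg, Object data) throws Exception;
        abstract void enterErrorState();                     // serviceLeave + setQuorumToken + disableHALog + ErrorTask

        void onReplicatedWrite(final Object req, final Object msg, final Object data) {
            if (isResyncRequest(req)) {
                // Historical traffic for a write set this service is catching up on.
                handleResyncMessage(req, msg, data);
            } else if (req != null) {
                // Other historical traffic is ignored on this node.
                dropMessage(req, msg, data);
            } else if (!isJoinedWithMetQuorum(msg)) {
                // Live write, but this service is not joined: nothing it can do with it.
                dropMessage(req, msg, data);
            } else {
                try {
                    // Live write: must be for the expected commit counter AND the
                    // expected write cache block sequence, else we fall through to
                    // the error handler below.
                    acceptHAWriteMessage(msg, data);
                } catch (Exception t) {
                    // Expectation violated: leave the quorum, clear haReadyToken and
                    // haStatus, disable the open HALog, and enter the ERROR run state.
                    enterErrorState();
                    throw new RuntimeException(t);
                }
            }
        }
    }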
From: <tho...@us...> - 2013-05-28 15:04:26
Revision: 7165 http://bigdata.svn.sourceforge.net/bigdata/?rev=7165&view=rev Author: thompsonbry Date: 2013-05-28 15:04:14 +0000 (Tue, 28 May 2013) Log Message: ----------- javadoc (spelling error) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-28 15:03:39 UTC (rev 7164) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-28 15:04:14 UTC (rev 7165) @@ -5386,7 +5386,7 @@ didBreak = false; didMeet = false; didJoinMetQuorum = false; - didLeaveMetQuorum = true; // service left met quorum. quorum stil met. + didLeaveMetQuorum = true; // service left met quorum. quorum still met. } else { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-05-28 15:03:49
Revision: 7164 http://bigdata.svn.sourceforge.net/bigdata/?rev=7164&view=rev Author: thompsonbry Date: 2013-05-28 15:03:39 +0000 (Tue, 28 May 2013) Log Message: ----------- Some minor changes in the HA3 test suite: - checking the quorum token progression. - using sequential start helper class rather than winging it. - increased timeout for awaitQuorumMeet() (timeout was being exceeded locally on my laptop) Modified Paths: -------------- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Modified: branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-05-28 15:01:53 UTC (rev 7163) +++ branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2013-05-28 15:03:39 UTC (rev 7164) @@ -1013,7 +1013,7 @@ serverA, serverB }); // C will have go through Rebuild before joining - assertEquals(token2, awaitFullyMetQuorum()); + assertEquals(token2, awaitFullyMetQuorum(4/* ticks */)); // Note: I have seen this timeout. This warrants exploring. BBT. // // Wait until C is fully ready. @@ -1069,7 +1069,7 @@ final HAGlue serverC = startC(); // C will have go through Rebuild before joining - awaitFullyMetQuorum(); + awaitFullyMetQuorum(4/* ticks */); // Verify binary equality of ALL journals. assertDigestsEquals(new HAGlue[] { serverA, serverB, serverC }); @@ -1409,11 +1409,12 @@ * @throws Exception */ public void testStartABC_halogRestart() throws Exception { - // Start 3 services, with delay to ensure clean starts - startA(); - Thread.sleep(1000); // ensure A will be leader - startB(); - startC(); + // Start 3 services in strict order. +// startA(); +// Thread.sleep(1000); // ensure A will be leader +// startB(); +// startC(); + new ABC(true/*sequential*/); awaitFullyMetQuorum(); @@ -1422,6 +1423,11 @@ final File logsB = getHALogDirB(); final File logsC = getHALogDirC(); + final long token = awaitFullyMetQuorum(); + + // initial token value. + assertEquals(0L, token); + // committed log files not purged prior to fully met commit assertLogCount(logsA, 2); assertLogCount(logsB, 2); @@ -1449,8 +1455,11 @@ startA(); startB(); - awaitMetQuorum(); + final long token1 = awaitMetQuorum(); + // Verify new quorum token. + assertEquals(token + 1, token1); + // and check that there are open logs assertLogCount(logsA, 1); assertLogCount(logsB, 1); @@ -1458,7 +1467,7 @@ // add C startC(); - awaitFullyMetQuorum(); + assertEquals(token1, awaitFullyMetQuorum()); // and check again for ABC assertLogCount(logsA, 1); @@ -1470,14 +1479,17 @@ * Variant where A is shutdown first. */ public void testStartABC_halogRestart2() throws Exception { - // Start 3 services, with delay to ensure clean starts - startA(); - Thread.sleep(1000); // ensure A will be leader - startB(); - startC(); + // Start 3 services in strict order. +// startA(); +// Thread.sleep(1000); // ensure A will be leader +// startB(); +// startC(); + new ABC(true/* sequential */); - awaitFullyMetQuorum(); + final long token = awaitFullyMetQuorum(); + assertEquals(0L, token); + // setup log directories final File logsA = getHALogDirA(); final File logsB = getHALogDirB(); @@ -1495,6 +1507,9 @@ assertLogCount(logsA, 1); assertLogCount(logsB, 1); assertLogCount(logsC, 1); + + // Verify token unchanged. 
+ assertEquals(token, awaitFullyMetQuorum()); // Now shutdown all servers shutdownA(); @@ -1510,8 +1525,14 @@ startA(); startB(); - awaitMetQuorum(); + final long token1 = awaitMetQuorum(); + /* + * Verify new quorum token (could be a quorum meet when the leader + * leaves so this might be ONE (1) or TWO (2). + */ + assertTrue(token1 >= token + 1); + // and check that there are open logs assertLogCount(logsA, 1); assertLogCount(logsB, 1); @@ -1519,7 +1540,8 @@ // add C startC(); - awaitFullyMetQuorum(); + // Verify quorum token is unchanged. + assertEquals(token1, awaitFullyMetQuorum()); // and check again for ABC assertLogCount(logsA, 1); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
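Collected in one place, the token arithmetic these new assertions encode (the lines below are fragments from the patched test methods, shown with their rationale rather than as a standalone class):

    // A clean sequential ABC start yields the first fully met quorum: token 0.
    assertEquals(0L, token);

    // After shutting all three services down and restarting A+B, the quorum
    // re-meets with a new token. It may advance by one OR two, because a quorum
    // break can also occur when the leader leaves - hence ">=" rather than "==".
    assertTrue(token1 >= token + 1);

    // Starting C afterwards joins it into the already met quorum; the fully met
    // transition does not mint a new token.
    assertEquals(token1, awaitFullyMetQuorum());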
From: <tho...@us...> - 2013-05-28 15:02:02
Revision: 7163 http://bigdata.svn.sourceforge.net/bigdata/?rev=7163&view=rev Author: thompsonbry Date: 2013-05-28 15:01:53 +0000 (Tue, 28 May 2013) Log Message: ----------- javadoc on handleReplicatedWrite() Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-05-28 12:15:58 UTC (rev 7162) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumPipelineImpl.java 2013-05-28 15:01:53 UTC (rev 7163) @@ -1480,6 +1480,9 @@ /** * Core implementation handles the message and payload when received on a * service. + * <p> + * Note: Replication of the message and payload is handled by the caller. + * The implementation of this method is NOT responsible for replication. * * @param req * The synchronization request (optional). When non- Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-05-28 12:15:58 UTC (rev 7162) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/QuorumServiceBase.java 2013-05-28 15:01:53 UTC (rev 7163) @@ -232,6 +232,9 @@ /** * Core implementation handles the message and payload when received on a * service. + * <p> + * Note: Replication of the message and payload is handled by the caller. + * The implementation of this method is NOT responsible for replication. * * @param req * The synchronization request (optional). When non- This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2013-05-28 12:16:11
Revision: 7162 http://bigdata.svn.sourceforge.net/bigdata/?rev=7162&view=rev Author: thompsonbry Date: 2013-05-28 12:15:58 +0000 (Tue, 28 May 2013) Log Message: ----------- I tracked this down to the getHALogRootBlocksForWriteSet() method rather than the sendHALogForWriteSet() method. I have reviewed all code paths that open HALog files. I have commented out the finalize() method on HALogReader since it should not be required. Some cleanup on the HALog test suite with respect to the guaranteed close() of HALog files. Added public method on HALogNexus to open an HALog file using a File which makes an atomic decision regarding whether or not this is the live HALog file. Identified and closed a open file descriptor leak in the HAJournalServer RESTORE doRun() method. @see https://sourceforge.net/apps/trac/bigdata/ticket/678 (DGC Thread and Open File Leaks: sendHALogForWriteSet()) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -76,6 +76,18 @@ private final int magic; private final int version; + /** + * <strong>CAUTION: This constructor should not be used in circumstances in + * which the {@link HALogWriter} is active since this constructor can not + * differentiate atomically between the live HALog and a historical HALog + * and will always provide a read-only view, even if the live HALog file is + * opened.</strong> + * + * @param file + * The HALog file. + * + * @throws IOException + */ public HALogReader(final File file) throws IOException { m_file = file; @@ -157,25 +169,25 @@ } - /** - * {@inheritDoc} - * - * TODO This was added to address a file handle leak. However, I am quite - * dubious that this will fix the problem. While GC may be necessary to - * finalize {@link HALogReader} instances during a RESYNC, we have already - * invoked {@link #close()} on those instances in the SendHALogTask(). It - * may be better to remove this since finalize() methods add overhead to - * GC. - * - * @see <a - * href="https://sourceforge.net/apps/trac/bigdata/ticket/678#comment:4" - * > DGC Thread Leak: sendHALogForWriteSet() </a> - */ - @Override - protected void finalize() throws Throwable { - close(); - super.finalize(); - } +// /** +// * {@inheritDoc} +// * +// * TODO This was added to address a file handle leak. However, I am quite +// * dubious that this will fix the problem. While GC may be necessary to +// * finalize {@link HALogReader} instances during a RESYNC, we have already +// * invoked {@link #close()} on those instances in the SendHALogTask(). It +// * may be better to remove this since finalize() methods add overhead to +// * GC. 
+// * +// * @see <a +// * href="https://sourceforge.net/apps/trac/bigdata/ticket/678#comment:4" +// * > DGC Thread Leak: sendHALogForWriteSet() </a> +// */ +// @Override +// protected void finalize() throws Throwable { +// close(); +// super.finalize(); +// } /** * Hook for Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogWriter.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -47,6 +47,13 @@ /** * Return the commit counter that is expected for the writes that will be * logged (the same commit counter that is on the opening root block). + * <p> + * Note: Once the HALog is sealed, the closing root block will have a + * commitCounter that is equal to <code>getCommitCounter() + 1</code>. + * <p> + * Note: The HALog filename contains the closing commit counter - that is, + * the HALog file is named for the commit counter associated with the + * closing root block for a given write set. */ public long getCommitCounter(); Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -279,40 +279,55 @@ final IHALogReader reader = writer.getReader(openRB .getCommitCounter() + 1); - // This is the "live" HALog. - assertTrue(reader.isLive()); + try { - // The reader is open. - assertTrue(reader.isOpen()); - - // The HALog is logically empty. -// assertTrue(reader.isEmpty()); - - /* - * Note: Don't do this here. The method will block for the live - * HALog until the file is closed (sealed with the closing root - * block) or destroyed. - */ -// assertTrue(reader.hasMoreBuffers()); + // This is the "live" HALog. + assertTrue(reader.isLive()); - // close the reader. should not close the writer. - reader.close(); + // The reader is open. + assertTrue(reader.isOpen()); - // the reader is closed. - assertFalse(reader.isOpen()); - - // once closed, this method should return immediately. - assertFalse(reader.hasMoreBuffers()); - - // the writer is still open. - assertTrue(writer.isHALogOpen()); + // The HALog is logically empty. + // assertTrue(reader.isEmpty()); - // double-close the reader. should be ignored. - reader.close(); + /* + * Note: Don't do this here. The method will block for the + * live HALog until the file is closed (sealed with the + * closing root block) or destroyed. + */ + // assertTrue(reader.hasMoreBuffers()); + + // close the reader. should not close the writer. + reader.close(); + + // the reader is closed. + assertFalse(reader.isOpen()); + + // once closed, this method should return immediately. + assertFalse(reader.hasMoreBuffers()); + + // the writer is still open. + assertTrue(writer.isHALogOpen()); + + // double-close the reader. should be ignored. + reader.close(); + + // the reader is closed. + assertFalse(reader.isOpen()); + + // the writer should *still* be open. + assertTrue(writer.isHALogOpen()); + + } finally { + + if(reader.isOpen()) { + + reader.close(); + + } + + } - // the writer should *still* be open. 
- assertTrue(writer.isHALogOpen()); - } /* @@ -549,12 +564,12 @@ // Note: Throws FileNotFoundException if does not exist. final IHALogReader reader = writer.getReader(commitCounter); - assertNotNull(reader); - - long nread = 0L; - try { + assertNotNull(reader); + + long nread = 0L; + while (reader.hasMoreBuffers()) { checkWriterFuture(); @@ -860,6 +875,8 @@ */ public void test_doubleOpen_close_historicalHALog() throws Exception { + IHALogReader r1 = null, r2 = null; + final HALogWriter writer = new HALogWriter(logdir); try { @@ -901,16 +918,14 @@ * Setup two readers on that HALog file. */ - final IHALogReader r1 = writer.getReader(openRB - .getCommitCounter() + 1); + r1 = writer.getReader(openRB.getCommitCounter() + 1); assertFalse(r1.isLive()); assertTrue(r1.isOpen()); assertFalse(r1.isEmpty()); assertTrue(r1.hasMoreBuffers()); - final IHALogReader r2 = writer.getReader(openRB - .getCommitCounter() + 1); + r2 = writer.getReader(openRB.getCommitCounter() + 1); assertFalse(r2.isLive()); assertTrue(r2.isOpen()); @@ -965,6 +980,16 @@ writer.disableHALog(); + if (r1 != null && r1.isOpen()) { + r1.close(); + r1 = null; + } + + if (r2 != null && r2.isOpen()) { + r2.close(); + r2 = null; + } + } // Read all files in the test directory. @@ -1038,59 +1063,68 @@ final IHALogReader r1 = writer .getReader(openRB.getCommitCounter() + 1); - assertFalse(r1.isLive()); - assertTrue(r1.isOpen()); - assertFalse(r1.isEmpty()); - assertTrue(r1.hasMoreBuffers()); + try { - for (int i = 0; i < MAX_OPEN_FILE_HANDLES; i++) { + assertFalse(r1.isLive()); + assertTrue(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertTrue(r1.hasMoreBuffers()); - final IHALogReader r2 = writer.getReader(openRB - .getCommitCounter() + 1); + for (int i = 0; i < MAX_OPEN_FILE_HANDLES; i++) { - assertFalse(r2.isLive()); - assertTrue(r2.isOpen()); - assertFalse(r2.isEmpty()); - assertTrue(r2.hasMoreBuffers()); + final IHALogReader r2 = writer.getReader(openRB + .getCommitCounter() + 1); - /* - * Now use the 2nd reader to read the data to make sure that the - * IHALogReader is really open and functional. - */ - try { + assertFalse(r2.isLive()); + assertTrue(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertTrue(r2.hasMoreBuffers()); - // Allocate a heap ByteBuffer - final ByteBuffer rbuf = ByteBuffer - .allocate(DirectBufferPool.INSTANCE - .getBufferCapacity()); + /* + * Now use the 2nd reader to read the data to make sure that + * the IHALogReader is really open and functional. + */ + try { - while (r2.hasMoreBuffers()) { + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE + .getBufferCapacity()); - // read data into reader's buffer. - r2.processNextBuffer(rbuf); + while (r2.hasMoreBuffers()) { + // read data into reader's buffer. + r2.processNextBuffer(rbuf); + + } + + } finally { + + r2.close(); + } - } finally { + assertFalse(r2.isLive()); + assertFalse(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertFalse(r2.hasMoreBuffers()); - r2.close(); - } - assertFalse(r2.isLive()); - assertFalse(r2.isOpen()); - assertFalse(r2.isEmpty()); - assertFalse(r2.hasMoreBuffers()); + // close [r1]. + r1.close(); + assertFalse(r1.isLive()); + assertFalse(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertFalse(r1.hasMoreBuffers()); + } finally { + + if (r1.isOpen()) + r1.close(); + } - - // close [r1]. 
- r1.close(); - assertFalse(r1.isLive()); - assertFalse(r1.isOpen()); - assertFalse(r1.isEmpty()); - assertFalse(r1.hasMoreBuffers()); - + } finally { writer.disableHALog(); Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -737,7 +737,13 @@ } } - + + /** + * {@inheritDoc} + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/678" > + * DGC Thread Leak: sendHALogForWriteSet() </a> + */ @Override public IHALogRootBlocksResponse getHALogRootBlocksForWriteSet( final IHALogRootBlocksRequest msg) throws IOException { @@ -764,16 +770,24 @@ } - final HALogReader r = new HALogReader(logFile); + final IHALogReader r = getHALogNexus().getReader(logFile); - final HALogRootBlocksResponse resp = new HALogRootBlocksResponse( - r.getOpeningRootBlock(), r.getClosingRootBlock()); + try { - if (haLog.isDebugEnabled()) - haLog.debug("msg=" + msg + ", resp=" + resp); + final HALogRootBlocksResponse resp = new HALogRootBlocksResponse( + r.getOpeningRootBlock(), r.getClosingRootBlock()); - return resp; + if (haLog.isDebugEnabled()) + haLog.debug("msg=" + msg + ", resp=" + resp); + return resp; + + } finally { + + r.close(); + + } + } finally { logLock.unlock(); @@ -820,14 +834,15 @@ // Note: open file handle - must be closed eventually. r = getHALogNexus().getReader(commitCounter); + // true iff is live log at moment reader was opened. isLive = r.isLive(); - - // Task sends an HALog file along the pipeline. + + // Task sends an HALog file along the pipeline. ft = new FutureTaskMon<Void>(new SendHALogTask(req, r)); - // Run task. - getExecutorService().submit(ft); - + // Run task. + getExecutorService().submit(ft); + // Clear reference. File handle will be closed by task. 
r = null; Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -1909,11 +1909,11 @@ final long commitCounter = journal.getRootBlockView() .getCommitCounter(); + final IHALogReader r = journal.getHALogNexus().getReader( + commitCounter + 1); + try { - final IHALogReader r = journal.getHALogNexus() - .getReader(commitCounter + 1); - if (r.isEmpty()) { /* @@ -1954,6 +1954,10 @@ break; + } finally { + + r.close(); + } } Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -43,6 +43,7 @@ import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; import com.bigdata.ha.QuorumServiceBase; +import com.bigdata.ha.halog.HALogReader; import com.bigdata.ha.halog.HALogWriter; import com.bigdata.ha.halog.IHALogReader; import com.bigdata.ha.halog.IHALogWriter; @@ -831,6 +832,60 @@ } /** + * Return the {@link IHALogReader} for the specified HALog file. If the + * request identifies the HALog that is currently being written, then an + * {@link IHALogReader} will be returned that will "see" newly written + * entries on the HALog. If the request identifies a historical HALog that + * has been closed and which exists, then a reader will be returned for that + * HALog file. Otherwise, an exception is thrown. + * + * @param logFile + * The HALog file. + * + * @return The {@link IHALogReader}. + * + * @throws IllegalArgumentException + * if the argument is <code>null</code>. + * @throws IOException + * if the HALog file does not exist or can not be read. + */ + public IHALogReader getReader(final File logFile) throws IOException { + + if (logFile == null) + throw new IllegalArgumentException(); + + logLock.lock(); + + try { + + if (haLogWriter.getFile().equals(logFile)) { + + /* + * This is the live HALog file. + */ + + // The closing commit counter. + final long cc = haLogWriter.getCommitCounter() + 1; + + return haLogWriter.getReader(cc); + + } + + /* + * This is an historical HALog file. + */ + + return new HALogReader(logFile); + + } finally { + + logLock.unlock(); + + } + + } + + /** * Open an HALog file for the write set starting with the given root block. 
* * @param rootBlock Modified: branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java =================================================================== --- branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2013-05-24 17:23:19 UTC (rev 7161) +++ branches/READ_CACHE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HAStatusServletUtil.java 2013-05-28 12:15:58 UTC (rev 7162) @@ -315,11 +315,11 @@ final long nbytes = rec.sizeOnDisk(); final long closingCommitCounter = rec.getRootBlock() .getCommitCounter(); + String digestStr = null; + final File file = nexus + .getHALogFile(closingCommitCounter); final IHALogReader r = nexus.getHALogWriter() .getReader(closingCommitCounter); - final File file = nexus - .getHALogFile(closingCommitCounter); - String digestStr = null; try { if (digests && !r.isEmpty()) { try { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
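Note: the common thread in the r7162 changes above is resource discipline: every IHALogReader handed out by HALogNexus.getReader(...) — whether it wraps the live HALog or a closed, historical file — is now released in a finally block so the backing file handle cannot leak. Below is a minimal sketch of that pattern, assuming the getReader(File), getOpeningRootBlock(), getClosingRootBlock() and close() signatures shown in the diff; the wrapper class name and the import package guessed for HALogRootBlocksResponse are illustrative only, not part of the commit.

{{{
import java.io.File;
import java.io.IOException;

import com.bigdata.ha.halog.IHALogReader;
import com.bigdata.ha.msg.HALogRootBlocksResponse; // assumed package
import com.bigdata.journal.jini.ha.HALogNexus;

public class HALogReaderPatternSketch {

    /**
     * Acquire a reader, use it, and always close it. The same try/finally
     * shape appears in getHALogRootBlocksForWriteSet() and in the RESYNC
     * loop of HAJournalServer in the diff above.
     */
    static HALogRootBlocksResponse readRootBlocks(final HALogNexus nexus,
            final File logFile) throws IOException {

        // May be a reader against the live HALog or a historical file.
        final IHALogReader r = nexus.getReader(logFile);

        try {

            // Both root blocks must be read while the reader is open.
            return new HALogRootBlocksResponse(r.getOpeningRootBlock(),
                    r.getClosingRootBlock());

        } finally {

            // Release the backing file handle (see ticket #678).
            r.close();

        }

    }

}
}}}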
From: <tho...@us...> - 2013-05-24 17:23:30
Revision: 7161 http://bigdata.svn.sourceforge.net/bigdata/?rev=7161&view=rev Author: thompsonbry Date: 2013-05-24 17:23:19 +0000 (Fri, 24 May 2013) Log Message: ----------- Write cache payload compression prior to replication and compacted HALog files. Changes in process to also provide payload storage for the WORM in the HALog files. Added a CompressorRegistry for configurable block compression schemes. WCS compaction still observed to fail for testStartAB_C_LiveResync and is disabled in the WCS constructor. Compression, WriteCache, WORM, RWJournal, HA, and SPARQL test suites are green locally. See https://sourceforge.net/apps/trac/bigdata/ticket/652 (Compress write cache blocks for replication and in HALogs) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAWriteMessage.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/IRecordCompressor.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/NOPRecordCompressor.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/RecordCompressor.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/DiskBackedBufferStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/sector/MemStrategy.java branches/READ_CACHE/bigdata/src/test/com/bigdata/io/compression/TestAll.java branches/READ_CACHE/bigdata/src/test/com/bigdata/rwstore/TestRWJournal.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java Added Paths: ----------- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/CompressorRegistry.java branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/GZipCompressor.java branches/READ_CACHE/bigdata/src/test/com/bigdata/io/compression/TestCompressorRegistry.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3WORMJournalServer.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 
=================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -347,26 +347,19 @@ @Override public FileChannel reopenChannel() throws IOException { - - final Lock lock = m_stateLock.readLock(); - - lock.lock(); - - try { - + final Lock lock = m_stateLock.readLock(); + lock.lock(); + try { if (m_state == null || m_state.m_channel == null || !m_state.m_channel.isOpen()) { throw new IOException("Closed"); - + } - + return m_state.m_channel; - } finally { - - lock.unlock(); - + lock.unlock(); } } @@ -527,6 +520,7 @@ } switch (m_rootBlock.getStoreType()) { + case WORM: case RW: { /* * Write the WriteCache block on the channel. @@ -538,12 +532,7 @@ FileChannelUtility.writeAll(reopener, data.duplicate(), m_position); m_position += nbytes; - } - case WORM: { - /* - * We will use the HA failover read API to recover the block - * from a node in the quorum when we need to replay the HA log. - */ + break; } default: @@ -776,14 +765,12 @@ }; private FileState(final File file, final StoreTypeEnum storeType) - throws FileNotFoundException { - + throws FileNotFoundException { m_haLogFile = file; m_storeType = storeType; m_raf = new RandomAccessFile(m_haLogFile, "rw"); m_channel = m_raf.getChannel(); m_accessors = 1; // the writer is a reader also - } /** @@ -828,11 +815,11 @@ m_raf.close(); } } finally { - // wake up anyone waiting. - this.notifyAll(); - } - } + // wake up anyone waiting. + this.notifyAll(); + } } + } public void addRecord() { synchronized (this) { @@ -879,72 +866,66 @@ } } - /** - * - * @param record - * - the next sequence required - */ + /** + * + * @param record + * - the next sequence required + */ /* * TODO We should support wait up to a timeout here to make the API more * pleasant. */ public void waitOnStateChange(final long record) { - synchronized (this) { - + synchronized (this) { // Condition variable. while (m_records < record && !m_committed) { if (!isOpen()) { - // Provable nothing left to read. - return; + return; + } - } + try { + wait(); + } catch (InterruptedException e) { + + // Propagate the interrupt. + Thread.currentThread().interrupt(); + + return; + + } - try { + } - wait(); + } - } catch (InterruptedException e) { - - // Propagate the interrupt. - Thread.currentThread().interrupt(); - - return; - - } - - } - - } - } } // class FileState static class OpenHALogReader implements IHALogReader { - private final FileState m_state; private long m_record = 0L; private long m_position = headerSize0; // initial position - + /** <code>true</code> iff this reader is open. */ private final AtomicBoolean open = new AtomicBoolean(true); - OpenHALogReader(final FileState state) { + OpenHALogReader(final FileState state) { if (state == null) throw new IllegalArgumentException(); - m_state = state; + m_state = state; // Note: Must be synchronized for visibility and atomicity! 
synchronized (m_state) { - m_state.m_accessors++; + m_state.m_accessors++; } @@ -988,7 +969,7 @@ if (!isOpen()) return false; - + synchronized (m_state) { /* @@ -998,13 +979,13 @@ if (!m_state.isOpen()) return false; - if (m_state.isCommitted() && m_state.recordCount() <= m_record) - return false; + if (m_state.isCommitted() && m_state.recordCount() <= m_record) + return false; - if (m_state.recordCount() > m_record) - return true; + if (m_state.recordCount() > m_record) + return true; - m_state.waitOnStateChange(m_record + 1); + m_state.waitOnStateChange(m_record + 1); } @@ -1018,33 +999,27 @@ return open.get(); } - + @Override public boolean isEmpty() { - - return m_state.isEmpty(); - + return m_state.isEmpty(); } - @Override + @Override public IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) - throws IOException { + throws IOException { final IHAWriteMessage msg; synchronized (m_state) { + final long savePosition = m_state.m_channel.position(); + m_state.m_channel.position(m_position); - final long savePosition = m_state.m_channel.position(); - - m_state.m_channel.position(m_position); - msg = HALogReader.processNextBuffer(m_state.m_raf, m_state.reopener, m_state.m_storeType, clientBuffer); m_position = m_state.m_channel.position(); - m_state.m_channel.position(savePosition); - } m_record++; @@ -1053,7 +1028,7 @@ } @Override - public void close() throws IOException { + public void close() throws IOException { // Note: this pattern prevents a double-close of a reader. if (open.compareAndSet(true/* expected */, false/* newValue */)) { @@ -1082,20 +1057,18 @@ } - m_state.close(); - - } - - } - + m_state.close(); + } } + } + @Override public void computeDigest(final MessageDigest digest) throws DigestException, IOException { HALogReader.computeDigest(m_state.reopener, digest); - + } } // class OpenHAReader Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -77,7 +77,7 @@ * same file and one is closed, then other will remain open. Likewise, if a * reader is open for the live HALog file, closing the writer will not close * the reader and closing the reader will not close the writer. - */ + */ void close() throws IOException; /** @@ -107,7 +107,7 @@ */ IRootBlockView getClosingRootBlock() throws IOException; - /** + /** * Checks whether we have reached the end of the file (blocking). * <p> * Note: This method will block if this is the live HALog. This allows a @@ -127,7 +127,7 @@ * * @throws IOException * if there is an error reading from the backing file. 
- */ + */ boolean hasMoreBuffers() throws IOException; /** Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAWriteMessage.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -30,8 +30,13 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import java.nio.ByteBuffer; import java.util.UUID; +import org.apache.log4j.Logger; + +import com.bigdata.io.compression.CompressorRegistry; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.journal.StoreTypeEnum; /** @@ -44,6 +49,8 @@ public class HAWriteMessage extends HAWriteMessageBase implements IHAWriteMessage { + protected static final Logger log = Logger.getLogger(HAWriteMessage.class); + /** * */ @@ -61,8 +68,11 @@ /** The write sequence since last commit beginning at zero */ private long sequence; - /** The type of backing store (RW or WORM). */ + /** The type of backing store (RW or WORM). */ private StoreTypeEnum storeType; + + /** Indicates if data is compressed (if included in file). */ + private String compressorKey; /** The quorum token for which this message is valid. */ private long quorumToken; @@ -102,6 +112,8 @@ return sequence; } + + /* (non-Javadoc) * @see com.bigdata.journal.ha.IHAWriteMessage#getStoreType() */ @@ -134,8 +146,13 @@ return firstOffset; } - public String toString() { + @Override + public String getCompressorKey() { + return compressorKey; + } + public String toString() { + return getClass().getName() // + "{size=" + getSize() // + ",chksum=" + getChk() // @@ -144,6 +161,7 @@ + ",commitTime=" + lastCommitTime // + ",sequence=" + sequence // + ",storeType=" + getStoreType() // + + ",compressorKey=" + getCompressorKey() // + ",quorumToken=" + getQuorumToken()// + ",fileExtent=" + getFileExtent() // + ",firstOffset=" + getFirstOffset() // @@ -186,6 +204,8 @@ * The length of the backing file on the disk. * @param firstOffset * The file offset at which the data will be written (WORM only). + * + * @deprecated by the version that accepts the compressor key. */ public HAWriteMessage(final UUID uuid, final long commitCounter, final long commitTime, final long sequence, final int sze, @@ -193,6 +213,52 @@ final long quorumToken, final long fileExtent, final long firstOffset) { + this(uuid, commitCounter, commitTime, sequence, sze, chk, storeType, + quorumToken, fileExtent, firstOffset, null/* compressorKey */); + + } + + /** + * @param uuid + * The {@link UUID} associated with the backing store on the + * leader. This can be used to decide whether the message is for + * a given store, or (conversly) whether the receiver has already + * setup its root blocks based on the leader (and hence has the + * correct {@link UUID} for its local store). + * @param commitCounter + * The commit counter for the current root block for the write + * set which is being replicated by this message. + * @param commitTime + * The commit time for the current root block for the write set + * which is being replicated by this message. + * @param sequence + * The write cache block sequence number. This is reset to ZERO + * (0) for the first replicated write cache block in each write + * set. + * @param sze + * The #of bytes in the payload. + * @param chk + * The checksum of the payload. 
+ * @param storeType + * The type of backing store (RW or WORM). + * @param quorumToken + * The quorum token for which this message is valid. + * @param fileExtent + * The length of the backing file on the disk. + * @param firstOffset + * The file offset at which the data will be written (WORM only). + * @param compressorKey + * The key under which an {@link IRecordCompressor} has been + * registered against the {@link CompressorRegistry} -or- + * <code>null</code> for no compression. + */ + public HAWriteMessage(final UUID uuid, final long commitCounter, + final long commitTime, final long sequence, final int sze, + final int chk, final StoreTypeEnum storeType, + final long quorumToken, final long fileExtent, + final long firstOffset, + final String compressorKey) { + super(sze, chk); if (uuid == null) @@ -217,6 +283,8 @@ this.firstOffset = firstOffset; + this.compressorKey = compressorKey; + } /** @@ -235,10 +303,31 @@ private static final byte VERSION1 = 0x1; /** + * Supports optional data compression for the payload (backwards compatible + * default for {@link #VERSION1} is no compression). + */ + private static final byte VERSION2 = 0x2; + + /** * The current version. */ - private static final byte currentVersion = VERSION1; + private static final byte currentVersion = VERSION2; // VERSION2; + /** + * Determine whether message data is compressed + */ + private static boolean compressData = true; // default + + /** + * Static method to indicate whether the message will reference + * compressed data. + * + * @return + */ + public static boolean isDataCompressed() { + return compressData; + } + @Override public boolean equals(final Object obj) { @@ -268,11 +357,17 @@ ClassNotFoundException { super.readExternal(in); - final byte version = in.readByte(); + + final byte version = in.readByte(); switch (version) { case VERSION0: uuid = null; // Note: not available. break; + case VERSION2: { + final boolean isNull = in.readBoolean(); + compressorKey = isNull ? null : in.readUTF(); + // fall through. + } case VERSION1: uuid = new UUID(// in.readLong(), // MSB @@ -295,6 +390,11 @@ super.writeExternal(out); if (currentVersion >= VERSION1 && uuid != null) { out.write(currentVersion); + if (currentVersion >= VERSION2) { + out.writeBoolean(compressorKey == null); + if (compressorKey != null) + out.writeUTF(compressorKey); + } out.writeLong(uuid.getMostSignificantBits()); out.writeLong(uuid.getLeastSignificantBits()); } else { @@ -310,4 +410,61 @@ out.writeLong(firstOffset); } +// // Versions of compress/expand with Deflator using RecordCompressor +// static IRecordCompressor compressor = CompressorRegistry.fetch(CompressorRegistry.DEFLATE_BEST_SPEED); +// static String compressorKey = CompressorRegistry.DEFLATE_BEST_SPEED; + +// /** +// * This configuration method has a dual role since if the Deflater is configured +// * with NO_COMPRESSION, the message indicates directly that the buffer is not compressed +// * avoiding the double buffering of the Deflater class. +// * +// * Note that the strategy is only applicable for the compression, the expansion is +// * determined by the source data. 
+// */ +// public static void setCompression(final String strategy) { +// compressorKey = strategy; +// compressor = CompressorRegistry.fetch(strategy); +// } +// +// public ByteBuffer compress(final ByteBuffer buffer) { +// +// final IRecordCompressor compressor = CompressorRegistry.getInstance() +// .get(compressionMethod); +// +// if (compressor == null) +// throw new UnsupportedOperationException("Unknown compressor: " +// + compressionMethod); +// +// return compressor.compress(buffer); +// } + + public ByteBuffer expand(final ByteBuffer buffer) { + + final String compressorKey = getCompressorKey(); + + if (compressorKey == null) { + + /* + * No compression. + */ + + return buffer; + + } + + final IRecordCompressor compressor = CompressorRegistry.getInstance() + .get(compressorKey); + + if (compressor == null) + throw new UnsupportedOperationException("Unknown compressor: " + + compressorKey); + + return compressor.decompress(buffer); + + } + + // public static IRecordCompressor getCompressor() { + // return compressor; + // } } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAWriteMessage.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAWriteMessage.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAWriteMessage.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -23,8 +23,10 @@ */ package com.bigdata.ha.msg; +import java.nio.ByteBuffer; import java.util.UUID; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.journal.StoreTypeEnum; /** @@ -51,6 +53,17 @@ */ long getSequence(); + /** + * Applies associated {@link IRecordCompressor} (if any) to decompress the + * data + */ + ByteBuffer expand(ByteBuffer bin); + + /** + * Return the associated {@link IRecordCompressor} key (if any). + */ + String getCompressorKey(); + /** The type of backing store (RW or WORM). */ StoreTypeEnum getStoreType(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/pipeline/HAReceiveService.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -1076,7 +1076,7 @@ + ", number of reads: " + reads + ", buffer: " + localBuffer); - if (message.getChk() != (int) chk.getValue()) { + if (message.getChk() != (int) chk.getValue()) { throw new ChecksumError("msg=" + message.toString() + ", actual=" + chk.getValue()); } Added: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/CompressorRegistry.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/CompressorRegistry.java (rev 0) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/CompressorRegistry.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -0,0 +1,120 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.io.compression; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.zip.Deflater; + +/** + * Registration pattern for {@link IRecordCompressor} implementations. + * + * @author Martyn Cutcher + */ +public class CompressorRegistry { + + /** + * Key for {@link Deflater} compression with BEST SPEED. + * + * @see RecordCompressor + */ + final public static String DEFLATE_BEST_SPEED = "DBS"; + + /** + * Key for {@link Deflater} compression with BEST COMPRESSION. + * + * @see RecordCompressor + */ + final public static String DEFLATE_BEST_COMPRESSION = "DBC"; + + /** + * Key for GZIP compression. + * + * @see GZipCompressor + */ + final public static String GZIP = "GZIP"; + + /** + * Key for no compression. + * <p> + * Note: <code>null</code> is more efficient than the + * {@link NOPRecordCompressor} since it avoids all copy for all + * {@link IRecordCompressor} methods. + * + * @see NOPRecordCompressor + */ + final public static String NOP = "NOP"; + + private static CompressorRegistry DEFAULT = new CompressorRegistry(); + + static public CompressorRegistry getInstance() { + + return DEFAULT; + + } + + final private ConcurrentHashMap<String, IRecordCompressor> compressors = new ConcurrentHashMap<String, IRecordCompressor>(); + + private CompressorRegistry() { + add(DEFLATE_BEST_SPEED, new RecordCompressor(Deflater.BEST_SPEED)); + add(DEFLATE_BEST_COMPRESSION, new RecordCompressor(Deflater.BEST_COMPRESSION)); + add(GZIP, new GZipCompressor()); + add(NOP, new NOPRecordCompressor()); + } + + /** + * Global hook to allow customized compression strategies + * + * @param key + * @param compressor + */ + public void add(final String key, final IRecordCompressor compressor) { + + if (compressors.putIfAbsent(key, compressor) != null) { + + throw new UnsupportedOperationException("Already declared: " + key); + + } + + } + + /** + * Return the {@link IRecordCompressor} registered under that key (if any). + * + * @param key + * The key (optional - may be <code>null</code>). + * @return The {@link IRecordCompressor} -or- <code>null</code> if the key + * is <code>null</code> or if there is nothing registered under that + * key. + */ + public IRecordCompressor get(final String key) { + + if (key == null) + return null; + + return compressors.get(key); + + } + +} Added: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/GZipCompressor.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/GZipCompressor.java (rev 0) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/GZipCompressor.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -0,0 +1,148 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2013. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +package com.bigdata.io.compression; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +import com.bigdata.io.ByteBufferInputStream; +import com.bigdata.io.ByteBufferOutputStream; + +public class GZipCompressor implements IRecordCompressor { + + @Override + public void compress(final ByteBuffer bin, final ByteBuffer out) { + + compress(bin, new ByteBufferOutputStream(out)); + + } + + @Override + public ByteBuffer compress(final ByteBuffer bin) { + + final ByteArrayOutputStream os = new ByteArrayOutputStream(); + + compress(bin, os); + + return ByteBuffer.wrap(os.toByteArray()); + } + + @Override + public void compress(final ByteBuffer bin, final OutputStream os) { + try { + final GZIPOutputStream gzout = new GZIPOutputStream(os); + final DataOutputStream dout = new DataOutputStream(gzout); + + // First write the length of the expanded data + dout.writeInt(bin.limit()); + if (bin.hasArray()) { + dout.write(bin.array()); + } else { + final byte[] tbuf = new byte[bin.limit()]; + bin.get(tbuf); + dout.write(tbuf); + } + dout.flush(); + gzout.flush(); + + dout.close(); + gzout.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void compress(final byte[] bytes, final OutputStream os) { + compress(bytes, 0, bytes.length, os); + } + + @Override + public void compress(final byte[] bytes, final int off, final int len, + final OutputStream os) { + try { + final GZIPOutputStream gzout = new GZIPOutputStream(os); + final DataOutputStream dout = new DataOutputStream(gzout); + + // First write the length of the expanded data + dout.writeInt(len); + dout.write(bytes, off, len); + + dout.flush(); + gzout.flush(); + + dout.close(); + gzout.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public ByteBuffer decompress(final ByteBuffer bin) { + final InputStream instr; + if (bin.hasArray()) { + instr = new ByteArrayInputStream(bin.array()); + } else { + instr = new ByteBufferInputStream(bin); + } + + return decompress(instr); + } + + @Override + public ByteBuffer decompress(final byte[] bin) { + return decompress(new ByteArrayInputStream(bin)); + } + + public ByteBuffer decompress(final InputStream instr) { + try { + final GZIPInputStream gzin = new GZIPInputStream(instr); + final DataInputStream din = new DataInputStream(gzin); + + final int length = din.readInt(); + final byte[] xbuf = new byte[length]; + for (int cursor = 0; cursor < length;) { + final int rdlen = din.read(xbuf, cursor, (length - cursor)); + + cursor += rdlen; + + } + + return ByteBuffer.wrap(xbuf); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + +} Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/IRecordCompressor.java 
=================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/IRecordCompressor.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/IRecordCompressor.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -40,6 +40,32 @@ public interface IRecordCompressor { /** + * Compresses data onto the provided ByteBuffer. + * + * @param bin + * The data. The data from the position to the limit will be + * compressed. The position will be advanced to the limit as a + * side effect. + * + * @param out + * The ByteBuffer into which the compressed data is written + */ + void compress(final ByteBuffer bin, final ByteBuffer out); + + /** + * Compresses data onto the provided ByteBuffer. + * + * @param bin + * The data. The data from the position to the limit will be + * compressed. The position will be advanced to the limit as a + * side effect. + * + * @return + * The ByteBuffer into which the compressed data is written + */ + ByteBuffer compress(final ByteBuffer bin); + + /** * Compresses data onto the output stream. * * @param bin Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/NOPRecordCompressor.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/NOPRecordCompressor.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/NOPRecordCompressor.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -57,6 +57,14 @@ } + public void compress(ByteBuffer bin, ByteBuffer out) { + out.put(bin); + } + + public ByteBuffer compress(ByteBuffer bin) { + return bin; + } + /** * Writes the buffer on the output stream. */ Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/RecordCompressor.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/RecordCompressor.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/compression/RecordCompressor.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -28,6 +28,7 @@ package com.bigdata.io.compression; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.Externalizable; import java.io.IOException; import java.io.ObjectInput; @@ -41,6 +42,7 @@ import com.bigdata.btree.IndexSegment; import com.bigdata.io.ByteBufferInputStream; +import com.bigdata.io.ByteBufferOutputStream; /** * Bulk data (de-)compressor used for leaves in {@link IndexSegment}s. 
The @@ -114,8 +116,21 @@ } - public void compress(final ByteBuffer bin, final OutputStream os) { + public void compress(ByteBuffer bin, ByteBuffer out) { + compress(bin, new ByteBufferOutputStream(out)); + } + + public ByteBuffer compress(ByteBuffer bin) { + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + + compress(bin, out); + + return ByteBuffer.wrap(out.toByteArray()); + } + + public void compress(final ByteBuffer bin, final OutputStream os) { + if (bin.hasArray() && bin.position() == 0 && bin.limit() == bin.capacity()) { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -61,6 +61,8 @@ import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; +import com.bigdata.io.compression.CompressorRegistry; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.journal.AbstractBufferStrategy; import com.bigdata.journal.StoreTypeEnum; import com.bigdata.journal.WORMStrategy; @@ -772,6 +774,16 @@ // package private : exposed to WriteTask.call(). int getWholeBufferChecksum(final ByteBuffer checksumBuffer) { + final ByteBuffer src = peek().duplicate(); + // flip(limit=pos;pos=0) + src.flip(); + + return getWholeBufferChecksum(checksumBuffer, src); + + } + + int getWholeBufferChecksum(final ByteBuffer checksumBuffer, final ByteBuffer src) { + if (checker == null) throw new UnsupportedOperationException(); @@ -785,17 +797,13 @@ * checksum. */ - final ByteBuffer b = peek().duplicate(); - // flip(limit=pos;pos=0) - b.flip(); - - assert checksumBuffer.capacity() == b.capacity() : "b.capacity=" - + b.capacity() + ", checksumBuffer.capacity=" + assert checksumBuffer.capacity() == src.capacity() : "b.capacity=" + + src.capacity() + ", checksumBuffer.capacity=" + checksumBuffer.capacity(); checksumBuffer.limit(checksumBuffer.capacity()); checksumBuffer.position(0); - checksumBuffer.put(b); + checksumBuffer.put(src); checksumBuffer.flip(); checker.reset(); @@ -1526,30 +1534,132 @@ } +// /** +// * Return the RMI message object that will accompany the payload from the +// * {@link WriteCache} when it is replicated along the write pipeline. +// * +// * @return cache A {@link WriteCache} to be replicated. +// */ +// final IHAWriteMessage newHAWriteMessage(// +// final UUID storeUUID, +// final long quorumToken, +// final long lastCommitCounter,// +// final long lastCommitTime,// +// final long sequence, +// final ByteBuffer tmp +// ) { +// +// return new HAWriteMessage(// +// storeUUID,// +// lastCommitCounter,// +// lastCommitTime,// +// sequence, // +// bytesWritten(), getWholeBufferChecksum(tmp), +// prefixWrites ? StoreTypeEnum.RW : StoreTypeEnum.WORM, +// quorumToken, fileExtent.get(), firstOffset.get()); +// +// } + /** - * Return the RMI message object that will accompany the payload from the - * {@link WriteCache} when it is replicated along the write pipeline. - * - * @return cache A {@link WriteCache} to be replicated. + * Used to retrieve the {@link HAWriteMessage} AND the associated + * {@link ByteBuffer}. + * <p> + * This allows the {@link WriteCache} to compress the data and create the + * correct {@link HAWriteMessage}. 
*/ - final IHAWriteMessage newHAWriteMessage(// - final UUID storeUUID, - final long quorumToken, + static public class HAPackage { + + /** + * The message as it will be sent. + */ + private final IHAWriteMessage m_msg; + /** + * The data as it will be sent, with compression already applied if + * compression will be used. + */ + private final ByteBuffer m_data; + + /** + * + * @param msg + * The message as it will be sent. + * @param data + * The data as it will be sent, with compression already + * applied if compression will be used. + */ + HAPackage(final IHAWriteMessage msg, final ByteBuffer data) { + m_msg = msg; + m_data = data; + } + + public IHAWriteMessage getMessage() { + return m_msg; + } + + public ByteBuffer getData() { + return m_data; + } + } + + /** + * Return the optional key for the {@link CompressorRegistry} which + * identifies the {@link IRecordCompressor} to be applied. + */ + protected String getCompressorKey() { + + // Default is NO compression. + return null; + + } + + /** + * Return the RMI message object plus the payload (the payload has been + * optionally compressed, depending on the configuration). + */ + final HAPackage newHAPackage(// + final UUID storeUUID,// + final long quorumToken,// final long lastCommitCounter,// final long lastCommitTime,// - final long sequence, - final ByteBuffer tmp + final long sequence,// + final ByteBuffer checksumBuffer ) { + + final ByteBuffer b = peek().duplicate(); + b.flip(); - return new HAWriteMessage(// + final ByteBuffer send; + + final String compressorKey = getCompressorKey(); + + final IRecordCompressor compressor = CompressorRegistry.getInstance() + .get(compressorKey); + + if (compressor != null) { + + // Compress current buffer + send = compressor.compress(b); + + } else { + + send = b; + + } + + // log.warn("Message, position: " + send.position() + ", limit: " + send.limit()); + + final HAWriteMessage msg = new HAWriteMessage(// storeUUID,// lastCommitCounter,// lastCommitTime,// sequence, // - bytesWritten(), getWholeBufferChecksum(tmp), + send.limit(), getWholeBufferChecksum(checksumBuffer, send.duplicate()), prefixWrites ? StoreTypeEnum.RW : StoreTypeEnum.WORM, - quorumToken, fileExtent.get(), firstOffset.get()); - + quorumToken, fileExtent.get(), firstOffset.get(), + compressorKey); + + return new HAPackage(msg, send); + } /** @@ -1829,6 +1939,8 @@ recordMap.clear(); final int limit = buf.limit(); // end position. int pos = buf.position(); // start position + + // log.trace("position: " + pos + ", limit: " + limit); while (pos < limit) { buf.position(pos); // 8 bytes (negative iff record is deleted) @@ -1839,7 +1951,10 @@ assert recordLength != 0; // 4 bytes final int latchedAddr = buf.getInt(); -// if (sze == 0 /* old style deleted */) { + + // log.trace("Record fileOffset: " + fileOffset + ", length: " + recordLength + ", latchedAddr: " + latchedAddr); + + // if (sze == 0 /* old style deleted */) { // /* // * Should only happen if a previous write was already made // * to the buffer but the allocation has since been freed. 
Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -62,7 +62,6 @@ import com.bigdata.counters.CounterSet; import com.bigdata.ha.HAPipelineGlue; import com.bigdata.ha.QuorumPipeline; -import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; @@ -544,7 +543,18 @@ this.useChecksum = useChecksum; - this.compactionEnabled = false;//canCompact() && compactionThreshold < 100; + /** + * FIXME WCS compaction fails! + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/674" > + * WCS write cache compaction causes errors in RWS postHACommit() + * </a> + */ + this.compactionEnabled = false; //canCompact() && compactionThreshold < 100; + + if (log.isInfoEnabled()) + log.info("Compaction Enabled: " + compactionEnabled + + " @ threshold=" + compactionThreshold); // this.opener = opener; @@ -1376,11 +1386,6 @@ * non-final follower will receiveAndReplicate the write cache * buffer. The last follower will receive the buffer. */ - // duplicate the write cache's buffer. - final ByteBuffer b = cache.peek().duplicate(); - // flip(limit=pos;pos=0) - b.flip(); - assert b.remaining() > 0 : "Empty cache: " + cache; // send to 1st follower. @SuppressWarnings("unchecked") @@ -1389,7 +1394,7 @@ assert quorumMember != null : "Not quorum member?"; - final IHAWriteMessage msg = cache.newHAWriteMessage(// + final WriteCache.HAPackage pkg = cache.newHAPackage(// quorumMember.getStoreUUID(),// quorumToken,// quorumMember.getLastCommitCounter(),// @@ -1398,6 +1403,8 @@ checksumBuffer ); + assert pkg.getData().remaining() > 0 : "Empty cache: " + cache; + /* * Start the remote asynchronous IO before the local synchronous * IO. @@ -1413,11 +1420,11 @@ * then clean up the documentation here (see the commented * out version of this line below). */ - quorumMember.logWriteCacheBlock(msg, b.duplicate()); + quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); // ASYNC MSG RMI + NIO XFER. - remoteWriteFuture = quorumMember.replicate(null/* req */, msg, - b.duplicate()); + remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(), + pkg.getData().duplicate()); counters.get().nsend++; @@ -1468,7 +1475,7 @@ } } // writeCacheBlock() - + } // class WriteTask /** @@ -3835,6 +3842,23 @@ } /** + * Debug method to verify that the {@link WriteCacheService} has flushed all + * {@link WriteCache} buffers. + * + * @return whether there are no outstanding writes buffered + */ + public boolean isFlushed() { + + final boolean clear = + dirtyList.size() == 0 + && compactingCacheRef.get() == null + && (current.get() == null || current.get().isEmpty()); + + return clear; + + } + + /** * An array of writeCache actions is maintained that can be used * to provide a breadcrumb of how that address has been written, saved, * freed or removed. 
Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractBufferStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -676,4 +676,15 @@ return false; } +// +// /** +// * {@inheritDoc} +// * <p> +// * Note: By default there is no WriteCache to buffer any writes +// * +// * @return <code>true</code> unless overridden. +// */ +// public boolean isFlushed() { +// return true; +// } } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -3274,12 +3274,31 @@ final long nextOffset = _bufferStrategy.getNextOffset(); final long blockSequence; + if (_bufferStrategy instanceof IHABufferStrategy) { + // always available for HA. blockSequence = ((IHABufferStrategy) _bufferStrategy) .getBlockSequence(); + + if (!((IHABufferStrategy) _bufferStrategy) + .getWriteCacheService().isFlushed()) { + + /** + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/674" + * > WCS write cache compaction causes errors in RWS + * postHACommit() </a> + */ + + throw new AssertionError(); + + } + } else { + blockSequence = old.getBlockSequence(); + } /* Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/DiskBackedBufferStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/DiskBackedBufferStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/DiskBackedBufferStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -433,5 +433,6 @@ public void setNextOffset(long lastOffset) { // void for default DiskBackedBufferStrategy } - + + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IBufferStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -276,4 +276,9 @@ */ public boolean useChecksums(); +// /** +// * Determines whether there are outstanding writes to the underlying store +// */ +// public boolean isFlushed(); + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/IHABufferStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -275,4 +275,9 @@ Quorum<HAGlue, QuorumService<HAGlue>> quorum, long token) throws IOException, QuorumException; + /** + * Return the {@link WriteCacheService} (mainly for debugging). 
+ */ + WriteCacheService getWriteCacheService(); + } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Options.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -36,6 +36,8 @@ import com.bigdata.cache.HardReferenceQueue; import com.bigdata.io.DirectBufferPool; import com.bigdata.io.FileLockUtility; +import com.bigdata.io.compression.CompressorRegistry; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.io.writecache.WriteCache; import com.bigdata.io.writecache.WriteCache.ReadCache; import com.bigdata.io.writecache.WriteCacheService; @@ -332,6 +334,21 @@ // String WRITE_CACHE_CAPACITY = AbstractJournal.class.getName()+".writeCacheCapacity"; /** + * Optional {@link IRecordCompressor} strategy for the + * {@link WriteCacheService} in support of compressed payloads for + * replicated messages and compressed HALogs (default + * {@value #DEFAULT_HALOG_COMPRESSOR}). The value is a <code>key</code> + * declared to the {@link CompressorRegistry}. + * + * @see CompressorRegistry + * + * @see <a href="https://sourceforge.net/apps/trac/bigdata/ticket/652" > + * Compress write cache blocks for replication and in HALogs </a> + */ + String HALOG_COMPRESSOR = "HALogCompressor"; + String DEFAULT_HALOG_COMPRESSOR = null;//FIXME Change default: CompressorRegistry.DEFLATE_BEST_SPEED; + + /** * The initial extent of the journal (bytes). When the journal is backed by * a file, this is the initial length of that file. The initial user extent * is typically slightly smaller as the head of the file contains some Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -47,6 +47,7 @@ import com.bigdata.ha.msg.IHARebuildRequest; import com.bigdata.ha.msg.IHAWriteMessage; import com.bigdata.io.IBufferAccess; +import com.bigdata.io.writecache.WriteCacheService; import com.bigdata.mdi.IResourceMetadata; import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; @@ -894,6 +895,15 @@ m_store.postHACommit(rootBlock); } + @Override + public WriteCacheService getWriteCacheService() { + return m_store.getWriteCacheService(); + } + +// @Override +// public boolean isFlushed() { +// return m_store.isFlushed(); +// } // private int m_rebuildSequence = -1; // // @Override Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -63,6 +63,8 @@ import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; +import com.bigdata.io.compression.CompressorRegistry; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.io.writecache.IBackingReader; import com.bigdata.io.writecache.WriteCache; import 
com.bigdata.io.writecache.WriteCacheCounters; @@ -217,6 +219,11 @@ */ private volatile WORMWriteCacheService writeCacheService; + @Override + public WORMWriteCacheService getWriteCacheService() { + return writeCacheService; + } + /** * <code>true</code> iff the backing store has record level checksums. */ @@ -252,6 +259,14 @@ private final int hotCacheSize; /** + * The key for the {@link CompressorRegistry} which identifies the + * {@link IRecordCompressor} to be applied (optional). + * + * @see com.bigdata.journal.Options#HALOG_COMPRESSOR + */ + private final String compressorKey; + + /** * <code>true</code> if the backing store will be used in an HA * {@link Quorum} (this is passed through to the {@link WriteCache} objects * which use this flag to conditionally track the checksum of the entire @@ -951,6 +966,10 @@ com.bigdata.journal.Options.HOT_CACHE_SIZE, com.bigdata.journal.Options.DEFAULT_HOT_CACHE_SIZE)); + this.compressorKey = fileMetadata.getProperty( + com.bigdata.journal.Options.HALOG_COMPRESSOR, + com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR); + isHighlyAvailable = quorum != null && quorum.isHighlyAvailable(); final boolean useWriteCacheService = fileMetadata.writeCacheEnabled @@ -1002,7 +1021,8 @@ public WriteCacheImpl newWriteCache(final IBufferAccess buf, final boolean useChecksum, final boolean bufferHasData, final IReopenChannel<? extends Channel> opener, - final long fileExtent) throws InterruptedException { + final long fileExtent) + throws InterruptedException { return new WriteCacheImpl(0/* baseOffset */, buf, useChecksum, bufferHasData, (IReopenChannel<FileChannel>) opener, @@ -1034,6 +1054,13 @@ } + @Override + protected String getCompressorKey() { + + return compressorKey; + + } + /** * {@inheritDoc} * <p> @@ -2480,12 +2507,18 @@ public void writeRawBuffer(final IHAWriteMessage msg, final IBufferAccess b) throws IOException, InterruptedException { + // FIXME Must EXPAND() iff message is compressed. + /* * Wrap up the data from the message as a WriteCache object. This will * build up a RecordMap containing the allocations to be made, and * including a ZERO (0) data length if any offset winds up being deleted * (released). - */ + * + * Note: We do not need to pass in the compressorKey here. It is ignored + * by WriteCache.flush(). We have expanded the payload above. Now we are + * just flushing the write cache onto the disk. 
+ */ final WriteCacheImpl writeCache = writeCacheService.newWriteCache(b, useChecksums, true/* bufferHasData */, opener, msg.getFileExtent()); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -345,7 +345,7 @@ // assert block.releaseSession(m_store.m_writeCache) == 0; // clear out writes - FIXME is releaseSession okay - block.releaseCommitWrites(m_store.m_writeCacheService); + block.releaseCommitWrites(m_store.getWriteCacheService()); // Moved to postCommit() // block.m_transients = block.m_live.clone(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-05-23 19:25:57 UTC (rev 7160) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2013-05-24 17:23:19 UTC (rev 7161) @@ -50,6 +50,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; @@ -81,6 +82,8 @@ import com.bigdata.io.FileChannelUtility; import com.bigdata.io.IBufferAccess; import com.bigdata.io.IReopenChannel; +import com.bigdata.io.compression.CompressorRegistry; +import com.bigdata.io.compression.IRecordCompressor; import com.bigdata.io.writecache.BufferedWrite; import com.bigdata.io.writecache.IBackingReader; import com.bigdata.io.writecache.IBufferedWriter; @@ -493,13 +496,36 @@ private final int m_hotCacheSize; /** + * The key for the {@link CompressorRegistry} which identifies the + * {@link IRecordCompressor} to be applied (optional). + * + * @see com.bigdata.journal.Options#HALOG_COMPRESSOR + */ + private final String m_compressorKey; + + /** * Note: This is not final because we replace the {@link WriteCacheService} * during {@link #reset(long)} in order to propagate the then current quorum * token to the {@link WriteCacheService}. */ - RWWriteCacheService m_writeCacheService; + private RWWriteCacheService m_writeCacheService; /** + * Return the then current {@link WriteCacheService} object. + * + * @see IHABufferStrategy#getWriteCacheService() + */ + public RWWriteCacheService getWriteCacheService() { + m_allocationReadLock.lock(); + try { + return m_writeCacheService; + } finally { + m_allocationReadLock.unlock(); + } + + } + + /** * The actual allocation sizes as read from the store. * * @see #DEFAULT_ALLOCATION_SIZES @@ -650,11 +676,14 @@ private ConcurrentHashMap<Integer, Long> m_lockAddresses = null; class WriteCacheImpl extends WriteCache.FileChannelScatteredWriteCache { + + final private String compressorKey; + public WriteCacheImpl(final IBufferAcces... [truncated message content] |
From: <tho...@us...> - 2013-05-23 19:26:04
Revision: 7160 http://bigdata.svn.sourceforge.net/bigdata/?rev=7160&view=rev Author: thompsonbry Date: 2013-05-23 19:25:57 +0000 (Thu, 23 May 2013) Log Message: ----------- javadoc Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-23 14:28:13 UTC (rev 7159) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-23 19:25:57 UTC (rev 7160) @@ -159,11 +159,13 @@ /** * {@inheritDoc} - * <p> - * Note: This was added to address a file handle leak. However, I am quite + * + * TODO This was added to address a file handle leak. However, I am quite * dubious that this will fix the problem. While GC may be necessary to * finalize {@link HALogReader} instances during a RESYNC, we have already - * invoked {@link #close()} on those instances in the SendHALogTask(). + * invoked {@link #close()} on those instances in the SendHALogTask(). It + * may be better to remove this since finalize() methods add overhead to + * GC. * * @see <a * href="https://sourceforge.net/apps/trac/bigdata/ticket/678#comment:4" This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
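Note: the TODO added in r7160 concerns finalize() being only a safety net for readers that were never explicitly closed. Below is a small, self-contained sketch of the pattern under discussion — an idempotent close() guarded by an AtomicBoolean (the same guard OpenHALogReader uses in r7161) plus a last-resort finalizer. The class and field names are illustrative only, not the actual HALogReader implementation.

{{{
import java.io.Closeable;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.concurrent.atomic.AtomicBoolean;

public class ClosableReaderSketch implements Closeable {

    private final RandomAccessFile raf;

    /** <code>true</code> iff this reader is open (prevents a double close). */
    private final AtomicBoolean open = new AtomicBoolean(true);

    public ClosableReaderSketch(final RandomAccessFile raf) {
        this.raf = raf;
    }

    @Override
    public void close() throws IOException {
        // Only the first close() releases the backing file handle.
        if (open.compareAndSet(true/* expected */, false/* newValue */)) {
            raf.close();
        }
    }

    @Override
    protected void finalize() throws Throwable {
        try {
            // Safety net only: callers should close() in a finally block.
            // Relying on GC is not deterministic and adds finalization cost.
            close();
        } finally {
            super.finalize();
        }
    }

}
}}}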
From: <tho...@us...> - 2013-05-23 14:28:22
Revision: 7159 http://bigdata.svn.sourceforge.net/bigdata/?rev=7159&view=rev Author: thompsonbry Date: 2013-05-23 14:28:13 +0000 (Thu, 23 May 2013) Log Message: ----------- Additional tests and bug fixes for HALog support. However, the open file handle leak appears to be a possible JVM/OS bug. Some potentially relevant links are given below. The file handles are definitly NOT pinned by Java classes. I have added a finalize() method to HALogReader in case we are somehow failing to close() the historical HALog files in SendHALogTask, but I rate this as very unlikely to fix the problem. The readers are closed in a try{} finally{} pattern by SendHALogTask. A workaround is to increase the number of open files using ulimit or to restart the leader when it runs out of file handles. However, I suspect a JVM/OS bug. See below for some possibly relevant links: See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7118373 Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-22 21:00:02 UTC (rev 7158) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-23 14:28:13 UTC (rev 7159) @@ -157,6 +157,24 @@ } + /** + * {@inheritDoc} + * <p> + * Note: This was added to address a file handle leak. However, I am quite + * dubious that this will fix the problem. While GC may be necessary to + * finalize {@link HALogReader} instances during a RESYNC, we have already + * invoked {@link #close()} on those instances in the SendHALogTask(). + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/678#comment:4" + * > DGC Thread Leak: sendHALogForWriteSet() </a> + */ + @Override + protected void finalize() throws Throwable { + close(); + super.finalize(); + } + /** * Hook for * {@link FileChannelUtility#readAll(FileChannel, ByteBuffer, long)} @@ -241,7 +259,8 @@ @Override public boolean hasMoreBuffers() throws IOException { - assertOpen(); + if (!isOpen()) + return false; if (isEmpty()) { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-22 21:00:02 UTC (rev 7158) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-23 14:28:13 UTC (rev 7159) @@ -502,8 +502,8 @@ throw new IllegalStateException("nextSequence=" + m_nextSequence + ", but msg=" + msg); - if (haLog.isInfoEnabled()) - haLog.info("msg=" + msg + ", position=" + m_position); + if (haLog.isDebugEnabled()) + haLog.debug("msg=" + msg + ", position=" + m_position); if (m_position < headerSize0) throw new AssertionError("position=" + m_position @@ -631,9 +631,8 @@ * Conditional remove iff file is open. Will not remove * something that has been closed. 
*/ + m_state.forceCloseAll(); - m_state.m_channel.close(); - if (m_state.m_haLogFile.exists() && !m_state.m_haLogFile.delete()) { /* @@ -787,6 +786,25 @@ } + /** + * Force an actual close of the backing file (used when we will remove a + * file). + * <p> + * Note: Any readers on the closed file will notice the close and fail + * if they are blocking on a read. + * + * TODO In fact, we probably should have the set of + * {@link OpenHALogReader}s so we can explicitly close them. See + * {@link OpenHALogReader#close()}. + */ + public void forceCloseAll() throws IOException { + synchronized (this) { + if (m_accessors > 0) + m_accessors = 1; + close(); + } + } + public void close() throws IOException { synchronized (this) { try { @@ -842,12 +860,25 @@ } } + /** + * FIXME The API states that IHALogReader.isEmpty() reports true until + * the closing root block is laid down. However, this isEmpty() + * implementation does not adhere to those semantics. Review how (and + * if) isEmpty() is used for the live HALog and then fix either the API + * or this method. + */ public boolean isEmpty() { synchronized(this) { return m_committed && m_records == 0; } } + public boolean isOpen() { + synchronized (this) { + return m_accessors != 0 && m_raf.getChannel().isOpen(); + } + } + /** * * @param record @@ -857,31 +888,37 @@ * TODO We should support wait up to a timeout here to make the API more * pleasant. */ - public void waitOnStateChange(final long record) { - - synchronized (this) { - - if (m_records >= record) { - - return; - - } + public void waitOnStateChange(final long record) { - try { + synchronized (this) { - wait(); - - } catch (InterruptedException e) { - - // Propagate the interrupt. - Thread.currentThread().interrupt(); - - return; - - } + // Condition variable. + while (m_records < record && !m_committed) { - } + if (!isOpen()) { + // Provable nothing left to read. + return; + + } + + try { + + wait(); + + } catch (InterruptedException e) { + + // Propagate the interrupt. + Thread.currentThread().interrupt(); + + return; + + } + + } + + } + } } // class FileState @@ -947,19 +984,30 @@ } @Override - public boolean hasMoreBuffers() throws IOException { + public boolean hasMoreBuffers() { if (!isOpen()) return false; - - if (m_state.isCommitted() && m_state.recordCount() <= m_record) - return false; - if (m_state.recordCount() > m_record) - return true; + synchronized (m_state) { - m_state.waitOnStateChange(m_record + 1); + /* + * Note: synchronized(FileState) makes these decisions atomic. + */ + + if (!m_state.isOpen()) + return false; + + if (m_state.isCommitted() && m_state.recordCount() <= m_record) + return false; + if (m_state.recordCount() > m_record) + return true; + + m_state.waitOnStateChange(m_record + 1); + + } + return hasMoreBuffers(); // tail recursion. } @@ -978,9 +1026,9 @@ } - @Override - public IHAWriteMessage processNextBuffer(ByteBuffer clientBuffer) - throws IOException { + @Override + public IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) + throws IOException { final IHAWriteMessage msg; @@ -1013,7 +1061,30 @@ /* * Close an open reader. */ - m_state.close(); + synchronized(m_state) { + + if(m_state.m_accessors == 0) { + + /** + * TODO This is a bit of a hack. The problem is that + * disableHALog() can force the close of all open + * readers. This is noticed by the FileState, but the + * OpenHALogReader itself does not know that it is + * "closed" (it's open flag has not been cleared). 
We + * could "fix" this by keeping an explicit set of the + * open readers for the live HALog and then invoking + * OpenHALogReader.close() on each of them in + * forceCloseAll(). + * + * @see forceCloseAll() + */ + return; + + } + + m_state.close(); + + } } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-22 21:00:02 UTC (rev 7158) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-23 14:28:13 UTC (rev 7159) @@ -118,6 +118,15 @@ * decision can be made deterministically by inspecting the #of messages * available (in the closing root block) and the #of messages consumed by * the reader. + * + * @return Return <code>false</code> if (a) the file is closed on entry to + * this method; (b) the live HALog is closed while waiting for more + * data to become available; or (c) the end of a historical HALog + * file has been reached. Return <code>true</code> iff more data can + * be read from the file. + * + * @throws IOException + * if there is an error reading from the backing file. */ boolean hasMoreBuffers() throws IOException; Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-22 21:00:02 UTC (rev 7158) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-23 14:28:13 UTC (rev 7159) @@ -34,6 +34,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import junit.framework.TestCase2; @@ -675,6 +677,180 @@ } /** + * A unit test where the reader is blocked awaiting more input in + * {@link IHALogReader#hasMoreBuffers()} on the live HALog. The writer is + * closed. The reader should immediately notice this event and return + * <code>false</code>. + */ + public void test_closeLiveLogWithOpenReader() throws IOException, + InterruptedException, ExecutionException { + + final HALogWriter writer = new HALogWriter(logdir); + + try { + + final IRootBlockView openRB = openRBV(StoreTypeEnum.RW); + + assertEquals(StoreTypeEnum.RW, openRB.getStoreType()); + + writer.createLog(openRB); + + final IHALogReader reader = writer.getReader(openRB + .getCommitCounter() + 1); + + try { + + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE.getBufferCapacity()); + + int sequence = 0; + + final ByteBuffer data = randomData(2000); + + final UUID storeUUID = UUID.randomUUID(); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), openRB.getFirstCommitTime(), + sequence, data.limit()/* size */, ChecksumUtility + .getCHK().checksum(data), + openRB.getStoreType(), openRB.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); + + writer.writeOnHALog(msg, data); + + final Future<Void> f = executorService + .submit(new Callable<Void>() { + public Void call() throws Exception { + // should be immediately true. + assertTrue(reader.hasMoreBuffers()); + // read data into reader's buffer. + reader.processNextBuffer(rbuf); + // should block until writer is closed. + assertFalse(reader.hasMoreBuffers()); + // done - success. 
+ return (Void) null; + } + }); + + // Make sure the Futuer is blocked. + try { + f.get(500, TimeUnit.MILLISECONDS); + fail("Reader did not block"); + } catch (TimeoutException ex) { + // ignore expected exception + } + + writer.closeHALog(closeRBV(openRB)); + + // Block and wait for the future. Verify no errors. + f.get(); + + } finally { + + reader.close(); + + } + + } finally { + + writer.disableHALog(); + + } + + // Read all files in the test directory. + HALogReader.main(new String[] { logdir.toString() }); + + } + + /** + * A unit test where the reader is blocked awaiting more input in + * {@link IHALogReader#hasMoreBuffers()} on the live HALog. The writer is + * {@link HALogWriter#disableHALog() disabled}. The reader should + * immediately notice this event and return <code>false</code>. + */ + public void test_disableLiveLogWithOpenReader() throws IOException, + InterruptedException, ExecutionException { + + final HALogWriter writer = new HALogWriter(logdir); + + try { + + final IRootBlockView openRB = openRBV(StoreTypeEnum.RW); + + assertEquals(StoreTypeEnum.RW, openRB.getStoreType()); + + writer.createLog(openRB); + + final IHALogReader reader = writer.getReader(openRB + .getCommitCounter() + 1); + + try { + + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE.getBufferCapacity()); + + int sequence = 0; + + final ByteBuffer data = randomData(2000); + + final UUID storeUUID = UUID.randomUUID(); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), openRB.getFirstCommitTime(), + sequence, data.limit()/* size */, ChecksumUtility + .getCHK().checksum(data), + openRB.getStoreType(), openRB.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); + + writer.writeOnHALog(msg, data); + + final Future<Void> f = executorService + .submit(new Callable<Void>() { + public Void call() throws Exception { + // should be immediately true. + assertTrue(reader.hasMoreBuffers()); + // read data into reader's buffer. + reader.processNextBuffer(rbuf); + // should block until writer is closed. + assertFalse(reader.hasMoreBuffers()); + // done - success. + return (Void) null; + } + }); + + // Make sure the Futuer is blocked. + try { + f.get(500, TimeUnit.MILLISECONDS); + fail("Reader did not block"); + } catch (TimeoutException ex) { + // ignore expected exception + } + + writer.disableHALog(); + + // Block and wait for the future. Verify no errors. + f.get(); + + } finally { + + reader.close(); + + } + + } finally { + + writer.disableHALog(); + + } + + // Read all files in the test directory. + HALogReader.main(new String[] { logdir.toString() }); + + } + + /** * Unit test verifies that each open of an {@link IHALogReader} is distinct * and the an {@link IHALogReader#close()} will not close the backing * channel for a different reader instance that is reading from the same @@ -682,8 +858,248 @@ * file. The case for the live HALog file is tested by * {@link #testSimpleRWWriterReader()}. */ - public void test_doubleOpen_close_historicalHALog() { - fail("write test"); + public void test_doubleOpen_close_historicalHALog() throws Exception { + + final HALogWriter writer = new HALogWriter(logdir); + + try { + + /* + * Generate and close (seal with a closing root block) an HALog + * file. 
+ */ + final IRootBlockView openRB = openRBV(StoreTypeEnum.RW); + + { + + assertEquals(StoreTypeEnum.RW, openRB.getStoreType()); + + writer.createLog(openRB); + + int sequence = 0; + + final ByteBuffer data = randomData(2000); + + final UUID storeUUID = UUID.randomUUID(); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), openRB.getFirstCommitTime(), + sequence, data.limit()/* size */, ChecksumUtility + .getCHK().checksum(data), + openRB.getStoreType(), openRB.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); + + writer.writeOnHALog(msg, data); + + writer.closeHALog(closeRBV(openRB)); + + } + + /* + * The HALog file is now closed. + * + * Setup two readers on that HALog file. + */ + + final IHALogReader r1 = writer.getReader(openRB + .getCommitCounter() + 1); + + assertFalse(r1.isLive()); + assertTrue(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertTrue(r1.hasMoreBuffers()); + + final IHALogReader r2 = writer.getReader(openRB + .getCommitCounter() + 1); + + assertFalse(r2.isLive()); + assertTrue(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertTrue(r2.hasMoreBuffers()); + + /* + * Close one of the readers and make sure that the other reader + * remains open. + */ + + // close [r1]. + r1.close(); + assertFalse(r1.isLive()); + assertFalse(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertFalse(r1.hasMoreBuffers()); + + // verify [r2] remains open. + assertFalse(r2.isLive()); + assertTrue(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertTrue(r2.hasMoreBuffers()); + + /* + * Now use the 2nd reader to read the data to make sure that the + * IHALogReader is really open and functional. + */ + try { + + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE.getBufferCapacity()); + + while (r2.hasMoreBuffers()) { + // read data into reader's buffer. + r2.processNextBuffer(rbuf); + } + + } finally { + + r2.close(); + + } + + assertFalse(r2.isLive()); + assertFalse(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertFalse(r2.hasMoreBuffers()); + + } finally { + + writer.disableHALog(); + + } + + // Read all files in the test directory. + HALogReader.main(new String[] { logdir.toString() }); + } - + + /** + * Unit test for an open file leak for a historical log reader. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/678#comment:4" + * > DGC Thread Leak: sendHALogForWriteSet() </a> + */ + public void test_fileLeak_historicalHALog() throws Exception { + + /* + * This should be more than the #of open file handles that are supported + * by the OS platform / configured limits. Of course, some platforms do + * not have limits in which case this test can not fail on those + * platforms, but you could still use something like "lsof" or a heap + * dump / profiler to look for leaked file handles. + */ + final int MAX_OPEN_FILE_HANDLES = 10000; + + final HALogWriter writer = new HALogWriter(logdir); + + try { + + /* + * Generate and close (seal with a closing root block) an HALog + * file. 
+ */ + final IRootBlockView openRB = openRBV(StoreTypeEnum.RW); + + { + + assertEquals(StoreTypeEnum.RW, openRB.getStoreType()); + + writer.createLog(openRB); + + int sequence = 0; + + final ByteBuffer data = randomData(2000); + + final UUID storeUUID = UUID.randomUUID(); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), openRB.getFirstCommitTime(), + sequence, data.limit()/* size */, ChecksumUtility + .getCHK().checksum(data), + openRB.getStoreType(), openRB.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); + + writer.writeOnHALog(msg, data); + + writer.closeHALog(closeRBV(openRB)); + + } + + /* + * The HALog file is now closed. + * + * Setup a reader on that HALog file. This reader will stay open. We + * then open and close and second reader a bunch of times. These + * readers should be completely distinct and use distinct file + * handles to read on the same file. Thus the #of open file handles + * should not grow over time. + */ + + final IHALogReader r1 = writer + .getReader(openRB.getCommitCounter() + 1); + + assertFalse(r1.isLive()); + assertTrue(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertTrue(r1.hasMoreBuffers()); + + for (int i = 0; i < MAX_OPEN_FILE_HANDLES; i++) { + + final IHALogReader r2 = writer.getReader(openRB + .getCommitCounter() + 1); + + assertFalse(r2.isLive()); + assertTrue(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertTrue(r2.hasMoreBuffers()); + + /* + * Now use the 2nd reader to read the data to make sure that the + * IHALogReader is really open and functional. + */ + try { + + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE + .getBufferCapacity()); + + while (r2.hasMoreBuffers()) { + + // read data into reader's buffer. + r2.processNextBuffer(rbuf); + + } + + } finally { + + r2.close(); + + } + + assertFalse(r2.isLive()); + assertFalse(r2.isOpen()); + assertFalse(r2.isEmpty()); + assertFalse(r2.hasMoreBuffers()); + + } + + // close [r1]. + r1.close(); + assertFalse(r1.isLive()); + assertFalse(r1.isOpen()); + assertFalse(r1.isEmpty()); + assertFalse(r1.hasMoreBuffers()); + + } finally { + + writer.disableHALog(); + + } + + // Read all files in the test directory. + HALogReader.main(new String[] { logdir.toString() }); + + } + } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
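The waitOnStateChange() change in r7159 replaces a one-shot wait() with a guarded wait loop that also returns when the live HALog is closed or the waiting thread is interrupted. A minimal sketch of that idiom, with hypothetical names rather than the actual FileState fields, might look like this:

// Hypothetical sketch of the guarded-wait idiom: re-test the condition in a
// loop, wake blocked readers on every state change, and return early when the
// log is closed or the waiting thread is interrupted.
public class LogStateSketch {

    private long records = 0;      // messages written so far
    private boolean committed = false;
    private boolean open = true;

    public synchronized void addRecord() {
        records++;
        notifyAll(); // wake any readers blocked in waitForRecord()
    }

    public synchronized void close() {
        open = false;
        notifyAll(); // readers must notice the close rather than wait forever
    }

    /** Block until the given record exists; false if it provably never will. */
    public synchronized boolean waitForRecord(final long record) {
        while (records < record && !committed) {
            if (!open) {
                return false; // nothing more will be written
            }
            try {
                wait();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // propagate the interrupt
                return false;
            }
        }
        return records >= record;
    }
}

The while loop matters because wait() may return spuriously and because several events (new record, commit, close) share the same monitor; every wake-up must re-test the condition before proceeding.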
From: <tho...@us...> - 2013-05-22 21:00:09
Revision: 7158 http://bigdata.svn.sourceforge.net/bigdata/?rev=7158&view=rev Author: thompsonbry Date: 2013-05-22 21:00:02 +0000 (Wed, 22 May 2013) Log Message: ----------- Moved buffer acquire to immediately before the try {} whose finally {} releases the buffer. Modified Paths: -------------- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-22 20:58:08 UTC (rev 7157) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-22 21:00:02 UTC (rev 7158) @@ -904,12 +904,14 @@ public Void call() throws Exception { try { - final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); - long nsent = 0; - boolean success = false; - try { + long nsent = 0; + boolean success = false; + final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); + + try { + while (r.hasMoreBuffers()) { // IHABufferStrategy @@ -964,25 +966,25 @@ return null; + } finally { + + buf.release(); + + if (haLog.isDebugEnabled()) + haLog.debug("req=" + req + ", nsent=" + nsent + + ", success=" + success); + + } + } finally { - buf.release(); + // Close the open log file. + r.close(); - if (haLog.isDebugEnabled()) - haLog.debug("req=" + req + ", nsent=" + nsent - + ", success=" + success); - } - } finally { + } // call() - // Close the open log file. - r.close(); - - } - - } // call() - } // class SendHALogTask /* This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
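The point of r7158 is ordering: acquire the buffer immediately before the try block whose finally releases it, so no code path can fail while the buffer is held but outside the protection of that finally. A sketch of the shape, using a hypothetical BufferPool rather than the actual DirectBufferPool API, might be:

import java.io.Closeable;
import java.io.IOException;

// Illustrative shape only: the buffer is acquired right before the try whose
// finally releases it, while the reader has its own, outer try/finally.
public final class SendTaskSketch {

    // Hypothetical pool; stands in for whatever hands out reusable buffers.
    interface BufferPool {
        Closeable acquire() throws IOException;
    }

    static void send(final BufferPool pool, final Closeable reader) throws IOException {
        try {
            // work that may fail while no buffer is held yet ...

            final Closeable buf = pool.acquire(); // acquired immediately before its try
            try {
                // ... stream data using buf ...
            } finally {
                buf.close(); // released exactly once, on every path past acquire()
            }
        } finally {
            reader.close(); // independent of the buffer's lifecycle
        }
    }
}

With this ordering an exception thrown before the acquire still reaches the outer finally that closes the reader, and an exception thrown after the acquire always reaches the release.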
From: <tho...@us...> - 2013-05-22 20:58:15
Revision: 7157
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7157&view=rev
Author:   thompsonbry
Date:     2013-05-22 20:58:08 +0000 (Wed, 22 May 2013)

Log Message:
-----------
Removed 2nd file close.

Modified Paths:
--------------
    branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java

Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java
===================================================================
--- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java	2013-05-22 20:55:47 UTC (rev 7156)
+++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java	2013-05-22 20:58:08 UTC (rev 7157)
@@ -968,8 +968,6 @@
 
                     buf.release();
 
-                    r.close();
-
                     if (haLog.isDebugEnabled())
                         haLog.debug("req=" + req + ", nsent=" + nsent
                                 + ", success=" + success);
From: <tho...@us...> - 2013-05-22 20:55:53
Revision: 7156 http://bigdata.svn.sourceforge.net/bigdata/?rev=7156&view=rev Author: thompsonbry Date: 2013-05-22 20:55:47 +0000 (Wed, 22 May 2013) Log Message: ----------- HALogWriter : Modified inner close to close the RandomAccessFile rather than the FileChannel. This is an attempt to address an open file leak. I doubt that this will fix the leak, but the RandomAccessFile will close all of its resources, including the FileChannel, while the inverse is not promised by the documentation. Also, m_record must be a long (not int). Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-22 19:26:41 UTC (rev 7155) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-22 20:55:47 UTC (rev 7156) @@ -574,13 +574,13 @@ /** * Close the file (does not flush). */ - private void close() throws IOException { + private void close() throws IOException { // Note: caller owns m_stateLock! try { if (m_state != null) { m_state.close(); } } finally { - reset(); + reset(); // Note: reset() clears [m_state]! } } @@ -789,10 +789,30 @@ public void close() throws IOException { synchronized (this) { - if (--m_accessors == 0) - m_channel.close(); - // wake up anyone waiting. - this.notifyAll(); + try { + if (m_accessors == 0) { + /* + * Already at zero. Do not decrement further. + */ + throw new IllegalStateException(); + } + // One less reader/writer. + --m_accessors; + if (m_accessors == 0) { + if (haLog.isDebugEnabled()) + haLog.debug("Closing file"); + /* + * Note: Close the RandomAccessFile rather than the + * FileChannel. Potential fix for leaking open file + * handles. + */ + // m_channel.close(); + m_raf.close(); + } + } finally { + // wake up anyone waiting. + this.notifyAll(); + } } } @@ -837,7 +857,7 @@ * TODO We should support wait up to a timeout here to make the API more * pleasant. */ - public void waitOnStateChange(final int record) { + public void waitOnStateChange(final long record) { synchronized (this) { @@ -870,7 +890,7 @@ private final FileState m_state; - private int m_record = 0; + private long m_record = 0L; private long m_position = headerSize0; // initial position @@ -965,14 +985,18 @@ final IHAWriteMessage msg; synchronized (m_state) { - final long savePosition = m_state.m_channel.position(); - m_state.m_channel.position(m_position); + final long savePosition = m_state.m_channel.position(); + + m_state.m_channel.position(m_position); + msg = HALogReader.processNextBuffer(m_state.m_raf, m_state.reopener, m_state.m_storeType, clientBuffer); m_position = m_state.m_channel.position(); + m_state.m_channel.position(savePosition); + } m_record++; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
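r7156 prefers closing the RandomAccessFile over closing only its FileChannel; the RandomAccessFile documentation states that closing the file also closes an associated channel, while the commit notes the inverse is not promised. A small stand-alone check of that behaviour (illustrative code, not taken from the project) might be:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Illustrative check: closing the RandomAccessFile also closes the channel
// obtained from it, so the underlying file descriptor is not left dangling.
public class CloseOrderSketch {

    public static void main(final String[] args) throws IOException {
        final File f = File.createTempFile("close-order", ".tmp");
        final RandomAccessFile raf = new RandomAccessFile(f, "rw");
        final FileChannel ch = raf.getChannel();
        try {
            ch.write(ByteBuffer.wrap(new byte[] { 1, 2, 3 }));
        } finally {
            raf.close(); // closes the file and, with it, the channel
        }
        System.out.println("channel open after raf.close(): " + ch.isOpen()); // false
        f.delete();
    }
}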
From: <tho...@us...> - 2013-05-22 19:26:54
Revision: 7155 http://bigdata.svn.sourceforge.net/bigdata/?rev=7155&view=rev Author: thompsonbry Date: 2013-05-22 19:26:41 +0000 (Wed, 22 May 2013) Log Message: ----------- HAJournal: sendHALogForWriteSet() was not closing the HALog. This lead to a "too many open files" exception when trying to resync a follower for a delta involving a lot of commit points. HAGlue: removed globalWriteLock() method. It is difficult to reconcile this with the concurrent unisolated writers task that we are taking up next and the global write lock is no longer required for backups. The test suite for this method was also removed. HALogNexus: sort the files in a directory before loading them into the index (files are not lexically sorted by default on some OS platforms and sorting facilitates index writes and makes the restart process more intelligible since we scan the files in commit order). HALogFile (althalog package). removed unused method. HALogReader, HALogWriter, IHALogReader, and test suite for same: found and cured several synchronization errors, cleaned up the test cases, and expanded test coverage. There is one test that is not yet written and hence fails with a "write me" message. See https://sourceforge.net/apps/trac/bigdata/ticket/678 (DGC Thread Leak: sendHALogForWriteSet()) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/HAGlue.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/althalog/HALogFile.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournalTest.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerOverride.java Removed Paths: ------------- branches/READ_CACHE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHAJournalServerGlobalWriteLock.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/HAGlue.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/HAGlue.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/HAGlue.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -34,7 +34,6 @@ import com.bigdata.ha.msg.IHADigestRequest; import com.bigdata.ha.msg.IHADigestResponse; -import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; import com.bigdata.ha.msg.IHALogDigestRequest; import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHARemoteRebuildRequest; @@ -45,7 +44,6 @@ import com.bigdata.ha.msg.IHASnapshotRequest; import com.bigdata.ha.msg.IHASnapshotResponse; import com.bigdata.journal.AbstractJournal; -import com.bigdata.journal.Journal; import 
com.bigdata.journal.jini.ha.HAJournalServer; import com.bigdata.quorum.AsynchronousQuorumCloseException; import com.bigdata.quorum.QuorumException; @@ -184,39 +182,42 @@ IHASnapshotDigestResponse computeHASnapshotDigest(IHASnapshotDigestRequest req) throws IOException, NoSuchAlgorithmException, DigestException; - /** - * Obtain a global write lock on the leader. The lock only blocks writers. - * Readers may continue to execute without delay. - * <p> - * You can not obtain a coherent backup of the {@link Journal} while there - * are concurrent write operations. This method may be used to coordinate - * full backups of the {@link Journal} by suspending low level writes on the - * backing file. - * <p> - * This method will block until the lock is held, the lock request is - * interrupted, or the lock request timeout expires. - * - * @param req - * The request. - * - * @return A {@link Future} for the lock. The lock may be released by - * canceling the {@link Future}. The lock is acquired before this - * method returns and is held while the {@link Future} is running. - * If the {@link Future#isDone()} then the lock is no longer held. - * - * @throws IOException - * if there is an RMI problem. - * @throws TimeoutException - * if a timeout expires while awaiting the global lock. - * @throws InterruptedException - * if interrupted while awaiting the lock. - * - * @deprecated This is no longer necessary to support backups since we can - * now take snapshots without suspending writers. - */ - @Deprecated - Future<Void> globalWriteLock(IHAGlobalWriteLockRequest req) - throws IOException, TimeoutException, InterruptedException; +// /** +// * Obtain a global write lock on the leader. The lock only blocks writers. +// * Readers may continue to execute without delay. +// * <p> +// * You can not obtain a coherent backup of the {@link Journal} while there +// * are concurrent write operations. This method may be used to coordinate +// * full backups of the {@link Journal} by suspending low level writes on the +// * backing file. +// * <p> +// * This method will block until the lock is held, the lock request is +// * interrupted, or the lock request timeout expires. +// * +// * @param req +// * The request. +// * +// * @return A {@link Future} for the lock. The lock may be released by +// * canceling the {@link Future}. The lock is acquired before this +// * method returns and is held while the {@link Future} is running. +// * If the {@link Future#isDone()} then the lock is no longer held. +// * +// * @throws IOException +// * if there is an RMI problem. +// * @throws TimeoutException +// * if a timeout expires while awaiting the global lock. +// * @throws InterruptedException +// * if interrupted while awaiting the lock. +// * +// * @deprecated This is no longer necessary to support backups since we can +// * now take snapshots without suspending writers. +// * @see https://sourceforge.net/apps/trac/bigdata/ticket/566 ( +// * Concurrent unisolated operations against multiple KBs on the +// * same Journal) +// */ +// @Deprecated +// Future<Void> globalWriteLock(IHAGlobalWriteLockRequest req) +// throws IOException, TimeoutException, InterruptedException; /** * Request that the service take a snapshot. 
If there is already a snapshot Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/althalog/HALogFile.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/althalog/HALogFile.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/althalog/HALogFile.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -745,16 +745,16 @@ } - /** - * The {@link IRootBlockView} for the committed state BEFORE the write - * set contained in the HA log file. - */ - public HALogFile getHALogFile() { +// /** +// * The {@link IRootBlockView} for the committed state BEFORE the write +// * set contained in the HA log file. +// */ +// public HALogFile getHALogFile() { +// +// return HALogFile.this; +// +// } - return HALogFile.this; - - } - public boolean hasMoreBuffers() throws IOException { assertOpen(); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -33,6 +33,7 @@ import java.nio.channels.FileChannel; import java.security.DigestException; import java.security.MessageDigest; +import java.util.Arrays; import org.apache.log4j.Logger; @@ -176,20 +177,29 @@ @Override public void close() { - if (m_channel.isOpen()) { + if (isOpen()) { try { - m_raf.close(); + + m_raf.close(); + } catch (IOException e) { - log - .error("Problem closing file: file=" + m_file + " : " - + e, e); - } + + log.error("Problem closing file: file=" + m_file + " : " + e, e); + + } } } + @Override + public boolean isOpen() { + + return m_channel.isOpen(); + + } + @Override public boolean isLive() { @@ -438,6 +448,17 @@ } }); + /* + * Sort into lexical order to force visitation in lexical order. + * + * Note: This should work under any OS. Files will be either directory + * names (3 digits) or filenames (21 digits plus the file extension). + * Thus the comparison centers numerically on the digits that encode + * either part of a commit counter (subdirectory) or an entire commit + * counter (HALog file). + */ + Arrays.sort(files); + for (File file : files) { if (file.isDirectory()) { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -31,6 +31,7 @@ import java.nio.channels.FileChannel; import java.security.DigestException; import java.security.MessageDigest; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -126,9 +127,6 @@ /** current write point on the channel. */ private long m_position = headerSize0; - /** number of open readers **/ - private int m_readers = 0; - /** * Return the commit counter that is expected for the writes that will be * logged (the same commit counter that is on the opening root block). 
@@ -349,15 +347,26 @@ @Override public FileChannel reopenChannel() throws IOException { - final Lock lock = m_stateLock.readLock(); - lock.lock(); - try { - if (m_state == null || m_state.m_channel == null) - throw new IOException("Closed"); + final Lock lock = m_stateLock.readLock(); + + lock.lock(); + + try { + + if (m_state == null || m_state.m_channel == null + || !m_state.m_channel.isOpen()) { + + throw new IOException("Closed"); + + } + return m_state.m_channel; + } finally { - lock.unlock(); + + lock.unlock(); + } } @@ -666,29 +675,6 @@ } /** - * FIXME This method is only used by the unit tests. They need to modified - * to use {@link #getReader(long)} instead. - * - * @deprecated Use {@link #getReader(long)}. That code can make an atomic - * decision about whether the current HALog is being request or - * a historical HALog. It is not possible for the caller to make - * this decision from the outside. - */ - public IHALogReader getReader() { - - final Lock lock = m_stateLock.readLock(); - lock.lock(); - try { - if (m_state == null) - return null; - - return new OpenHALogReader(m_state); - } finally { - lock.unlock(); - } - } - - /** * Return the {@link IHALogReader} for the specified commit counter. If the * request identifies the HALog that is currently being written, then an * {@link IHALogReader} will be returned that will "see" newly written @@ -756,15 +742,28 @@ * The FileState class encapsulates the file objects shared by the Writer * and Readers. */ - static class FileState { - final StoreTypeEnum m_storeType; - final File m_haLogFile; - final FileChannel m_channel; - final RandomAccessFile m_raf; - int m_records = 0; - boolean m_committed = false; + private static class FileState { + private final StoreTypeEnum m_storeType; + private final File m_haLogFile; + private final FileChannel m_channel; + private final RandomAccessFile m_raf; + /* + * Note: Mutable fields are guarded by synchronized(this) for the + * FileState object. + */ + /** + * The #of messages written onto the live HALog file. + */ + private long m_records = 0; + /** + * <code>false</code> until the live HALog file has been committed (by + * writing the closing root block). + */ + private boolean m_committed = false; + /** number of open writers (at most one) plus readers **/ + private int m_accessors; - final IReopenChannel<FileChannel> reopener = new IReopenChannel<FileChannel>() { + private final IReopenChannel<FileChannel> reopener = new IReopenChannel<FileChannel>() { @Override public FileChannel reopenChannel() throws IOException { @@ -777,20 +776,24 @@ } }; - int m_accessors = 0; - - FileState(final File file, StoreTypeEnum storeType) - throws FileNotFoundException { + private FileState(final File file, final StoreTypeEnum storeType) + throws FileNotFoundException { + m_haLogFile = file; m_storeType = storeType; m_raf = new RandomAccessFile(m_haLogFile, "rw"); m_channel = m_raf.getChannel(); m_accessors = 1; // the writer is a reader also + } public void close() throws IOException { - if (--m_accessors == 0) - m_channel.close(); + synchronized (this) { + if (--m_accessors == 0) + m_channel.close(); + // wake up anyone waiting. 
+ this.notifyAll(); + } } public void addRecord() { @@ -800,7 +803,7 @@ } } - public int recordCount() { + public long recordCount() { synchronized (this) { return m_records; } @@ -825,36 +828,69 @@ } } - /** - * - * @param record - * - the next sequence required - */ + /** + * + * @param record + * - the next sequence required + */ + /* + * TODO We should support wait up to a timeout here to make the API more + * pleasant. + */ public void waitOnStateChange(final int record) { - synchronized (this) { - if (m_records >= record) { - return; + + synchronized (this) { + + if (m_records >= record) { + + return; + } try { - wait(); + + wait(); + } catch (InterruptedException e) { - // okay; + + // Propagate the interrupt. + Thread.currentThread().interrupt(); + + return; + } + } } - } + } // class FileState static class OpenHALogReader implements IHALogReader { + private final FileState m_state; + private int m_record = 0; + private long m_position = headerSize0; // initial position + + /** <code>true</code> iff this reader is open. */ + private final AtomicBoolean open = new AtomicBoolean(true); - OpenHALogReader(final FileState state) { - m_state = state; - m_state.m_accessors++; + OpenHALogReader(final FileState state) { + + if (state == null) + throw new IllegalArgumentException(); + + m_state = state; + + // Note: Must be synchronized for visibility and atomicity! + synchronized (m_state) { + + m_state.m_accessors++; + + } + } @Override @@ -892,7 +928,11 @@ @Override public boolean hasMoreBuffers() throws IOException { - if (m_state.isCommitted() && m_state.recordCount() <= m_record) + + if (!isOpen()) + return false; + + if (m_state.isCommitted() && m_state.recordCount() <= m_record) return false; if (m_state.recordCount() > m_record) @@ -900,12 +940,22 @@ m_state.waitOnStateChange(m_record + 1); - return hasMoreBuffers(); + return hasMoreBuffers(); // tail recursion. + } @Override + public boolean isOpen() { + + return open.get(); + + } + + @Override public boolean isEmpty() { - return m_state.isEmpty(); + + return m_state.isEmpty(); + } @Override @@ -931,20 +981,28 @@ } @Override - public void close() throws IOException { - if (m_state != null) { - m_state.close(); - } + public void close() throws IOException { + + // Note: this pattern prevents a double-close of a reader. + if (open.compareAndSet(true/* expected */, false/* newValue */)) { + + /* + * Close an open reader. + */ + m_state.close(); + + } + } @Override - public void computeDigest(MessageDigest digest) throws DigestException, - IOException { + public void computeDigest(final MessageDigest digest) + throws DigestException, IOException { HALogReader.computeDigest(m_state.reopener, digest); - + } - } + } // class OpenHAReader } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -70,20 +70,32 @@ }; /** - * Closes the Reader. - * - * @throws IOException - */ + * Closes the reader iff it is open. + * <p> + * Note: Closing a reader does not have a side-effect on any open reader or + * writer for the same file. Specifically, if two readers are open for the + * same file and one is closed, then other will remain open. 
Likewise, if a + * reader is open for the live HALog file, closing the writer will not close + * the reader and closing the reader will not close the writer. + */ void close() throws IOException; /** * Return <code>true</code> if the root blocks in the log file have the same * commit counter. Such log files are logically empty regardless of their * length. + * + * FIXME The code disagress and will report [false] if the live log has the + * same root blocks but has not yet been closed. */ boolean isEmpty(); /** + * Return <code>true</code> iff the reader is open. + */ + boolean isOpen(); + + /** * The {@link IRootBlockView} for the committed state BEFORE the write set * contained in the HA log file. */ @@ -95,9 +107,18 @@ */ IRootBlockView getClosingRootBlock() throws IOException; - /** - * Checks whether we have reached the end of the file. - */ + /** + * Checks whether we have reached the end of the file (blocking). + * <p> + * Note: This method will block if this is the live HALog. This allows a + * process to block until the next message is made available on the live + * HALog by the writer. + * <p> + * Note: This method is non-blocking if this is not the live HALog since the + * decision can be made deterministically by inspecting the #of messages + * available (in the closing root block) and the #of messages consumed by + * the reader. + */ boolean hasMoreBuffers() throws IOException; /** @@ -128,7 +149,8 @@ /** * Return <code>true</code> iff this is was the live HALog at the moment - * when it was opened. + * when it was opened (if true, then this flag will remain <code>true</code> + * even if the writer moves onto another HALog file). */ boolean isLive(); } Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/HAGlobalWriteLockRequest.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -26,6 +26,13 @@ import java.io.Serializable; import java.util.concurrent.TimeUnit; +/** + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @see https://sourceforge.net/apps/trac/bigdata/ticket/566 ( Concurrent + * unisolated operations against multiple KBs on the same Journal) + */ +@Deprecated public class HAGlobalWriteLockRequest implements IHAGlobalWriteLockRequest, Serializable { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/msg/IHAGlobalWriteLockRequest.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -29,7 +29,10 @@ * Message requesting a global write lock. 
* * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * @see https://sourceforge.net/apps/trac/bigdata/ticket/566 ( Concurrent + * unisolated operations against multiple KBs on the same Journal) */ +@Deprecated public interface IHAGlobalWriteLockRequest extends IHAMessage { /** Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -6142,14 +6142,14 @@ } - @Override - public Future<Void> globalWriteLock(final IHAGlobalWriteLockRequest req) - throws IOException, TimeoutException, InterruptedException { +// @Override +// public Future<Void> globalWriteLock(final IHAGlobalWriteLockRequest req) +// throws IOException, TimeoutException, InterruptedException { +// +// throw new UnsupportedOperationException(); +// +// } - throw new UnsupportedOperationException(); - - } - @Override public Future<IHASnapshotResponse> takeSnapshot( final IHASnapshotRequest req) throws IOException { Modified: branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata/src/test/com/bigdata/ha/halog/TestHALogWriter.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -29,18 +29,116 @@ import java.nio.ByteBuffer; import java.util.Random; import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import junit.framework.TestCase2; + import com.bigdata.ha.msg.HAWriteMessage; +import com.bigdata.ha.msg.IHAMessage; import com.bigdata.ha.msg.IHAWriteMessage; +import com.bigdata.io.DirectBufferPool; +import com.bigdata.journal.CommitCounterUtility; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.RootBlockView; import com.bigdata.journal.StoreTypeEnum; +import com.bigdata.rawstore.Bytes; import com.bigdata.util.ChecksumUtility; +import com.bigdata.util.concurrent.DaemonThreadFactory; -import junit.framework.TestCase; +/** + * Test suite for {@link HALogWriter} and {@link HALogReader}. + * + * @author <a href="mailto:mar...@us...">Martyn Cutcher</a> + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ +public class TestHALogWriter extends TestCase2 { -public class TestHALogWriter extends TestCase { + private Random r; + private File logdir; + private ExecutorService executorService; + @Override + protected void setUp() throws Exception { + + super.setUp(); + + // create temporary file for the test. + logdir = File.createTempFile(getClass().getSimpleName(), "halogdir"); + + // delete temp file. will recreate as a directory. + if (!logdir.delete()) + throw new IOException("Could not remove: file=" + logdir); + + // re-create as a directory. 
+ if (!logdir.mkdirs()) + throw new IOException("Could not create: dir=" + logdir); + + r = new Random(); + + executorService = Executors.newCachedThreadPool(DaemonThreadFactory + .defaultThreadFactory()); + + } + + @Override + protected void tearDown() throws Exception { + + super.tearDown(); + + r = null; + + if (logdir != null && logdir.exists()) { + + recursiveDelete(logdir); + + } + + if (executorService != null) { + + executorService.shutdownNow(); + + executorService = null; + + } + + } + + /** + * Recursively removes any files and subdirectories and then removes the + * file (or directory) itself. + * + * @param f + * A file or directory. + */ + private void recursiveDelete(final File f) { + + if (f.isDirectory()) { + + final File[] children = f.listFiles(); + + for (int i = 0; i < children.length; i++) { + + recursiveDelete(children[i]); + + } + + } + + if (log.isInfoEnabled()) + log.info("Removing: " + f); + + if (f.exists() && !f.delete()) { + + log.warn("Could not remove: " + f); + + } + + } + /* * Need to mock up some valid rootblocks * @@ -53,7 +151,7 @@ * storeTypeEnum, // VERSION1 final long createTime, final long closeTime, * final int version, final ChecksumUtility checker) */ - private IRootBlockView openRBV(final StoreTypeEnum st) { + private static IRootBlockView openRBV(final StoreTypeEnum st) { return new RootBlockView( // true /* rb0 */, 0, 0, 0 /* commitTime */, 0, @@ -84,235 +182,508 @@ RootBlockView.currentVersion, ChecksumUtility.getCHK()); } - final static Random r = new Random(); + private ByteBuffer randomData(final int sze) { - static ByteBuffer randomData(final int sze) { - byte[] buf = new byte[sze]; - r.nextBytes(buf); + final byte[] buf = new byte[sze]; + + r.nextBytes(buf); return ByteBuffer.wrap(buf, 0, sze); + } - /** - * Simple writelog test, open file, write data and commit. - */ - public void testSimpleRWWriter() throws FileNotFoundException, IOException { - // establish halogdir - File logdir = new File("/tmp/halogdir"); - logdir.mkdirs(); + /** + * Simple writelog test, open file, write data and commit. 
+ */ + public void testSimpleRWWriter() throws FileNotFoundException, IOException, + InterruptedException { - final ChecksumUtility checker = ChecksumUtility.getCHK(); + final HALogWriter writer = new HALogWriter(logdir); - final HALogWriter writer = new HALogWriter(logdir); - final IRootBlockView rbv = openRBV(StoreTypeEnum.RW); + try { - assertTrue(rbv.getStoreType() == StoreTypeEnum.RW); + final IRootBlockView rbv = openRBV(StoreTypeEnum.RW); - writer.createLog(rbv); + assertEquals(StoreTypeEnum.RW, rbv.getStoreType()); - int sequence = 0; + writer.createLog(rbv); - final ByteBuffer data = randomData(2000); + int sequence = 0; - final UUID storeUUID = UUID.randomUUID(); - - IHAWriteMessage msg = new HAWriteMessage(storeUUID, rbv.getCommitCounter(), rbv - .getFirstCommitTime(), sequence, data.limit(), checker - .checksum(data), rbv.getStoreType(), rbv.getQuorumToken(), - 1000, 0); + final ByteBuffer data = randomData(2000); - writer.writeOnHALog(msg, data); + final UUID storeUUID = UUID.randomUUID(); - writer.closeHALog(closeRBV(rbv)); + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + rbv.getCommitCounter(), rbv.getFirstCommitTime(), sequence, + data.limit()/* size */, ChecksumUtility.getCHK().checksum( + data), rbv.getStoreType(), rbv.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); - // for sanity, let's run through the standard reader - try { - HALogReader.main(new String[] { "/tmp/halogdir" }); - } catch (InterruptedException e) { - // NOP - } + writer.writeOnHALog(msg, data); + + writer.closeHALog(closeRBV(rbv)); + + } finally { + + writer.disableHALog(); + + } + + // Read all files in the test directory. + HALogReader.main(new String[] { logdir.toString() }); + } /** * Simple WriteReader, no concurrency, confirms non-delayed responses. */ public void testSimpleRWWriterReader() throws FileNotFoundException, - IOException { - // establish halogdir - File logdir = new File("/tmp/halogdir"); - logdir.mkdirs(); + IOException, InterruptedException { - final ChecksumUtility checker = ChecksumUtility.getCHK(); - final HALogWriter writer = new HALogWriter(logdir); - final IRootBlockView rbv = openRBV(StoreTypeEnum.RW); + + try { - assertTrue(rbv.getStoreType() == StoreTypeEnum.RW); + // The opening root block. + final IRootBlockView openRB = openRBV(StoreTypeEnum.RW); - writer.createLog(rbv); + assertEquals(StoreTypeEnum.RW, openRB.getStoreType()); - int sequence = 0; + { + // should not be able to open the reader yet. + try { + writer.getReader(openRB.getCommitCounter() + 1); + } catch (FileNotFoundException ex) { + // Ignore expected exception. + if (log.isInfoEnabled()) + log.info("Ignoring expected exception: " + ex); + } - final ByteBuffer data = randomData(2000); + } - final UUID storeUUID = UUID.randomUUID(); + // writer is not open. + assertFalse(writer.isHALogOpen()); - final IHAWriteMessage msg = new HAWriteMessage(storeUUID, rbv.getCommitCounter(), rbv - .getFirstCommitTime(), sequence, data.limit(), checker - .checksum(data), rbv.getStoreType(), rbv.getQuorumToken(), - 1000, 0); + // create HALog file. + writer.createLog(openRB); - writer.writeOnHALog(msg, data); + { - final IHALogReader reader = writer.getReader(); + // writer is open. + assertTrue(writer.isHALogOpen()); + + // should be able to open the reader for that log now. + final IHALogReader reader = writer.getReader(openRB + .getCommitCounter() + 1); - assertTrue(reader.hasMoreBuffers()); + // This is the "live" HALog. 
+ assertTrue(reader.isLive()); - ByteBuffer rbuf = ByteBuffer.allocate(1 * 1024 * 1024); // 1 mb - IHAWriteMessage rmsg = reader.processNextBuffer(rbuf); + // The reader is open. + assertTrue(reader.isOpen()); + + // The HALog is logically empty. +// assertTrue(reader.isEmpty()); + + /* + * Note: Don't do this here. The method will block for the live + * HALog until the file is closed (sealed with the closing root + * block) or destroyed. + */ +// assertTrue(reader.hasMoreBuffers()); - assertTrue(rmsg.getSize() == msg.getSize()); + // close the reader. should not close the writer. + reader.close(); - // commit the log file - writer.closeHALog(closeRBV(rbv)); + // the reader is closed. + assertFalse(reader.isOpen()); + + // once closed, this method should return immediately. + assertFalse(reader.hasMoreBuffers()); + + // the writer is still open. + assertTrue(writer.isHALogOpen()); - // the writer should have closed the file, so the reader should return - // immediately to report no more buffers - assertFalse(reader.hasMoreBuffers()); + // double-close the reader. should be ignored. + reader.close(); + + // the writer should *still* be open. + assertTrue(writer.isHALogOpen()); + + } - // for sanity, let's run through the standard reader - try { - HALogReader.main(new String[] { "/tmp/halogdir" }); - } catch (InterruptedException e) { - // NOP + /* + * Verify that we can open two distinct readers on the same live + * HALog and that closing one does not close the other and does not + * close the writer. + */ + { + + final IHALogReader r1 = writer.getReader(openRB.getCommitCounter() + 1); + final IHALogReader r2 = writer.getReader(openRB.getCommitCounter() + 1); + + assertTrue(r1.isOpen()); + assertTrue(r2.isOpen()); + + // close one reader. + r1.close(); + + // one reader is closed, the other is open. + assertFalse(r1.isOpen()); + assertTrue(r2.isOpen()); + + // the writer should *still* be open. + assertTrue(writer.isHALogOpen()); + + // close the other reader. + r2.close(); + + // Verify both are now closed. + assertFalse(r2.isOpen()); + assertFalse(r2.isOpen()); + + // the writer should *still* be open. + assertTrue(writer.isHALogOpen()); + + } + + int sequence = 0; + + final ByteBuffer data = randomData(2000); + + final UUID storeUUID = UUID.randomUUID(); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), openRB.getFirstCommitTime(), sequence, + data.limit()/* size */, ChecksumUtility.getCHK().checksum( + data), openRB.getStoreType(), openRB.getQuorumToken(), + 1000/* fileExtent */, 0/* firstOffset */); + + // write a message on the HALog. + writer.writeOnHALog(msg, data); + + // should be able to open the reader for that log now. + final IHALogReader reader = writer + .getReader(openRB.getCommitCounter() + 1); + + assertTrue(reader.hasMoreBuffers()); + + { + + // Allocate heap byte buffer for the reader. + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE.getBufferCapacity()); + + final IHAWriteMessage rmsg = reader.processNextBuffer(rbuf); + + assertEquals(rmsg.getSize(), msg.getSize()); + + } + + // commit the log file (write the closing root block). + writer.closeHALog(closeRBV(openRB)); + + /* + * The writer should have closed the file, so the reader should + * return immediately to report no more buffers. + */ + assertFalse(reader.hasMoreBuffers()); + + } finally { + + writer.disableHALog(); + } + + // Read all HALog files in the test directory. 
+ HALogReader.main(new String[] { logdir.toString() }); + } /** * SimpleWriter writes a number of log files with a set of messages in each */ - static class SimpleWriter implements Runnable { - final ByteBuffer data = randomData(2000); + private class SimpleWriter implements Callable<Void> { - int sequence = 0; + private IRootBlockView openRB; + private final HALogWriter writer; + private final int count; - private IRootBlockView rbv; - private HALogWriter writer; - private ChecksumUtility checker; - private int count; + /** + * + * @param openRB + * The opening root block. + * @param writer + * The {@link HALogWriter}. + * @param count + * The HALog files to write. Each will have a random #of + * records. + */ + SimpleWriter(final IRootBlockView openRB, final HALogWriter writer, + final int count) { - SimpleWriter(IRootBlockView rbv, HALogWriter writer, ChecksumUtility checker, int count) { - this.rbv = rbv; + this.openRB = openRB; this.writer = writer; - this.checker = checker; this.count = count; + } @Override - public void run() { - final UUID storeUUID = UUID.randomUUID(); - try { - for (int i = 0; i < count; i++) { - // add delay to write thread to test read thread waiting for data - Thread.sleep(10); - final IHAWriteMessage msg = new HAWriteMessage(storeUUID, rbv - .getCommitCounter(), rbv.getLastCommitTime(), - sequence++, data.limit(), checker - .checksum(data), rbv.getStoreType(), - rbv.getQuorumToken(), 1000, 0); + public Void call() throws Exception { - writer.writeOnHALog(msg, data); - if (((i+1) % (1 + r.nextInt(count/3))) == 0) { - System.out.println("Cycling HALog after " + sequence + " records"); - rbv = closeRBV(rbv); - writer.closeHALog(rbv); - sequence = 0; - writer.createLog(rbv); - } - } - rbv = closeRBV(rbv); - writer.closeHALog(rbv); - } catch (FileNotFoundException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - + final UUID storeUUID = UUID.randomUUID(); + final long fileExtent = 1000; // NB: ignored for test. + final long firstOffset = 0; // NB: ignored for test. + + // Note: Thread Local! Can not be passed in by the caller. + final ChecksumUtility checker = ChecksumUtility.getCHK(); + + for (int i = 0; i < count; i++) { + + // Min of 1 message. Max of r.nextInt(). + final long nmessages = r.nextInt(100) + 1; + + for (long sequence = 0; sequence < nmessages; sequence++) { + + // add delay to write thread to test reader waiting + Thread.sleep(10); + + // Use random data of random length. + final int size = r.nextInt(4 * Bytes.kilobyte32) + 1; + + final ByteBuffer data = randomData(size); + + final IHAWriteMessage msg = new HAWriteMessage(storeUUID, + openRB.getCommitCounter(), + openRB.getLastCommitTime(), sequence, size, + checker.checksum(data), openRB.getStoreType(), + openRB.getQuorumToken(), fileExtent, firstOffset); + + writer.writeOnHALog(msg, data); + + } + + if (log.isInfoEnabled()) + log.info("Cycling HALog after " + nmessages + " records"); + + // close log. + writer.closeHALog(openRB = closeRBV(openRB)); + + // open new log. + writer.createLog(openRB); + + } // next HALog file. + + // Close the last HALog. + writer.closeHALog(openRB = closeRBV(openRB)); + + // Done. + return null; + + } + + } // class SimpleWriter. + + /** + * Reader consumes an HALog file. The file must exist before you start + * running the {@link ReaderTask}. 
+ * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + private static class ReaderTask implements Callable<Long> { + + private final long commitCounter; + private final HALogWriter writer; + private final Future<Void> wf; + + /** + * + * @param commitCounter + * The commit counter that identifies the closing commit + * point for the HALog file to be read. + * @param writer + * The {@link HALogWriter}. + * @param wf + * The {@link Future} for the {@link HALogWriter}. This is + * used to monitor for an error in the writer so the reader + * does not block the test from completing (or failing). + */ + public ReaderTask(final long commitCounter, final HALogWriter writer, + final Future<Void> wf) { + + this.commitCounter = commitCounter; + + this.writer = writer; + + this.wf = wf; + + } + + /** Test future if done. Throws exception if writer fails. */ + private void checkWriterFuture() throws InterruptedException, + ExecutionException { + + if (wf.isDone()) { + + wf.get(); + + } + + } + + /** + * + * @return The #of {@link IHAMessage}s read from the file. + */ + @Override + public Long call() throws Exception { + + // Allocate a heap ByteBuffer + final ByteBuffer rbuf = ByteBuffer + .allocate(DirectBufferPool.INSTANCE.getBufferCapacity()); + + // Note: Throws FileNotFoundException if does not exist. + final IHALogReader reader = writer.getReader(commitCounter); + + assertNotNull(reader); + + long nread = 0L; + + try { + + while (reader.hasMoreBuffers()) { + + checkWriterFuture(); + + final IHAWriteMessage rmsg = reader.processNextBuffer(rbuf); + + if (log.isDebugEnabled()) + log.debug("Read message: " + rmsg.getSequence() + + ", size: " + rmsg.getSize()); + + assertEquals(nread, rmsg.getSequence()); + + nread++; + + checkWriterFuture(); + + } + + return nread; + + } finally { + + /* + * Note: This should not throw an IOException. + * + * Note: It it does throw an IOException, then it can also be + * masking an error in the try{} above. Diagnose both if you get + * anything thrown out of here. + */ + + reader.close(); + + } + + } + } + /** * While a writer thread writes a number of HALogs, readers are opened * to process them. + * + * @throws Exception */ - public void testConcurrentRWWriterReader() throws FileNotFoundException, - IOException { - // establish halogdir - File logdir = new File("/tmp/halogdir"); - logdir.mkdirs(); + public void testConcurrentRWWriterReader() throws Exception { - final ChecksumUtility checker = ChecksumUtility.getCHK(); - final HALogWriter writer = new HALogWriter(logdir); - final IRootBlockView rbv = openRBV(StoreTypeEnum.RW); + + final IRootBlockView rbv = openRBV(StoreTypeEnum.RW); - assertTrue(rbv.getStoreType() == StoreTypeEnum.RW); + assertEquals(StoreTypeEnum.RW, rbv.getStoreType()); writer.createLog(rbv); -// final ByteBuffer data = randomData(2000); + // The #of HALog files to write. If GT 1000, then more than one + // subdirectory worth of files will be written. + final int nfiles = 100 + r.nextInt(1000); - Thread wthread = new Thread(new SimpleWriter(rbv, writer, checker, 500)); + // Start the writer. + final Future<Void> wf = executorService.submit(new SimpleWriter(rbv, + writer, nfiles)); - Runnable rreader = new Runnable() { + try { - ByteBuffer rbuf = ByteBuffer.allocate(1 * 1024 * 1024); // 1 mb + /* + * Now keep on opening readers for "current file" while writer + * continues. + * + * Note: The writer will write multiple files. 
For each file that it + * writes, we run the reader until it is done, then we open a new + * reader on the next HALog file. + */ + for (long commitCounter = 1L; commitCounter <= nfiles; commitCounter++) { - @Override - public void run() { - final IHALogReader reader = writer.getReader(); - if (reader == null) { - return; - } - - try { - while (reader.hasMoreBuffers()) { - final IHAWriteMessage rmsg = reader - .processNextBuffer(rbuf); + /* + * Note: We need to spin here in case the reader tries to open + * the HALog for reading before the writer has created the HALog + * for that commit point. This can be done by monitoring the + * writer or the file system. + */ + final File file = CommitCounterUtility.getCommitCounterFile( + logdir, commitCounter, IHALogReader.HA_LOG_EXT); -// System.out.println("Read message: " + rmsg.getSequence() -// + ", size: " + rmsg.getSize()); - } - } catch (IOException e) { - e.printStackTrace(); - } - } + while (!file.exists()) { - }; - - // start the writer first - wthread.start(); - - // now keep on opening readers for "current file" while writer continues - while (wthread.isAlive()) { - Thread rthread = new Thread(rreader); - rthread.start(); - while (rthread.isAlive()) { - try { - Thread.sleep(10); - } catch (InterruptedException e) { - break; - } - } - } + if (wf.isDone()) { + // Check writer for errors. + wf.get(); + } - // for sanity, let's run through the standard reader - try { - HALogReader.main(new String[] { "/tmp/halogdir" }); - } catch (InterruptedException e) { - // NOP - } + if (log.isInfoEnabled()) + log.info("Blocked waiting on writer: commitCounter=" + + commitCounter + ", file=" + file); + + // Wait for the file. + Thread.sleep(100/* ms */); + + } + + /* + * Open and read the next HALog file, blocking until all data + * has been read from that file. + */ + new ReaderTask(commitCounter, writer, wf).call(); + + } + + // Check writer for errors. There should not be any. + wf.get(); + + } finally { + + wf.cancel(true/* mayInterruptIfRunning */); + + } + + // for sanity, let's run through the standard reader + HALogReader.main(new String[] { logdir.toString() }); + } + /** + * Unit test verifies that each open of an {@link IHALogReader} is distinct + * and the an {@link IHALogReader#close()} will not close the backing + * channel for a different reader instance that is reading from the same + * HALog file. This version of the test is for a historical (non-live) HALog + * file. The case for the live HALog file is tested by + * {@link #testSimpleRWWriterReader()}. 
+ */ + public void test_doubleOpen_close_historicalHALog() { + fail("write test"); + } + } Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-21 20:00:46 UTC (rev 7154) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-22 19:26:41 UTC (rev 7155) @@ -39,13 +39,9 @@ import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import net.jini.config.Configuration; @@ -73,7 +69,6 @@ import com.bigdata.ha.msg.HASnapshotDigestResponse; import com.bigdata.ha.msg.IHADigestRequest; import com.bigdata.ha.msg.IHADigestResponse; -import com.bigdata.ha.msg.IHAGlobalWriteLockRequest; import com.bigdata.ha.msg.IHALogDigestRequest; import com.bigdata.ha.msg.IHALogDigestResponse; import com.bigdata.ha.msg.IHALogRequest; @@ -97,11 +92,9 @@ import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.ITx; import com.bigdata.journal.Journal; -import com.bigdata.journal.WriteExecutorService; import com.bigdata.journal.jini.ha.HAJournalServer.HAQuorumService; import com.bigdata.journal.jini.ha.HAJournalServer.NSSConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalServer.RunStateEnum; -import com.bigdata.quorum.AsynchronousQuorumCloseException; import com.bigdata.quorum.Quorum; import com.bigdata.service.AbstractTransactionService; import com.bigdata.service.jini.JiniClient; @@ -801,20 +794,62 @@ final long commitCounter = req.getCommitCounter(); /* + * Open the HALog file. If it exists, then we will run a task to + * send it along the pipeline. + * * Note: The choice of the "live" versus a historical "closed" log * file needs to be an atomic decision and thus MUST be made by the * HALogManager. + * + * Note: Once opened, the HALog file must be closed. Once we submit + * the task for execution, the SendHALogTask() is responsible for + * closing the HALog file. If we do not get that far, then the file + * is closed by this code block. + * + * Note: This can leak an open file handle in the case where the + * ExecutorService is shutdown before the task runs, but that only + * occurs on Journal shutdown. */ - final IHALogReader r = getHALogNexus().getReader(commitCounter); + final boolean isLive; + final FutureTask<Void> ft; + { - final boolean isLive = r.isLive(); - - // Task sends an HALog file along the pipeline. - final FutureTask<Void> ft = new FutureTaskMon<Void>( - new SendHALogTask(req, r)); + IHALogReader r = null; + + try { - // Run task. - getExecutorService().submit(ft); + // Note: open file handle - must be closed eventually. + r = getHALogNexus().getReader(commitCounter); + + isLive = r.isLive(); + + // Task sends an HALog file along the pipeline. + ft = new FutureTaskMon<Void>(new SendHALogTask(req, r)); + + // Run task. + getExecutorService().submit(ft); + + // Clear reference. File handle will be closed by task. 
+ r = null; + + } finally { + + if (r != null) { + + try { + + r.close(); + + } catch (Throwable t) { + + log.error(t, t); + } + + } + + } + + } /** * Return Future. @@ -868,6 +903,7 @@ public Void call() throws Exception { + try { final IBufferAccess buf = DirectBufferPool.INSTANCE.acquire(); long nsent = 0; @@ -922,7 +958,8 @@ // ft.cancel(true/* mayInterruptIfRunning */); } - } + } // while(hasMoreBuffers()) + success = true; return null; @@ -931,15 +968,24 @@ buf.release(); + r.close(); + if (haLog.isDebugEnabled()) haLog.debug("req=" + req + ", nsent=" + nsent + ", success=" + success); } + } finally { + + // Close the open log file. + r.close(); + } - } + } // call() + + } // class SendHALogTask /* * REBUILD: Take a read lock and send everything from the backing file, @@ -1191,308 +1237,308 @@ } - /** - * {@inheritDoc} - * - * TODO This method relies on the unisolated semaphore. That provides a - * sufficient guarantee for updates that original through the NSS since - * all such updates will eventually require the unisolated connection to - * execute. However, if we support multiple concurrent unisolated - * connections distinct KBs per the ticket below, then we will need to - * have a different global write lock - perhaps via the - * {@link WriteExecutorService}. - * - * @deprecated This method is no longer necessary to support backups - * since we can now take snapshots without suspending - * writers. - * - * @see https://sourceforge.net/apps/trac/bigdata/ticket/566 ( - * Concurrent unisolated operations against multiple KBs on the - * same Journal) - */ - @Override - public Future<Void> globalWriteLock(final IHAGlobalWriteLockRequest req) - throws IOException, InterruptedException, TimeoutException { - - if (req == null) - throw new IllegalArgumentException(); - - /* - * This token will be -1L if there is no met quorum. This method may - * only execute while there is a met quorum and this service is the - * leader for that met quorum. - * - * Note: This method must stop waiting for the global lock if this - * service is no longer the leader (quorum break). - * - * Note: This method must stop holding the global lock if this - * service is no longer the leader (quorum break). - */ - final long token = getQuorum().token(); - - // Verify that the quorum is met and that this is the leader. - getQuorum().assertLeader(token); - - // Set true IFF we acquire the global write lock. - final AtomicBoolean didAcquire = new AtomicBoolean(false); - - // Task to acquire the lock - final FutureTask<Void> acquireLockTaskFuture = new FutureTask<Void>( - new AcquireGlobalLockTask(didAcquire)); - - // Task used to interrupt task acquiring the lock if quorum breaks. - final FutureTask<Void> interruptLockTaskFuture = new FutureTask<Void>( - new InterruptAcquireLockTask(token, acquireLockTaskFuture, - req)); - - // Task to release the lock. - final FutureTask<Void> releaseLockTaskFuture = new FutureTask<Void>( - new ReleaseGlobalLockTask(token, req)); - - // Service to run those tasks. - final Executor executor = getExecutorService(); - - // Set true iff we will run with the global lock. - boolean willRunWithLock = false; - try { - - /* - * Submit task to interrupt the task that is attempting to - * acquire the lock if the quorum breaks. This prevents us - * waiting for the global long beyond a quorum break. - */ - executor.execute(interruptLockTaskFuture); - - /* - * Submit task to acquire the lock. 
- */ - executor.execute(acquireLockTaskFuture); - - /* - * Wait for the global lock (blocks up to the timeout). - */ - acquireLockTaskFuture.get(req.getLockWaitTimeout(), - req.getLockWaitUnits()); - - // We will run with the global lock. - willRunWithLock = true; - - } catch (RejectedExecutionException ex) { - - /* - * Note: This will throw a RejectedExecutionException if the - * executor has been shutdown. That unchecked exception will be - * thrown back to the client. Since the lock has not been - * acquired if that exception is thrown, we do not need to do - * anything else here. - */ - - haLog.warn(ex); - - throw ex; - - } catch (ExecutionException e) { - - haLog.error(e, e); - - throw new RuntimeException(e); - - } finally { - - /* - * Make sure these tasks are cancelled. - */ - - interruptLockTaskFuture.cancel(true/* mayInterruptIfRunning */); - - acquireLockTaskFuture.cancel(true/* mayInterruptIfRunning */); - - /* - * Release the global lock if we acquired it but will not run - * with that lock held (e.g., due to some error). - */ - - if (!willRunWithLock && didAcquire.get()) { - - HAJournal.this.releaseUnisolatedConnection(); - - log.wa... [truncated message content] |
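The key change to sendHALogForWriteSet() in the HAJournal.java hunk above is the ownership hand-off: the reader is opened, wrapped in SendHALogTask, submitted, and the local reference is then cleared so that exactly one party is ever responsible for close() - the task after a successful submit, the submitting code otherwise. A minimal, self-contained sketch of that idiom follows; the class and method names are hypothetical (a plain java.io.Closeable stands in for the IHALogReader), not bigdata APIs.

{{{
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

public class ResourceHandOffSketch {

    /**
     * Accept a resource, hand it to a task, and make the task the owner
     * responsible for close(); close locally only if the hand-off (the
     * submit) fails.
     */
    static Future<Void> submitWithHandOff(final ExecutorService executor,
            final Closeable resource) {

        Closeable r = resource;

        try {

            // The task takes ownership and closes the resource when done.
            final Closeable owned = r;

            final FutureTask<Void> ft = new FutureTask<Void>(
                    new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            try {
                                // ... consume the resource here ...
                                return null;
                            } finally {
                                owned.close();
                            }
                        }
                    });

            // May throw RejectedExecutionException; see finally below.
            executor.submit(ft);

            // Hand-off succeeded: clear the local reference.
            r = null;

            return ft;

        } finally {

            if (r != null) {
                // Hand-off failed: close locally, log, and continue.
                try {
                    r.close();
                } catch (IOException ex) {
                    ex.printStackTrace();
                }
            }

        }

    }

    public static void main(final String[] args) throws Exception {

        final ExecutorService es = Executors.newSingleThreadExecutor();

        try {
            submitWithHandOff(es, new Closeable() {
                @Override
                public void close() { /* demo resource: nothing to release */ }
            }).get();
        } finally {
            es.shutdown();
        }

    }

}
}}}

As the commit comment itself notes, the remaining leak window is an ExecutorService that is shut down after a successful submit but before the task runs; the sketch has the same property.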
From: <tho...@us...> - 2013-05-21 20:00:53
Revision: 7154 http://bigdata.svn.sourceforge.net/bigdata/?rev=7154&view=rev Author: thompsonbry Date: 2013-05-21 20:00:46 +0000 (Tue, 21 May 2013) Log Message: ----------- Added isLive() to the IHALog interface. Modified HAJournal.sendHALogForWriteSet() to use an asyncnonous future only when the live log is being sent along the pipeline. This will minimize DGC thread leaks per [1]. [1] https://sourceforge.net/apps/trac/bigdata/ticket/678 (DGC Thread Leak: sendHALogForWriteSet()) Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-21 16:08:18 UTC (rev 7153) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogReader.java 2013-05-21 20:00:46 UTC (rev 7154) @@ -173,6 +173,7 @@ } }; + @Override public void close() { if (m_channel.isOpen()) { @@ -189,6 +190,14 @@ } + @Override + public boolean isLive() { + + return false; + + } + + @Override public boolean isEmpty() { return m_openRootBlock.getCommitCounter() == m_closeRootBlock @@ -219,6 +228,7 @@ } + @Override public boolean hasMoreBuffers() throws IOException { assertOpen(); @@ -260,6 +270,7 @@ } + @Override public IHAWriteMessage processNextBuffer(final ByteBuffer clientBuffer) throws IOException { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-21 16:08:18 UTC (rev 7153) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/HALogWriter.java 2013-05-21 20:00:46 UTC (rev 7154) @@ -857,6 +857,13 @@ m_state.m_accessors++; } + @Override + public boolean isLive() { + + return true; + + } + @Override public IRootBlockView getOpeningRootBlock() throws IOException { Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-21 16:08:18 UTC (rev 7153) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/ha/halog/IHALogReader.java 2013-05-21 20:00:46 UTC (rev 7154) @@ -126,4 +126,9 @@ void computeDigest(MessageDigest digest) throws DigestException, IOException; + /** + * Return <code>true</code> iff this is was the live HALog at the moment + * when it was opened. + */ + boolean isLive(); } Modified: branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-21 16:08:18 UTC (rev 7153) +++ branches/READ_CACHE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2013-05-21 20:00:46 UTC (rev 7154) @@ -807,6 +807,8 @@ */ final IHALogReader r = getHALogNexus().getReader(commitCounter); + final boolean isLive = r.isLive(); + // Task sends an HALog file along the pipeline. 
final FutureTask<Void> ft = new FutureTaskMon<Void>( new SendHALogTask(req, r)); @@ -814,8 +816,36 @@ // Run task. getExecutorService().submit(ft); - // Return *ASYNCHRONOUS* proxy (interruptable). - return getProxy(ft, true/* asynch */); + /** + * Return Future. + * + * FIXME DGC: This leaks a thread every time we return an + * asynchronous proxy, but we need the ability to interrupt the + * transfer of an HALog file. + * + * Look at HAJournalServer and how it manages the transition to a + * joined service in RESYNC and identify a different mechanism for + * interrupting the transfer of the HALog. + * + * Consider using a well known exception thrown back long the write + * pipeline to indicate that a receiver is done recieving data for + * some HALog (or backing store) or sending a message which + * explicitly cancels a transfer using an identifier for that + * transfer. If this is done synchronously while in + * handleReplicatedWrite then we will get the same decidability as + * using an asyncrhonous future, but without the thread leak + * problem. + * + * This issue is most pressing for sendHALogForWriteSet() since we + * can synchronous many 1000s of HALog files when resynchronizing a + * service. However, the same DGC thread leak exists for several + * other methods as specified on the ticket below. + * + * @see <a + * href="https://sourceforge.net/apps/trac/bigdata/ticket/678" + * > DGC Thread Leak: sendHALogForWriteSet() </a> + */ + return getProxy(ft, isLive/* asynch */); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
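For context on what r7154's isLive() adds, here is a caller-side sketch of the IHALogReader contract using only methods that appear in the diffs (getReader(long), isLive(), hasMoreBuffers(), processNextBuffer(ByteBuffer), close()). It is a fragment that assumes the com.bigdata HALog classes are on the classpath and simplifies buffer management and error handling.

{{{
// Fragment: assumes the com.bigdata HALog classes named in the diffs
// (HALogWriter, IHALogReader, IHAWriteMessage, DirectBufferPool) are on
// the classpath; buffer handling and error paths are simplified.
static long drainHALog(final HALogWriter writer, final long commitCounter)
        throws Exception {

    // Note: throws FileNotFoundException if that HALog does not exist.
    final IHALogReader reader = writer.getReader(commitCounter);

    long nread = 0L;

    try {

        // Heap buffer sized the same way as in the tests above.
        final ByteBuffer buf = ByteBuffer.allocate(DirectBufferPool.INSTANCE
                .getBufferCapacity());

        /*
         * Note: when reader.isLive() is true, hasMoreBuffers() may block
         * until the writer seals the file with its closing root block (or
         * destroys it), so a live transfer must remain interruptible.
         */
        while (reader.hasMoreBuffers()) {

            final IHAWriteMessage msg = reader.processNextBuffer(buf);

            // ... replicate msg and its payload along the pipeline ...

            nread++;

        }

        return nread;

    } finally {

        // Closes this reader only; never the writer or other readers.
        reader.close();

    }

}
}}}

Because hasMoreBuffers() can block indefinitely on the live HALog, only a live transfer needs an interruptible (asynchronous) proxy, which is why the revision changes the return to getProxy(ft, isLive) and pays the DGC thread cost only in that case.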
From: <tho...@us...> - 2013-05-21 16:08:28
Revision: 7153 http://bigdata.svn.sourceforge.net/bigdata/?rev=7153&view=rev Author: thompsonbry Date: 2013-05-21 16:08:18 +0000 (Tue, 21 May 2013) Log Message: ----------- modified to throw exception if the functionURI is null. See https://sourceforge.net/apps/trac/bigdata/ticket/672 (Occasional error on BSBM Explore query) Modified Paths: -------------- branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java Modified: branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java =================================================================== --- branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java 2013-05-20 19:38:56 UTC (rev 7152) +++ branches/READ_CACHE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/FunctionNode.java 2013-05-21 16:08:18 UTC (rev 7153) @@ -61,6 +61,11 @@ super(args, NV.asMap(new NV[] { new NV(Annotations.SCALAR_VALS, scalarValues), new NV(Annotations.FUNCTION_URI, functionURI), })); + + if (functionURI == null) + throw new IllegalArgumentException(); + + // scalarValues MAY be null. // super(args, null/*anns*/); // @@ -73,8 +78,10 @@ /** * Required deep copy constructor. */ - public FunctionNode(FunctionNode op) { + public FunctionNode(final FunctionNode op) { + super(op); + } /** This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
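The guard added to FunctionNode turns a null functionURI into an immediate IllegalArgumentException at construction time rather than an obscure failure later during query evaluation (the symptom reported in ticket 672). A generic illustration of that fail-fast pattern follows; the class below is hypothetical and is not the real FunctionNode constructor (which also takes the AST argument list and optional scalar values).

{{{
// Hypothetical class illustrating the fail-fast pattern; not the real
// FunctionNode.
public class Node {

    private final String functionURI;

    public Node(final String functionURI) {

        if (functionURI == null)
            throw new IllegalArgumentException("functionURI is required");

        this.functionURI = functionURI;

    }

    public String getFunctionURI() {
        return functionURI;
    }

    public static void main(final String[] args) {

        try {
            new Node(null); // rejected here, at construction time...
        } catch (IllegalArgumentException expected) {
            // ...rather than failing later during query evaluation.
            System.out.println("rejected: " + expected.getMessage());
        }

    }

}
}}}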
From: <tho...@us...> - 2013-05-20 19:39:03
Revision: 7152 http://bigdata.svn.sourceforge.net/bigdata/?rev=7152&view=rev Author: thompsonbry Date: 2013-05-20 19:38:56 +0000 (Mon, 20 May 2013) Log Message: ----------- refactored the clock skew assertions and made them more permissive. Modified Paths: -------------- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-20 17:32:22 UTC (rev 7151) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2013-05-20 19:38:56 UTC (rev 7152) @@ -1759,6 +1759,67 @@ // } /** + * Assert that <code>t1</code> LT <code>t2</code>, where <code>t1</code> and + * <code>t2</code> are timestamps obtain such that this relation will be + * <code>true</code> if the clocks on the nodes are synchronized. + * <p> + * Note: Clock synchronization errors can arise across nodes if the nodes + * are not using a common network time source. + * <p> + * Note: Synchronization errors can arise on a single node if the clock is + * changed on that node - specifically if the clock is move backwards to + * before the most recent commit timestamp. For example, if the timezone is + * changed. + * + * @param serviceId1 + * The service that reported the timestamp <code>t1</code>. + * @param serviceId2 + * The service that reported the timestamp <code>t2</code>. + * @param t1 + * A timestamp from one service. + * @param t2 + * A timestamp from the another service. + * + * @throws ClocksNotSynchronizedException + */ + protected void assertBefore(final UUID serviceId1, final UUID serviceId2, + final long t1, final long t2) throws ClocksNotSynchronizedException { + + // Maximum allowed clock skew. + final long maxSkew = getMaximumClockSkewMillis(); + + final long delta = Math.abs(t1 - t2); + + if (delta < maxSkew) + return; + + throw new ClocksNotSynchronizedException("service1=" + serviceId1 + + ", serviceId2=" + serviceId2 + ", skew=" + delta + + "ms exceeds maximumSkew=" + maxSkew + "ms."); + + } + + /** + * The maximum error allowed (milliseconds) in the clocks. This is used by + * {@link #assertBefore(UUID, UUID, long, long)} to verify that the clocks + * are within some acceptable skew of one another. It is also used by + * {@link #nextCommitTimestamp()} where it specifies the maximum clock skew + * that will be corrected without operator intervention. + * + * @see #assertBefore(UUID, UUID, long, long) + * + * FIXME HA TXS : Configuration Option. Note: This is not just an HA + * issue. We also need to be able to override this in order to write on + * a journal if the local clock is wildly different from the clock on + * the machine where the journal was produced. + */ + protected long getMaximumClockSkewMillis() { + + return 5000; + + } + + /** * The HA timeout in milliseconds for a 2-phase prepare. * * @throws UnsupportedOperationException @@ -2885,7 +2946,7 @@ * Note: delta is in ms. 
*/ final long delta = Math.abs(t - lastCommitTime); - if (delta > 5000/* ms */) + if (delta > getMaximumClockSkewMillis()/* ms */) throw new ClocksNotSynchronizedException("Clocks off by " + delta + " ms: lastCommitTime=" + lastCommitTime + ", but localTimestamp=" + t); Modified: branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java 2013-05-20 17:32:22 UTC (rev 7151) +++ branches/READ_CACHE/bigdata/src/java/com/bigdata/journal/Journal.java 2013-05-20 19:38:56 UTC (rev 7152) @@ -95,7 +95,6 @@ import com.bigdata.service.IBigdataFederation; import com.bigdata.sparse.GlobalRowStoreHelper; import com.bigdata.sparse.SparseRowStore; -import com.bigdata.util.ClocksNotSynchronizedException; import com.bigdata.util.InnerCause; import com.bigdata.util.concurrent.DaemonThreadFactory; import com.bigdata.util.concurrent.LatchedExecutor; @@ -357,6 +356,11 @@ final private long timestampOnLeader; /** + * The {@link UUID} of the quorum leader. + */ + final private UUID leaderId; + + /** * This is the earliest visible commit point on the leader. */ final private IHANotifyReleaseTimeRequest leadersValue; @@ -446,9 +450,11 @@ quorumService = getQuorum().getClient(); this.joinedServiceIds = joinedServiceIds; + + this.leaderId = quorumService.getServiceId(); leadersValue = ((InnerJournalTransactionService) getTransactionService()) - .newHANotifyReleaseTimeRequest(quorumService.getServiceId()); + .newHANotifyReleaseTimeRequest(leaderId); // Note: Local method call. timestampOnLeader = leadersValue.getTimestamp(); @@ -481,8 +487,8 @@ // This is the timestamp for right now. final long timeNow = newConsensusProtocolTimestamp(); - // The local clock must be moving forward. - assertBefore(timeLeader, timeNow); +// // The local clock must be moving forward. +// assertBefore(timeLeader, timeNow); // Start with the leader's value (from ctor). minimumResponse = leadersValue; @@ -492,6 +498,8 @@ if (log.isTraceEnabled()) log.trace("follower: " + response); + final UUID followerId = response.getServiceUUID(); + if (minimumResponse.getPinnedCommitCounter() > response .getPinnedCommitCounter()) { @@ -503,14 +511,16 @@ * Verify that the timestamp from the ctor is BEFORE the * timestamp assigned by the follower in the GatherTask. */ - assertBefore(timeLeader, response.getTimestamp()); + assertBefore(leaderId, followerId, timeLeader, + response.getTimestamp()); /* * Verify that the timestamp from the GatherTask on the follower * is before the timestamp obtained at the top of this run() * method. */ - assertBefore(response.getTimestamp(), timeNow); + assertBefore(followerId, leaderId, response.getTimestamp(), + timeNow); } @@ -968,32 +978,7 @@ } -// /** -// * The maximum error allowed (milliseconds) in the clocks. -// */ -// private static final long epsilon = 3; - /** - * Assert that t1 LT t2. - * - * @param t1 - * A timestamp from one service. - * @param t2 - * A timestamp from another service. - * - * @throws ClocksNotSynchronizedException - */ - private void assertBefore(final long t1, final long t2) - throws ClocksNotSynchronizedException { - - if (t1 < t2) - return; - - throw new ClocksNotSynchronizedException(); - - } - - /** * Note: This deliberately uses the (non-remote) method * {@link BasicHA#nextTimestamp()}. 
This is done so we can write a unit test * of the {@link GatherTask} that imposes clock skew by overridding the next
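The refactored check in r7152 is symmetric: it tolerates clock skew in either direction up to getMaximumClockSkewMillis() (currently hard-coded at 5000ms) rather than requiring t1 to be strictly before t2, which is what makes the assertions "more permissive" as the log message says. A standalone sketch of that check follows; the class and method names are illustrative, and a plain IllegalStateException stands in for ClocksNotSynchronizedException.

{{{
// Illustrative names only; a plain IllegalStateException stands in for
// ClocksNotSynchronizedException, and 5000L mirrors the hard-coded
// getMaximumClockSkewMillis() default.
public class ClockSkewCheckSketch {

    static void assertSkewWithin(final String service1,
            final String service2, final long t1, final long t2,
            final long maxSkewMillis) {

        final long delta = Math.abs(t1 - t2);

        if (delta < maxSkewMillis)
            return;

        throw new IllegalStateException("service1=" + service1
                + ", service2=" + service2 + ", skew=" + delta
                + "ms exceeds maximumSkew=" + maxSkewMillis + "ms.");

    }

    public static void main(final String[] args) {

        // 4999ms of skew is tolerated...
        assertSkewWithin("leader", "follower", 1000000L, 1004999L, 5000L);

        // ...but 5000ms (or more) is rejected, in either direction.
        try {
            assertSkewWithin("follower", "leader", 1005000L, 1000000L, 5000L);
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }

    }

}
}}}

With the 5000ms default, timestamps 4999ms apart pass while timestamps exactly 5000ms apart (or more) are rejected, since the comparison is delta < maxSkew.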