This list is closed; nobody may subscribe to it.
Messages per month:

| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2010 | | | | | | | 139 | 94 | 232 | 143 | 138 | 55 |
| 2011 | 127 | 90 | 101 | 74 | 148 | 241 | 169 | 121 | 157 | 199 | 281 | 75 |
| 2012 | 107 | 122 | 184 | 73 | 14 | 49 | 26 | 103 | 133 | 61 | 51 | 55 |
| 2013 | 59 | 72 | 99 | 62 | 92 | 19 | 31 | 138 | 47 | 83 | 95 | 111 |
| 2014 | 125 | 60 | 119 | 136 | 270 | 83 | 88 | 30 | 47 | 27 | 23 | |
| 2015 | | | | | | | | | 3 | | | |
| 2016 | | | 4 | 1 | | | | | | | | |
From: <tob...@us...> - 2014-04-21 21:04:56

Revision: 8134
          http://sourceforge.net/p/bigdata/code/8134
Author:   tobycraig
Date:     2014-04-21 21:04:52 +0000 (Mon, 21 Apr 2014)

Log Message:
-----------
Fixed error messages not being displayed

Modified Paths:
--------------
    branches/RDR/bigdata-war/src/html/js/workbench.js

Modified: branches/RDR/bigdata-war/src/html/js/workbench.js
===================================================================
--- branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-21 20:59:35 UTC (rev 8133)
+++ branches/RDR/bigdata-war/src/html/js/workbench.js	2014-04-21 21:04:52 UTC (rev 8134)
@@ -170,7 +170,7 @@
     data: data,
     contentType: 'application/xml',
     success: function() { input.val(''); getNamespaces(); },
-    error: function(jqXHR, textStatus, errorThrown) { alert(errorThrown); }
+    error: function(jqXHR, textStatus, errorThrown) { alert(jqXHR.statusText); }
   };
   $.ajax('/bigdata/namespace', settings);
 }
@@ -435,7 +435,7 @@

 function updateResponseError(jqXHR, textStatus, errorThrown) {
   $('#load-response, #load-clear').show();
-  $('#load-response pre').text('Error! ' + textStatus + ' ' + errorThrown);
+  $('#load-response pre').text('Error! ' + textStatus + ' ' + jqXHR.statusText);
 }

@@ -536,7 +536,7 @@
 });

 function downloadRDFError(jqXHR, textStatus, errorThrown) {
-  alert(errorThrown);
+  alert(jqXHR.statusText);
 }

 function exportXML(filename) {
@@ -698,7 +698,7 @@

 function queryResultsError(jqXHR, textStatus, errorThrown) {
   $('#query-response, #query-tab .bottom *').show();
-  $('#query-response').text('Error! ' + textStatus + ' ' + errorThrown);
+  $('#query-response').text('Error! ' + textStatus + ' ' + jqXHR.statusText);
 }

 /* Pagination */
@@ -1000,7 +1000,7 @@
 function updateExploreError(jqXHR, textStatus, errorThrown) {
   $('#explore-tab .bottom').show();
   $('#explore-results .box').html('').hide();
-  $('#explore-header').text('Error! ' + textStatus + ' ' + errorThrown);
+  $('#explore-header').text('Error! ' + textStatus + ' ' + jqXHR.statusText);
   $('#explore-results, #explore-header').show();
 }
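A note on the change itself: every AJAX error callback now reports jqXHR.statusText instead of jQuery's errorThrown argument, presumably because errorThrown was arriving empty in the failing cases (it is often an empty string for transport-level failures such as timeouts or refused connections). A minimal sketch of the three information sources available in the callback; the endpoint is only illustrative:

{{{
// Sketch: comparing the message sources in a jQuery AJAX error callback.
// '/bigdata/namespace' is used only for illustration; any endpoint that
// returns an HTTP error will do.
$.ajax('/bigdata/namespace', {
  error: function(jqXHR, textStatus, errorThrown) {
    // textStatus:       'error', 'timeout', 'abort' or 'parsererror'
    // errorThrown:      often '' for network-level failures
    // jqXHR.statusText: e.g. 'Internal Server Error' for an HTTP 500
    console.log(textStatus, errorThrown, jqXHR.statusText);
  }
});
}}}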
From: <tho...@us...> - 2014-04-21 20:59:42
Revision: 8133 http://sourceforge.net/p/bigdata/code/8133 Author: thompsonbry Date: 2014-04-21 20:59:35 +0000 (Mon, 21 Apr 2014) Log Message: ----------- Merging in changes from the main development branch into the RDR branch. {{{ merge https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0 /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java R /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java R /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties R /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha --- Merging r7913 through r8131 into 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/disco G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/disco --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/attr G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/attr --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/util/config G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/util/config --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/lubm G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/lubm --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/uniprot/src G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/uniprot/src --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/uniprot G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/uniprot --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/btc/src/resources G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/btc/src/resources --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/btc G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf/btc --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-perf --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/HAJournal/HAJournal.config --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/bin/config G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/bin/config --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/bin/startHAServices A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/bin/HARestore A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/etc/default A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/etc/default/bigdataHA A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/etc/default/bigdata U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/etc/init.d/bigdataHA D /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/src/resources/etc/bigdata --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/lib/jetty G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/lib/jetty --- Merging r7913 through r8131 into 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/bop/joinGraph G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/bop/joinGraph --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/bop/util G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/bop/util --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/jsr166 G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/jsr166 --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/cache/StressTestGlobalLRU.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/util/httpd G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/test/com/bigdata/util/httpd --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/search/FullTextIndex.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/search/ReadIndexTask.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/aggregate G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/aggregate --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/util G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/util --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/joinGraph G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/joinGraph --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/jsr166 G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/jsr166 --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/btree/AbstractBTree.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/btree/PageStats.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/rwstore/RWStore.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/htree/raba G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/htree/raba --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/journal/DumpJournal.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/journal/WORMStrategy.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/journal/RWStrategy.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/journal/AbstractJournal.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/java/com/bigdata/util/CSVReader.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb A 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb A 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/build.xml --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/osgi G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/osgi --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-compatibility G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-compatibility --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/bench --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/StressTest_ClosedByInterrupt_RW.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/bench --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java C 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.srx A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.srx A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.trig A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.rq U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.ttl A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.ttl A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.srx A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.rq U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnions.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.rq A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.trig A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874b.rq U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAggregationQuery.java A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.srx A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq A /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.rq --- Merging r7913 through r8131 into 
/Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/internal G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/internal --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/relation G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/test/com/bigdata/rdf/relation --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/changesets G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/changesets --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/error G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/error --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java U /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/internal G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/internal --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/relation G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/relation --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/util G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/java/com/bigdata/rdf/util --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/samples G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-rdf/src/samples --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/LEGAL G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/LEGAL --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/lib G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/lib --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test/it/unimi/dsi G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test/it/unimi/dsi --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test/it/unimi G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test/it/unimi --- Merging 
r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/test --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java/it/unimi G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java/it/unimi --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java/it G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java/it --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src/java --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils/src --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/dsi-utils --- Merging r7913 through r8131 into /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE G /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE Merge complete. ===== File Statistics: ===== Deleted: 4 Added: 74 Updated: 54 ==== Property Statistics: ===== Merged: 50 Updated: 1 ==== Conflict Statistics: ===== File conflicts: 5 }}} I am working to reconcile the conflicts and validate the merge... ---- The file conflicts were: {{{ C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java C /Users/bryan/Documents/workspace/RDR_CLEAN_FOR_MERGE/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java }}} HAJournalServer: Resolved conflict in getNSSPort(). AbstractHAJournalServerTestCase: Resolved conflict in generation of the URL of the end point. MultiTenancyServlet: Resolved a conflict around transaction protection for the description of the graphs and the refactor to parameterize the graph description request in support of the new workbench. BigdataRDFContext: Conflict on imports. getKBInfo() was removed in the main branch. RemoteRepository: Resolved conflicts around use of log.warn() for IOException (versus ignoring the problem). ---- I have run through the NSS test suite, the AST test suite, and the QUADS mode SAIL test suite. Things look good. Committing to CI. 
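On the RemoteRepository point above, the resolution kept the variant that logs an IOException rather than silently ignoring it. A generic sketch of that pattern, using the log4j API already used elsewhere in this code base; the class name and tryClose() helper are illustrative, not the actual RemoteRepository code:

{{{
import java.io.Closeable;
import java.io.IOException;

import org.apache.log4j.Logger;

// Sketch of "log.warn() for IOException (versus ignoring the problem)":
// leave a trace of a cleanup failure without masking the outcome of the
// main operation. Class name and helper are hypothetical.
public class CleanupLogging {

    private static final Logger log = Logger.getLogger(CleanupLogging.class);

    public static void tryClose(final Closeable c) {
        try {
            c.close();
        } catch (IOException ex) {
            // Logged, not rethrown: the caller's result is unaffected.
            log.warn("Problem during close()", ex);
        }
    }
}
}}}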
See #526 (RDR)

Revision Links:
--------------
http://sourceforge.net/p/bigdata/code/7913
http://sourceforge.net/p/bigdata/code/8131

Modified Paths:
--------------
branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java branches/RDR/bigdata/src/java/com/bigdata/bop/solutions/MemoryGroupByOp.java branches/RDR/bigdata/src/java/com/bigdata/btree/AbstractBTree.java branches/RDR/bigdata/src/java/com/bigdata/btree/PageStats.java branches/RDR/bigdata/src/java/com/bigdata/btree/data/DefaultNodeCoder.java branches/RDR/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java branches/RDR/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java branches/RDR/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/RDR/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/RDR/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/RDR/bigdata/src/java/com/bigdata/journal/DumpJournal.java branches/RDR/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/RDR/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/RDR/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java branches/RDR/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/RDR/bigdata/src/java/com/bigdata/rwstore/RWStore.java
branches/RDR/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/RDR/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/RDR/bigdata/src/java/com/bigdata/search/ReadIndexTask.java branches/RDR/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java branches/RDR/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java branches/RDR/bigdata/src/java/com/bigdata/sparse/GlobalRowStoreHelper.java branches/RDR/bigdata/src/java/com/bigdata/util/CSVReader.java branches/RDR/bigdata/src/test/com/bigdata/btree/data/AbstractNodeDataRecordTestCase.java branches/RDR/bigdata/src/test/com/bigdata/cache/StressTestGlobalLRU.java branches/RDR/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/RDR/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/RDR/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java branches/RDR/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java branches/RDR/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTEvalHelper.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestAggregationQuery.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnions.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFContext.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/BigdataRDFServletContextListener.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/RDR/build.xml branches/RDR/src/resources/HAJournal/HAJournal.config branches/RDR/src/resources/bin/startHAServices branches/RDR/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt 
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java 
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHANamespace.java branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_distinct_emptyResult.trig branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/count_emptyResult.trig branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.ttl branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.rq branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.srx branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.ttl branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874b.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/StressTest_ClosedByInterrupt_RW.java branches/RDR/src/resources/bin/HARestore branches/RDR/src/resources/etc/default/ branches/RDR/src/resources/etc/default/bigdata/ branches/RDR/src/resources/etc/default/bigdataHA Removed Paths: ------------- branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb 
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb
branches/RDR/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties
branches/RDR/src/resources/etc/bigdata/
branches/RDR/src/resources/etc/default/bigdata/
branches/RDR/src/resources/etc/default/bigdataHA

Property Changed:
----------------
branches/RDR/bigdata/lib/jetty/
branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate/
branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph/
branches/RDR/bigdata/src/java/com/bigdata/bop/util/
branches/RDR/bigdata/src/java/com/bigdata/htree/raba/
branches/RDR/bigdata/src/java/com/bigdata/jsr166/
branches/RDR/bigdata/src/test/com/bigdata/bop/joinGraph/
branches/RDR/bigdata/src/test/com/bigdata/bop/util/
branches/RDR/bigdata/src/test/com/bigdata/jsr166/
branches/RDR/bigdata/src/test/com/bigdata/util/httpd/
branches/RDR/bigdata-compatibility/
branches/RDR/bigdata-jini/src/java/com/bigdata/attr/
branches/RDR/bigdata-jini/src/java/com/bigdata/disco/
branches/RDR/bigdata-jini/src/java/com/bigdata/util/config/
branches/RDR/bigdata-jini/src/test/com/bigdata/journal/jini/ha/
branches/RDR/bigdata-perf/
branches/RDR/bigdata-perf/btc/
branches/RDR/bigdata-perf/btc/src/resources/
branches/RDR/bigdata-perf/lubm/
branches/RDR/bigdata-perf/uniprot/
branches/RDR/bigdata-perf/uniprot/src/
branches/RDR/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/changesets/
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/error/
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/internal/
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/relation/
branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/util/
branches/RDR/bigdata-rdf/src/samples/
branches/RDR/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/internal/
branches/RDR/bigdata-rdf/src/test/com/bigdata/rdf/relation/
branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/
branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/
branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/
branches/RDR/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/
branches/RDR/dsi-utils/
branches/RDR/dsi-utils/LEGAL/
branches/RDR/dsi-utils/lib/
branches/RDR/dsi-utils/src/
branches/RDR/dsi-utils/src/java/
branches/RDR/dsi-utils/src/java/it/
branches/RDR/dsi-utils/src/java/it/unimi/
branches/RDR/dsi-utils/src/test/
branches/RDR/dsi-utils/src/test/it/unimi/
branches/RDR/dsi-utils/src/test/it/unimi/dsi/
branches/RDR/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/
branches/RDR/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/
branches/RDR/osgi/
branches/RDR/src/resources/bin/config/

Index: branches/RDR/bigdata/lib/jetty
===================================================================
--- branches/RDR/bigdata/lib/jetty	2014-04-21 19:15:16 UTC (rev 8132)
+++ branches/RDR/bigdata/lib/jetty	2014-04-21 20:59:35 UTC (rev 8133)

Property changes on: branches/RDR/bigdata/lib/jetty
___________________________________________________________________
Modified: svn:mergeinfo
## -1,6 +1,7 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty:8025-8122
 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785
 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380
-/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7665-7913
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:7665-8131
 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522
 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752
 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Index: branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate	2014-04-21 19:15:16 UTC (rev 8132)
+++ branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate	2014-04-21 20:59:35 UTC (rev 8133)

Property changes on: branches/RDR/bigdata/src/java/com/bigdata/bop/aggregate
___________________________________________________________________
Modified: svn:mergeinfo
## -1,6 +1,7 ##
+/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate:8025-8122
 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785
 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380
-/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7665-7913
+/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7665-8131
 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522
 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752
 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801
\ No newline at end of property
Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java	2014-04-21 19:15:16 UTC (rev 8132)
+++ branches/RDR/bigdata/src/java/com/bigdata/bop/engine/QueryEngine.java	2014-04-21 20:59:35 UTC (rev 8133)
@@ -610,7 +610,7 @@
      * <p>
      * If the deadline has expired, {@link IRunningQuery#cancel(boolean)} will
      * be invoked. In order for a compute bound operator to terminate in a
-     * timely fashion, it MUST periodically test {@link Thread#isInterrupted()}.
+     * timely fashion, it MUST periodically test {@link Thread#interrupted()}.
      * <p>
      * Note: The deadline of a query may be set at most once. Thus, a query
      * which is entered into the {@link #deadlineQueue} may not have its

Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java	2014-04-21 19:15:16 UTC (rev 8132)
+++ branches/RDR/bigdata/src/java/com/bigdata/bop/join/PipelineJoin.java	2014-04-21 20:59:35 UTC (rev 8133)
@@ -1834,7 +1834,7 @@
                 if (bindex++ % 50 == 0) {
                     // Periodically check for an interrupt.
-                    if (Thread.currentThread().isInterrupted())
+                    if (Thread.interrupted())
                         throw new InterruptedException();
                 }

Index: branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph
===================================================================
--- branches/RDR/bigdata/src/java/com/bigdata/bop/joinGraph	2014-04-21 19:15:16 UTC (rev 8132)
+++ branches/RDR/bigdata/b... [truncated message content]
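The two hunks above swap Thread.currentThread().isInterrupted() for Thread.interrupted(). The difference matters: interrupted() tests AND clears the calling thread's interrupt status, while isInterrupted() only tests it. Clearing is appropriate here because the flag is immediately converted into an InterruptedException. A minimal sketch of the cancellation pattern (not from the commit; process() is a hypothetical unit of work):

    // Cooperative cancellation for a compute-bound operator: check the
    // interrupt flag once every 50 items rather than on every item.
    public class ComputeBoundTask {

        public void consume(final Iterable<Object> bindingSets)
                throws InterruptedException {

            int bindex = 0;

            for (Object bset : bindingSets) {

                if (bindex++ % 50 == 0) {
                    // Thread.interrupted() tests and clears the flag; the
                    // flag's state is carried forward as the exception.
                    if (Thread.interrupted())
                        throw new InterruptedException();
                }

                process(bset); // hypothetical unit of work.

            }

        }

        private void process(final Object bset) { /* ... */ }

    }

If the interrupt status instead had to remain visible to an enclosing loop, isInterrupted() (or re-asserting the flag with Thread.currentThread().interrupt()) would be the right tool; here the thrown exception takes over, so clearing is harmless.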
From: <tho...@us...> - 2014-04-21 19:15:20
Revision: 8132 http://sourceforge.net/p/bigdata/code/8132 Author: thompsonbry Date: 2014-04-21 19:15:16 +0000 (Mon, 21 Apr 2014) Log Message: ----------- Updated version of the HALoadBalancer. See #624 (HA Load Balancer). Modified Paths: -------------- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/IHALoadBalancerPolicy.java Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java =================================================================== --- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-04-21 16:35:35 UTC (rev 8131) +++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/HALoadBalancerServlet.java 2014-04-21 19:15:16 UTC (rev 8132) @@ -23,10 +23,17 @@ package com.bigdata.rdf.sail.webapp; import java.io.IOException; +import java.lang.ref.WeakReference; import java.net.URI; import java.util.Arrays; import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; import java.util.UUID; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import javax.servlet.ServletConfig; @@ -38,8 +45,10 @@ import org.apache.log4j.Logger; import org.eclipse.jetty.proxy.ProxyServlet; +import com.bigdata.counters.AbstractStatisticsCollector; import com.bigdata.ganglia.GangliaService; import com.bigdata.ganglia.HostReportComparator; +import com.bigdata.ganglia.IGangliaMetricMessage; import com.bigdata.ganglia.IHostReport; import com.bigdata.ha.HAGlue; import com.bigdata.ha.QuorumService; @@ -47,46 +56,61 @@ import com.bigdata.journal.IIndexManager; import com.bigdata.journal.PlatformStatsPlugIn; import com.bigdata.journal.jini.ha.HAJournal; +import com.bigdata.journal.jini.ha.HAJournalServer; +import com.bigdata.quorum.AbstractQuorum; import com.bigdata.quorum.Quorum; +import com.bigdata.quorum.QuorumEvent; +import com.bigdata.quorum.QuorumListener; +import com.bigdata.util.InnerCause; +import com.sun.corba.se.impl.orbutil.closure.Future; /** - * - The HA Load Balancer servlet provides a transparent proxy for requests + * The HA Load Balancer servlet provides a transparent proxy for requests * arriving its configured URL pattern (the "external" interface for the load * balancer) to the root of the web application. - * <P> - * The use of the load balancer is entirely optional. If the security rules - * permit, then clients MAY make requests directly against a specific service. - * Thus, no specific provision exists to disable the load balancer servlet, but - * you may choose not to deploy it. * <p> * When successfully deployed, requests having prefix corresponding to the URL * pattern for the load balancer (typically, "/bigdata/LBS/*") are automatically * redirected to a joined service in the met quorum based on the configured load * balancer policy. * <p> + * The use of the load balancer is entirely optional. If the load balancer is + * not properly configured, then it will simply rewrite itself out of any + * request and the request will be handled by the host to which it was directed + * (no proxying). + * <p> + * Note: If the security rules permit, then clients MAY make requests directly + * against a specific service. + * <p> * The load balancer policies are "HA aware." 
They will always redirect update - * requests to the quorum leader. The default polices will load balance read - * requests over the leader and followers in a manner that reflects the CPU, IO - * Wait, and GC Time associated with each service. The PlatformStatsPlugIn and - * GangliaPlugIn MUST be enabled for the default load balancer policy to - * operate. It depends on those plugins to maintain a model of the load on the - * HA replication cluster. The GangliaPlugIn should be run only as a listener if - * you are are running the real gmond process on the host. If you are not - * running gmond, then the GangliaPlugIn should be configured as both a listener - * and a sender. + * requests to the quorum leader. Read requests will be directed to one of the + * services that is joined with the met quorum. + * <p> * + * <h3>Default Load Balancer Policy Configuration</h3> + * <p> + * The default policy will load balance read requests over the leader and + * followers in a manner that reflects the CPU, IO Wait, and GC Time associated + * with each service. + * <p> + * The {@link PlatformStatsPlugIn}\xCA and {@link GangliaPlugIn} MUST be enabled + * for the default load balancer policy to operate. It depends on those plugins + * to maintain a model of the load on the HA replication cluster. The + * GangliaPlugIn should be run only as a listener if you are are running the + * real gmond process on the host. If you are not running gmond, then the + * {@link GangliaPlugIn} should be configured as both a listener and a sender. + * <p> + * <ul> + * <li>The {@link PlatformStatsPlugIn} must be enabled.</li>. + * <li>The {@link GangliaPlugIn} must be enabled. The service does not need to + * be enabled for {@link GangliaPlugIn.Options#GANGLIA_REPORT}, but it must be + * enabled for {@link GangliaPlugIn.Options#GANGLIA_LISTEN}. + * </ul> + * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> * * @see <a href="http://trac.bigdata.com/ticket/624"> HA Load Balancer </a> * - * TODO Define some interesting load balancer policies. We can start with - * HA aware round robin and an HA aware policy that is load balanced based - * on the ganglia reported metrics model. - * - * All policies must be HA aware - we only want to send requests to - * services that are joined with the met quorum. - * * TODO If the target service winds up not joined with the met quorum by * the time we get there, what should it do? Report an error since we are * already on its internal interface? Will this servlet see that error? If @@ -127,96 +151,132 @@ */ String POLICY = "policy"; - String DEFAULT_POLICY = DefaultLBSPolicy.class.getName(); - /** - * A {@link Comparator} that places {@link IHostReport}s into a total - * ordering from the host with the least load to the host with the - * greatest load (optional). + * FIXME The default must be something that we can override from the + * test suite in order to test these different policies. I've added some + * code to do this based on a System property, but the test suite does + * not allow us to set arbitrary system properties on the child + * processes so that code is not yet having the desired effect. 
*/ - String COMPARATOR = "comparator"; - - String DEFAULT_COMPARATOR = DefaultHostReportComparator.class.getName(); - +// String DEFAULT_POLICY = RoundRobinPolicy.class.getName(); + String DEFAULT_POLICY = NOPLBSPolicy.class.getName(); +// String DEFAULT_POLICY = GangliaLBSPolicy.class.getName(); + } public HALoadBalancerServlet() { super(); } +// /** +// * This servlet request attribute is used to mark a request as either an +// * update or a read-only operation. +// */ +// protected static final String ATTR_LBS_UPDATE_REQUEST = "lbs-update-request"; + + /** + * If the LBS is not enabled, then it will strip its prefix from the URL + * requestURI and do a servlet forward to the resulting requestURI. This + * allows the webapp to start even if the LBS is not correctly configured. + */ private boolean enabled = false; private String prefix = null; private IHALoadBalancerPolicy policy; - private Comparator<IHostReport> comparator; - private GangliaService gangliaService; - private String[] reportOn; - @SuppressWarnings("unchecked") @Override public void init() throws ServletException { super.init(); + // Disabled by default. + enabled = false; + final ServletConfig servletConfig = getServletConfig(); final ServletContext servletContext = servletConfig.getServletContext(); - prefix = servletConfig.getInitParameter(InitParams.PREFIX); - - policy = newInstance(servletConfig, IHALoadBalancerPolicy.class, - InitParams.POLICY, InitParams.DEFAULT_POLICY); - - comparator = newInstance(servletConfig, Comparator.class, - InitParams.COMPARATOR, InitParams.DEFAULT_COMPARATOR); - final IIndexManager indexManager = BigdataServlet .getIndexManager(servletContext); if (!(indexManager instanceof HAJournal)) { - throw new ServletException("Not HA"); + // This is not an error, but the LBS is only for HA. + log.warn("Not HA"); + return; } - final HAJournal journal = (HAJournal) indexManager; + prefix = servletConfig.getInitParameter(InitParams.PREFIX); - if (journal.getPlatformStatisticsCollector() == null) { - throw new ServletException("LBS requires " - + PlatformStatsPlugIn.class.getName()); - } + policy = newInstance(servletConfig, IHALoadBalancerPolicy.class, + InitParams.POLICY, InitParams.DEFAULT_POLICY); - gangliaService = (GangliaService) journal.getGangliaService(); + try { - if (gangliaService == null) { - throw new ServletException("LBS requires " - + GangliaPlugIn.class.getName()); + // Attempt to provision the specified LBS policy. + policy.init(servletConfig, indexManager); + + } catch (Throwable t) { + + /* + * The specified LBS policy could not be provisioned. + */ + + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + // Interrupted. + return; + } + + log.error("Could not setup policy: " + policy, t); + + try { + policy.destroy(); + } catch (Throwable t2) { + if (InnerCause.isInnerCause(t, InterruptedException.class)) { + // Interrupted. + return; + } + log.warn("Problem destroying policy: " + policy, t2); + } finally { + policy = null; + } + + /* + * Fall back onto a NOP policy. Each service will handle a + * read-request itself. Write requests are proxied to the quorum + * leader. + */ + + policy = new NOPLBSPolicy(); + + log.warn("Falling back: policy=" + policy); + + // Initialize the fallback policy. 
+ policy.init(servletConfig, indexManager); + } - reportOn = gangliaService.getDefaultHostReportOn(); - enabled = true; servletContext.setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX, prefix); if (log.isInfoEnabled()) - log.info(servletConfig.getServletName() + " @ " + prefix); + log.info(servletConfig.getServletName() + " @ " + prefix + + " :: policy=" + policy); } @Override public void destroy() { - + enabled = false; prefix = null; - policy = null; + if (policy != null) { + policy.destroy(); + policy = null; + } - comparator = null; - - reportOn = null; - - gangliaService = null; - getServletContext().setAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX, null); @@ -225,7 +285,55 @@ } /** + * Return the configured value of the named parameter. This method checks + * the environment variables first for a fully qualified value for the + * parameter using <code>HALoadBalancerServer</code><i>name</i>. If no value + * is found for that variable, it checks the {@link ServletContext} for + * <i>name</i>. If no value is found again, it returns the default value + * specified by the caller. This makes it possible to configure the behavior + * of the {@link HALoadBalancerServlet} using environment variables. + * + * @param servletConfig + * The {@link ServletConfig}. + * + * @param iface + * The interface that the type must implement. + * @param name + * The name of the servlet init parameter. + * @param def + * The default value for the servlet init parameter. + * @return + */ + private static String getConfigParam(final ServletConfig servletConfig, + final String name, final String def) { + + // Look at environment variables for an override. + String s = System.getProperty(HALoadBalancerServlet.class.getName() + + "." + name); + + if (s == null || s.trim().length() == 0) { + + // Look at ServletConfig for the configured value. + s = servletConfig.getInitParameter(name); + + } + + if (s == null || s.trim().length() == 0) { + + // Use the default value. + s = def; + + } + + return s; + + } + + /** * Create an instance of some type based on the servlet init parameters. + * <p> + * Note: The configuration parameter MAY also be specified as <code> + * com.bigdata.rdf.sail.webapp.HALoadBalancerServlet.<i>name</i></code>. * * @param servletConfig * The {@link ServletConfig}. @@ -246,16 +354,9 @@ final Class<? extends T> iface, final String name, final String def) throws ServletException { + final String s = getConfigParam(servletConfig, name, def); + final T t; - - String s = servletConfig.getInitParameter(name); - - if (s == null || s.trim().length() == 0) { - - s = def; - - } - final Class<? extends T> cls; try { cls = (Class<? extends T>) Class.forName(s); @@ -285,230 +386,130 @@ IOException { if (!enabled) { - // The LBS is not available. - response.sendError(HttpServletResponse.SC_NOT_FOUND); + /* + * LBS is disabled. Strip LBS prefix from the requestURI and forward + * the request to servlet on this host (NOP LBS). + */ + forwardToThisService(request, response); + return; } - - final HostScore[] hosts = hostTable.get(); - if (hosts == null || hosts.length == 0) { + /* + * Decide whether this is a read-only request or an update request. + */ + final boolean isUpdate = isUpdateRequest(request); - // Ensure that the host table exists. - updateHostsTable(); +// // Set the request attribute. +// request.setAttribute(ATTR_LBS_UPDATE_REQUEST, isUpdate); + + /* + * Delegate to policy. 
This provides a single point during which the + * policy can ensure that it is monitoring any necessary information and + * also provides an opportunity to override the behavior completely. For + * example, as an optimization, the policy can forward the request to a + * servlet in this servlet container rather than proxying it to either + * itself or another service. + */ + if(policy.service(isUpdate, request, response)) { - } - - final HAGlueScore[] services = serviceTable.get(); - - if (services == null || services.length == 0) { - - /* - * Ensure that the service table exists (more correctly, attempt to - * populate it, but we can only do that if the HAQuorumService is - * running.) - */ - - updateServicesTable(); + // Return immediately if the response was committed. + return; } - + /* * TODO if rewriteURL() returns null, then the base class (ProxyServlet) * returns SC_FORBIDDEN. It should return something less ominous, like a - * 404. With an explanation. Or a RETRY. + * 404. With an explanation. Or a RETRY. Or just forward to the local + * service and let it report an appropriate error message (e.g., + * NotReady). */ super.service(request, response); } /** - * Update the per-host scoring table. + * Strip off the <code>/LBS</code> prefix from the requestURI and forward + * the request to the servlet at the resulting requestURI. This forwarding + * effectively disables the LBS but still allows requests which target the + * LBS to succeed against the webapp on the same host. * - * @see #hostTable + * @param request + * The request. + * @param response + * The response. * - * FIXME This MUST be updated on a periodic basis. We can probably - * query the gangliaService to figure out how often it gets updates, or - * we can do this every 5 seconds or so (the ganglia updates are not - * synchronized across a cluster - they just pour in). - * - * TODO For scalability on clusters with a lot of ganglia chatter, we - * should only keep the data from those hosts that are of interest for - * a given HA replication cluster. The load on other hosts has no - * impact on our decision when load balancing within an HA replication - * cluster. + * @throws IOException + * @throws ServletException */ - private void updateHostsTable() { + static protected void forwardToThisService( + final HttpServletRequest request, // + final HttpServletResponse response// + ) throws IOException, ServletException { - /* - * Note: If there is more than one service on the same host, then we - * will have one record per host, not per service. - * - * Note: The actual metrics that are available depend on the OS and on - * whether you are running gmond or having the GangliaPlugIn do its own - * reporting. The policy that ranks the host reports should be robust to - * these variations. - */ - final IHostReport[] hostReport = gangliaService.getHostReport(// - reportOn,// metrics to be reported. - comparator// imposes order on the host reports. - ); + final String path = request.getRequestURI(); - log.warn("hostReport=" + Arrays.toString(hostReport)); + // The prefix for the LBS servlet. + final String prefix = (String) request.getServletContext() + .getAttribute(BigdataServlet.ATTRIBUTE_LBS_PREFIX); - final HostScore[] scores = new HostScore[hostReport.length]; + if (prefix == null) { + // LBS is not running / destroyed. 
+ response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + return; + } - for (int i = 0; i < hostReport.length; i++) { - - final IHostReport r = hostReport[i]; + if (!path.startsWith(prefix)) { + // Request should not have reached the LBS. + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + return; + } + // what remains after we strip off the LBS prefix. + final String rest = path.substring(prefix.length()); + + // build up path w/o LBS prefix. + final StringBuilder uri = new StringBuilder(); + + if (!rest.startsWith("/")) { /* - * TODO This is ignoring the metrics for the host and weighting all - * hosts equally. + * The new path must start with '/' and is relative to this + * ServletContext. */ - scores[i++] = new HostScore(r.getHostName(), 1.0, - (double) hostReport.length); + uri.append("/"); + } - } + // append the remainder of the original requestURI + uri.append(rest); - // sort into ascending order (increasing activity). - Arrays.sort(scores); +// // append the query parameters (if any). +// final String query = request.getQueryString(); +// if (query != null) +// uri.append("?").append(query); - for (int i = 0; i < scores.length; i++) { - - scores[i].rank = i; - - scores[i].drank = ((double) i) / scores.length; - - } - - if (log.isDebugEnabled()) { - - log.debug("The most active index was: " + scores[scores.length - 1]); - - log.debug("The least active index was: " + scores[0]); - - } - - this.hostTable.set(scores); - - } - - /** - * Update the per-service table. - * - * @see #serviceTable - * - * FIXME This MUST be maintained by appropriate watchers such that we - * just consult the as maintained information and act immediately on - * it. We can not afford any latency for RMI or even figuring out which - * the host has the least load. That should all be maintained by a - * scheduled thread and listeners. - */ - private void updateServicesTable() { - - final ServletContext servletContext = getServletContext(); - - final HAJournal journal = (HAJournal) BigdataServlet - .getIndexManager(servletContext); - - final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); - + // The new path. + final String newPath = uri.toString(); + /* - * Note: This is the *local* HAGlueService. - * - * This page must be robust to some new failure modes. The ZooKeeper - * client can now be associated with an expired session, River discovery - * can now be disabled, and the HAQuorumService might not be available - * from quorum.getClient(). All of those things can happen if there is a - * zookeeper session expiration that forces us to terminate the - * HAQuorumService. This condition will be cured automatically (unless - * the service is being shutdown), but only limited status information - * can be provided while the HAQuorumService is not running. + * Forward the request to this servlet container now that we have + * stripped off the prefix for the LBS. */ - final QuorumService<HAGlue> quorumService; - { - QuorumService<HAGlue> t; - try { - t = (QuorumService) quorum.getClient(); - } catch (IllegalStateException ex) { - // Note: Not available (quorum.start() not called). 
- return; - } - quorumService = t; - } - final UUID[] joined = quorum.getJoined(); - final HAGlueScore[] serviceScores = new HAGlueScore[joined.length]; + if (log.isInfoEnabled()) + log.info("forward: " + path + " => " + newPath); - for (int i = 0; i < joined.length; i++) { - final UUID serviceId = joined[i]; - try { + request.getRequestDispatcher(newPath).forward(request, response); - /* - * TODO Scan the existing table before doing an RMI to the - * service. We only need to do the RMI for a new service, not - * one in the table. - * - * TODO A services HashMap<UUID,HAGlueScore> would be much more - * efficient than a table. If we use a CHM, then we can do this - * purely asynchronously as the HAGlue services entire the set - * of joined services. - */ - serviceScores[i] = new HAGlueScore(servletContext, serviceId); - - } catch (RuntimeException ex) { - - /* - * Ignore. Might not be an HAGlue instance. - */ - - if (log.isInfoEnabled()) - log.info(ex, ex); - - continue; - - } - - } - - this.serviceTable.set(serviceScores); - } - - /* - * FIXME Choose among pre-computed and maintained proxy targets based on the - * LBS policy. - */ - private static final String _proxyTo = "http://localhost:8091/bigdata"; - - /** - * The table of pre-scored hosts. - * - * TODO There is an entry for all known hosts, but not all hosts are running - * service that we care about. So we have to run over the table, filtering - * for hosts that have services that we care about. - */ - private final AtomicReference<HostScore[]> hostTable = new AtomicReference<HostScore[]>( - null); /** - * This is the table of known services. We can scan the table for a service - * {@link UUID} and then forward a request to the pre-computed requestURL - * associated with that {@link UUID}. If the requestURL is <code>null</code> - * then we do not know how to reach that service and can not proxy the - * request. - */ - private final AtomicReference<HAGlueScore[]> serviceTable = new AtomicReference<HAGlueScore[]>( - null); - - /** * For update requests, rewrite the requestURL to the service that is the * quorum leader. For read requests, rewrite the requestURL to the service * having the least load. */ @Override - protected URI rewriteURI(final HttpServletRequest request) - { + protected URI rewriteURI(final HttpServletRequest request) { + final String path = request.getRequestURI(); if (!path.startsWith(prefix)) return null; @@ -517,10 +518,10 @@ final String proxyTo; if(isUpdate) { // Proxy to leader. - proxyTo = getLeaderURL(request); + proxyTo = policy.getLeaderURL(request); } else { // Proxy to any joined service. - proxyTo = getReaderURL(request); + proxyTo = policy.getReaderURL(request); } if (proxyTo == null) { // Could not rewrite. @@ -548,193 +549,1208 @@ } /** - * Return <code>true</code> iff this is an UPDATE request that must be - * proxied to the quorum leader. - * - * FIXME How do we identify "UPDATE" requests? DELETE and PUT are update - * requests, but POST is not always an UPDATE. It can also be used for - * QUERY. GET is never an UPDATE request, and that is what this is based on - * right now. + * TODO This offers an opportunity to handle a rewrite failure. It could be + * used to provide a default status code (e.g., 404 versus forbidden) or to + * forward the request to this server rather than proxying to another + * server. 
*/ - private boolean isUpdateRequest(HttpServletRequest request) { + @Override + protected void onRewriteFailed(final HttpServletRequest request, + final HttpServletResponse response) throws IOException { - return !request.getMethod().equalsIgnoreCase("GET"); - + response.sendError(HttpServletResponse.SC_FORBIDDEN); + } - private String getLeaderURL(final HttpServletRequest request) { + /** + * Return <code>true</code> iff this is an UPDATE request that must be + * proxied to the quorum leader. A SPARQL QUERY + */ + private boolean isUpdateRequest(final HttpServletRequest request) { - final ServletContext servletContext = getServletContext(); + final boolean isGet = request.getMethod().equalsIgnoreCase("GET"); - final HAJournal journal = (HAJournal) BigdataServlet - .getIndexManager(servletContext); + if (isGet) { - final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal.getQuorum(); - - final UUID leaderId = quorum.getLeaderId(); + // GET is never an UPDATE request. + return false; - if (leaderId == null) { - // No quorum, so no leader. Can not proxy the request. - return null; } + + final String requestURI = request.getRequestURI(); - /* - * Scan the services table to locate the leader and then proxy the - * request to the pre-computed requestURL for the leader. If that - * requestURL is null then we do not know about a leader and can not - * proxy the request at this time. - */ + if (requestURI.endsWith("/sparql")) { - final HAGlueScore[] services = serviceTable.get(); - - if (services == null) { + /* + * SPARQL end point. + * + * @see QueryServlet#doPost() + */ - // No services. Can't proxy. - return null; + if ( request.getParameter(QueryServlet.ATTR_QUERY) != null || + RESTServlet.hasMimeType(request, + BigdataRDFServlet.MIME_SPARQL_QUERY) + ) { - } + /* + * QUERY against SPARQL end point using POST for visibility, not + * mutability. + */ - for (HAGlueScore s : services) { + return false; // idempotent query using POST. - if (s.serviceUUID.equals(leaderId)) { + } - // Found it. Proxy if the serviceURL is defined. - return s.requestURL; + if (request.getParameter(QueryServlet.ATTR_UUID) != null) { + return false; // UUID request with caching disabled. + + } else if (request.getParameter(QueryServlet.ATTR_ESTCARD) != null) { + + return false; // ESTCARD with caching defeated. + + } else if (request.getParameter(QueryServlet.ATTR_CONTEXTS) != null) { + + // Request for all contexts in the database. + return false; + } - + } - // Not found. Won't proxy. - return null; - + // Anything else must be proxied to the leader. + return true; + } + /** Place into descending order by load_one. */ + public static class DefaultHostReportComparator extends + HostReportComparator implements Comparator<IHostReport> { + + public DefaultHostReportComparator() { + super("load_one", true/* asc */); + } + + } + /** - * Return the requestURL to which we will proxy a read request. + * Abstract base class establishes a listener for quorum events, tracks the + * services that are members of the quorum, and caches metadata about those + * services (especially the requestURL at which they will respond). * - * @param request - * The request. + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> * - * @return The proxyTo URL -or- <code>null</code> if we could not find a - * service to which we could proxy this request. + * FIXME The {@link QuorumListener} is unregistered by + * {@link AbstractQuorum#terminate()}. This happens any time the + * {@link HAJournalServer} goes into the error state. 
When this + * occurs, we stop getting {@link QuorumEvent}s and the policy stops + * being responsive. We probably need to either NOT clear the quorum + * listener and/or add an event type that is sent when + * {@link Quorum#terminate()} is called. */ - private String getReaderURL(final HttpServletRequest request) { + abstract protected static class AbstractLBSPolicy implements + IHALoadBalancerPolicy, QuorumListener { - final HostScore[] hostScores = this.hostTable.get(); - - if (hostScores == null) { - // Can't proxy to anything. - return null; + public interface InitParams { + } - // Choose a host : TODO This is just a round robin over the hosts. - HostScore hostScore = null; - for (int i = 0; i < hostScores.length; i++) { + /** + * The {@link ServletContext#getContextPath()} is cached in + * {@link #init(ServletConfig, IIndexManager)}. + */ + private final AtomicReference<String> contextPath = new AtomicReference<String>(); + + /** + * A {@link WeakReference} to the {@link HAJournal} avoids pinning the + * {@link HAJournal}. + */ + protected final AtomicReference<WeakReference<HAJournal>> journalRef = new AtomicReference<WeakReference<HAJournal>>(); - final int hostIndex = (i + nextHost) % hostScores.length; + /** + * This is the table of known services. We can scan the table for a service + * {@link UUID} and then forward a request to the pre-computed requestURL + * associated with that {@link UUID}. If the requestURL is <code>null</code> + * then we do not know how to reach that service and can not proxy the + * request. + */ + protected final AtomicReference<HAGlueScore[]> serviceTable = new AtomicReference<HAGlueScore[]>( + null); - hostScore = hostScores[hostIndex]; + /** + * Return the cached reference to the {@link HAJournal}. + * + * @return The reference or <code>null</code> iff the reference has been + * cleared or has not yet been set. + */ + protected HAJournal getJournal() { - if (hostScore == null) - continue; + final WeakReference<HAJournal> ref = journalRef.get(); - nextHost = hostIndex + 1; + if (ref == null) + return null; + + return ref.get(); } + + @Override + public void destroy() { - if (hostScore == null) { + contextPath.set(null); - // No hosts. Can't proxy. - return null; + journalRef.set(null); + + serviceTable.set(null); } + + @Override + public void init(final ServletConfig servletConfig, + final IIndexManager indexManager) throws ServletException { - final HAGlueScore[] services = this.serviceTable.get(); + final ServletContext servletContext = servletConfig + .getServletContext(); - if (services == null) { + contextPath.set(servletContext.getContextPath()); - // No services. Can't proxy. - return null; + final HAJournal journal = (HAJournal) BigdataServlet + .getIndexManager(servletContext); + this.journalRef.set(new WeakReference<HAJournal>(journal)); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + quorum.addListener(this); + } - - /* - * Find a service on that host. - * - * TODO If none found, the try other hosts until we have tried each host - * once and then give up by returning null. This will require pushing - * down the service finder into a method that we call from the hosts - * loop. + + @Override + public boolean service(final boolean isUpdate, + final HttpServletRequest request, + final HttpServletResponse response) throws ServletException, + IOException { + + /* + * Figure out whether the quorum is met and if this is the quorum + * leader. 
+ */ + final HAJournal journal = getJournal(); + Quorum<HAGlue, QuorumService<HAGlue>> quorum = null; + QuorumService<HAGlue> quorumService = null; + long token = Quorum.NO_QUORUM; // assume no quorum. + boolean isLeader = false; // assume false. + boolean isQuorumMet = false; // assume false. + if (journal != null) { + quorum = journal.getQuorum(); + if (quorum != null) { + try { + // Note: This is the *local* HAGlueService. + quorumService = (QuorumService) quorum.getClient(); + token = quorum.token(); + isLeader = quorumService.isLeader(token); + isQuorumMet = token != Quorum.NO_QUORUM; + } catch (IllegalStateException ex) { + // Note: Not available (quorum.start() not + // called). + } + } + } + + if ((isLeader && isUpdate) || !isQuorumMet) { + + /* + * (1) If this service is the leader and the request is an + * UPDATE, then we forward the request to the local service. It + * will handle the UPDATE request. + * + * (2) If the quorum is not met, then we forward the request to + * the local service. It will produce the appropriate error + * message. + * + * FIXME (3) For read-only requests, have a configurable + * preference to forward the request to this service unless + * either (a) there is a clear load imbalance. This will help to + * reduce the latency of the request. If HAProxy is being used + * to load balance over the readers, then we should have a high + * threshold before we send the request somewhere else. + * + * @see #forwardToThisService() + */ + forwardToThisService(request, response); + + // request was handled. + return true; + + } + + /* + * Hook the request to update the service/host tables if they are + * not yet defined. + */ + conditionallyUpdateServiceTable(); + + // request was not handled. + return false; + + } + + /** + * {@inheritDoc} + * <p> + * This implementation rewrites the requestURL such that the request + * will be proxied to the quorum leader. */ - for(HAGlueScore x : services) { + @Override + final public String getLeaderURL(final HttpServletRequest request) { + + final ServletContext servletContext = request.getServletContext(); + + final HAJournal journal = (HAJournal) BigdataServlet + .getIndexManager(servletContext); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + final UUID leaderId = quorum.getLeaderId(); + + if (leaderId == null) { + // No quorum, so no leader. Can not proxy the request. + return null; + } + + /* + * Scan the services table to locate the leader and then proxy the + * request to the pre-computed requestURL for the leader. If that + * requestURL is null then we do not know about a leader and can not + * proxy the request at this time. + */ + + final HAGlueScore[] services = serviceTable.get(); - if (x.hostname == null) { - // Can't use if no hostname. - continue; + if (services == null) { + + // No services. Can't proxy. + return null; + } - if (x.requestURL == null) { - // Can't use if no requestURL. - continue; + for (HAGlueScore s : services) { + + if (s.serviceUUID.equals(leaderId)) { + + // Found it. Proxy if the serviceURL is defined. + return s.requestURL; + + } + } + + // Not found. Won't proxy. + return null; - if (!x.hostname.equals(hostScore.hostname)) { - // This service is not on the host we are looking for. - continue; + } + + /** + * {@inheritDoc} + * <p> + * The services table is updated if a services joins or leaves the + * quorum. 
+ */ + @Override + public void notify(final QuorumEvent e) { + switch(e.getEventType()) { + case SERVICE_JOIN: + case SERVICE_LEAVE: + updateServiceTable(); + break; } + } + + /** + * Conditionally update the {@link #serviceTable} iff it does not exist + * or is empty. + */ + protected void conditionallyUpdateServiceTable() { - return x.requestURL; - + final HAGlueScore[] services = serviceTable.get(); + + if (services == null || services.length == 0) { + + /* + * Ensure that the service table exists (more correctly, attempt + * to populate it, but we can only do that if the + * HAQuorumService is running.) + * + * FIXME This should be robust even when the HAQuorumService is + * not running. We do not want to be unable to proxy to another + * service just because this one is going through an error + * state. Would it make more sense to have a 2nd Quorum object + * for this purpose - one that is not started and stopped by the + * HAJournalServer? + * + * Note: Synchronization here is used to ensure only one thread + * runs this logic if the table does not exist and we get a + * barrage of requests. + */ + synchronized (serviceTable) { + + updateServiceTable(); + + } + + } + } + + /** + * Update the per-service table. + * + * @see #serviceTable + */ + protected void updateServiceTable() { - // No service found on that host. - return null; - + final HAJournal journal = getJournal(); + + final Quorum<HAGlue, QuorumService<HAGlue>> quorum = journal + .getQuorum(); + + final UUID[] joined = quorum.getJoined(); + + final HAGlueScore[] serviceScores = new HAGlueScore[joined.length]; + + for (int i = 0; i < joined.length; i++) { + + final UUID serviceId = joined[i]; + + try { + + /* + * TODO Scan the existing table before doing an RMI to the + * service. We only need to do the RMI for a new service, + * not one in the table. + * + * TODO A services HashMap<UUID,HAGlueScore> would be much + * more efficient than a table. If we use a CHM, then we can + * do this purely asynchronously as the HAGlue services + * entire the set of joined services. + */ + serviceScores[i] = new HAGlueScore(journal, + contextPath.get(), serviceId); + + } catch (RuntimeException ex) { + + /* + * Ignore. Might not be an HAGlue instance. + */ + + if (log.isInfoEnabled()) + log.info(ex, ex); + + continue; + + } + + } + + if (log.isInfoEnabled()) + log.info("Updated servicesTable: #services=" + + serviceScores.length); + + this.serviceTable.set(serviceScores); + + } + } - int nextHost = 0; + + /** + * This policy proxies all requests for update operations to the leader but + * forwards read requests to the local service. Thus, it does not provide a + * load balancing strategy, but it does allow update requests to be directed + * to any service in an non-HA aware manner. This policy can be combined + * with an external round-robin strategy to load balance the read-requests + * over the cluster. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * TODO A service that is not joined with the met quorum can not + * answer a read-request. In order to be generally useful (and not + * just as a debugging policy), we need to proxy a read-request when + * this service is not joined with the met quorum. If there is no + * met quorum, then we can just forward the request to the local + * service and it will report the NoQuorum error. + */ + public static class NOPLBSPolicy extends AbstractLBSPolicy { - /** Place into descending order by load_one. 
*/ - public static class DefaultHostReportComparator extends - HostReportComparator implements Comparator<IHostReport> { + @Override + public boolean service(final boolean isUpdate, + final HttpServletRequest request, + final HttpServletResponse response) throws IOException, + ServletException { - public DefaultHostReportComparator() { - super("load_one", true/* asc */); + if (!isUpdate) { + + // Always handle read requests locally. + forwardToThisService(request, response); + + // Request was handled. + return true; + + } + + // Proxy update requests to the quorum leader. + return super.service(isUpdate, request, response); + } + /** + * Note: This method is not invoked. + */ + @Override + public String getReaderURL(final HttpServletRequest req) { + + throw new UnsupportedOperationException(); + + } + } /** - * Stochastically proxy the request to the services based on their load. + * Policy implements a round-robin over the services that are joined with + * the met quorum. * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> */ - public static class DefaultLBSPolicy implements IHALoadBalancerPolicy { + public static class RoundRobinPolicy extends AbstractLBSPolicy { + /** + * {@inheritDoc} + * <p> + * This imposes a round-robin policy over the discovered services. If + * the service is discovered and appears to be joined with the met + * quorum, then the request can be proxied to that service. + */ @Override - public String proxyTo(HttpServletRequest req) { - // TODO Auto-generated method stub - return null; + public String getReaderURL(final HttpServletRequest request) { + + final HAGlueScore[] serviceScores = this.serviceTable.get(); + + if (serviceScores == null) { + + // Nothing discovered. Can't proxy. + return null; + + } + + /* + * Choose a service. + * + * Note: This is a round robin over the services. Any service that + * is joined with the met quorum can be selected as a target for the + * read request. + * + * Note: The round-robin is atomic with respect to each request. The + * request obtains a starting point in the serviceScores[] and then + * finds the next service in that array using a round-robin. The + * [nextService] starting point is not updated until the round-robin + * is complete - this is necessary in order to avoid skipping over + * services that are being checked by a concurrent request. + * + * The [nextService] is updated only after the round-robin decision + * has been made. As soon as it has been updated, a new round-robin + * decision will be made with respect to the new value for + * [nextService] but any in-flight decisions will be made against + * the value of [nextService] that they observed on entry. + */ + + // The starting offset for the round-robin. + final long startIndex = nextService.longValue(); + + // The selected service. + HAGlueScore serviceScore = null; + + for (int i = 0; i < serviceScores.length; i++) { + + /* + * Find the next host index. + * + * Note: We need to ensure that the hostIndex stays in the legal + * range, even with concurrent requests and when wrapping around + * MAX_VALUE. + */ + final int hostIndex = (int) Math + .abs(((i + startIndex) % serviceScores.length)); + + serviceScore = serviceScores[hostIndex]; + + if (serviceScore == null) + continue; + + if (serviceScore.hostname == null) { + // Can't use if no hostname. + continue; + } + + if (serviceScore.requestURL == null) { + // Can't use if no requestURL. + continue; + } + + } + + // Bump the nextService counter. 
+ nextService.incrementAndGet(); + + if (serviceScore == null) { + + // No service. Can't proxy. + return null; + + } + + return serviceScore.requestURL; + } - + + /** + * Note: This could be a hot spot. We can have concurrent requests and + * we need to increment this counter for each such request. + */ + private final AtomicLong nextService = new AtomicLong(0L); + } /** - * Always proxy the request to the local service even if it is not HA ready - * (this policy defeats the load balancer). + * Stochastically proxy the request to the services based on their load. + * <p> + * Note: This {@link IHALoadBalancerPolicy} has a dependency on the + * {@link GangliaPlugIn}. The {@link GangliaPlugIn} must be setup to listen + * to the Ganglia protocol and build up an in-memory model of the load on + * each host. Ganglia must be reporting metrics for each host running an + * {@link HAJournalServer} instance. This can be achieved either using the + * <code>gmond</code> utility from the ganglia distribution or using the + * {@link GangliaPlugIn}. * * @author <a href="mailto:tho...@us...">Bryan * Thompson</a> */ - public static class NOPLBSPolicy implements IHALoadBalancerPolicy { + public static class GangliaLBSPolicy extends AbstractLBSPolicy { + public interface InitParams extends AbstractLBSPolicy.InitParams { + +// /** +// * A {@link Comparator} that places {@link IHostReport}s into a +// * total ordering from the host with the least load to the host with +// * the greatest load (optional). +// */ +// String COMPARATOR = "comparator"; +// +// String DEFAULT_COMPARATOR = DefaultHostReportComparator.class +// .getName(); + + /** + * The {@link IHostScoringRule} that will be used to score the + * {@link IHostReport}s. The {@link IHostReport}s are obtained + * periodically from the {@link GangliaPlugIn}. The reports reflect + * the best local knowledge of the metrics on each of the hosts. The + * hosts will each self-report their metrics periodically using the + * ganglia protocol. + * <p> + * The purpose of the scoring rule is to compute a single workload + * number based on those host metrics. The resulting scores are then + * normalized. Load balancing decisions are made based on those + * normalized scores. + */ + String HOST_SCORING_RULE = "hostScoringRule"; + + String DEFAULT_HOST_SCORING_RULE = DefaultHostScoringRule.class + .getName(); + + } + + /** + * Interface for scoring the load on a host. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public interface IHostScoringRule { + + /** + * Return a score for the given {@link IHostReport}. + * + * @param hostReport + * The {@link IHostReport}. + * + * @return The score. + */ + public double getScore(final IHostReport hostReport); + + } + + /** + * Returns ONE for each host (all hosts appear to have an equal + * workload). + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + */ + public static class NOPHostScoringRule implements IHostScoringRule { + + @Override + public double getScore(final IHostReport hostReport) { + + return 1d; + + } + + } + + /** + * Best effort computation of a workload score based on CPU Utilization, + * IO Wait, and GC time. + * <p> + * Note: Not all platforms report all metrics. For example, OSX does not + * report IO Wait, which is a key metric for the workload of a database. + * + * @author <a href="mailto:tho...@us...">Bryan + * Thompson</a> + * + * FIXME GC time is a JVM metric. 
It will only get reported by + * the {@link GangliaPlugIn} if it is setup to self-report that + * data. And it may not report it correctly if there is more + * than one {@link HAJournalService} per host. It is also + * available from /counters and could be exposed as a JMX MBean. + */ + public static class DefaultHostScoringRule implements IHostScoringRule { + + @Override + public double getScore(final IHostReport hostReport) { + + final Map<String, IGangliaMetricMessage> metrics = hostReport + .getMetrics(); + + /* + * TODO Use "load_one" if we can't get both "cpu_system" and + * "cpu_user". + */ +// final double cpu_system; +// { +// +// final IGangliaMetricMessage m = metrics.get("cpu_system"); +// +// if (m != null) +// cpu_system = m.getNumericValue().doubleValue(); +// else +// cpu_system = .25d; +// +// } +// +// final double cpu_user; +// { +// +// final IGangliaMetricMessage m = metrics.get("cpu_user"); +// +// if (m != null) +// cpu_user = m.getNumericValue().doubleValue(); +// else +// cpu_user = .25d; +// +// } + + final double cpu_idle; + { + + final IGangliaMetricMessage m = metrics.get("cpu_idle"); + + if (m != null) + cpu_idle = m.getNumericValue().doubleValue(); + else + cpu_idle = .5d; + + } + + final double cpu_wio; + { + + final IGangliaMetricMessage m = metrics.get("cpu_wio"); + + if (m != null) + cpu_wio = m.getNumericValue().doubleValue(); + else + cpu_wio = .05d; + + } + + final double hostScore = (1d + cpu_wio * 100d) + / (1d + cpu_idle); + + return hostScore; + + } + + } + + /** + * Place into descending order by load_one. + * <p> + * Note: We do not rely on the ordering imposed by this comparator. + * Instead, we filter the hosts for those that correspond to the joined + * services in the met quorum, compute a score for each such host, and + * then normalize those scores. + */ + private final Comparator<IHostReport> comparator = new HostReportComparator( + "load_one", false/* asc */); + + /** + * The ganglia service - it must be configured at least as a listener. + */ + private GangliaService gangliaService; + + /** + * The set of metrics that we are requesting in the ganglia host + * reports. + */ + private String[] reportOn; + + /** + * The {@link Future} of a task that periodically queries the ganglia + * peer for its up to date host counters for each discovered host. + */ + private ScheduledFuture<?> scheduledFuture; + + /** + * The table of pre-scored hosts. + * <P> + * Note: There is an entry for all known hosts, but not all hosts are + * running services that we care about. This means that we have to run + * over the table, filtering for hosts that have services that we care + * about. + */ + private final AtomicReference<HostScore[]> hostTable = new AtomicReference<HostScore[]>( + null); + + /** + * The most recent score for this host. + */ + private final AtomicReference<HostScore> thisHostScore = new AtomicReference<HostScore>(); + + /** + * The rule used to score the host reports. + */ + private IHostScoringRule scoringRule; + +// @SuppressWarnings("unchecked") @Override - public String proxyTo(HttpServletRequest req) { - // TODO Auto-generated method stub - return null; + public void init(final ServletConfig servletConfig, + final IIndexManager indexManager) throws ServletException { + + super.init(servletConfig, indexManager); + +// comparator = newInstance(servletConfig, Comparator.class, +// ... [truncated message content] |
From: <tob...@us...> - 2014-04-21 16:35:39
Revision: 8131
http://sourceforge.net/p/bigdata/code/8131
Author: tobycraig
Date: 2014-04-21 16:35:35 +0000 (Mon, 21 Apr 2014)
Log Message:
-----------
Added pagination to query results (forgotten files)

Modified Paths:
--------------
branches/RDR/bigdata-war/src/html/css/style.css
branches/RDR/bigdata-war/src/html/index.html

Modified: branches/RDR/bigdata-war/src/html/css/style.css
===================================================================
--- branches/RDR/bigdata-war/src/html/css/style.css	2014-04-21 14:33:56 UTC (rev 8130)
+++ branches/RDR/bigdata-war/src/html/css/style.css	2014-04-21 16:35:35 UTC (rev 8131)
@@ -207,19 +207,27 @@
    border: none;
 }
 
-#advanced-features, #query-response, #query-explanation, #query-tab .bottom *, #load-response, #load-clear, #explore-results, #namespace-properties {
+#advanced-features, #query-response, #query-pagination, #query-explanation, #query-tab .bottom *, #load-response, #load-clear, #explore-results, #namespace-properties {
    display: none;
 }
 
-td {
+th, td {
    border: 1px solid;
    padding: 5px;
 }
 
+th {
+   font-weight: bold;
+}
+
 pre {
    font-family: monospace;
 }
 
+#page-selector {
+   float: right;
+}
+
 #running-queries li {
    margin: 10px 0;
 }

Modified: branches/RDR/bigdata-war/src/html/index.html
===================================================================
--- branches/RDR/bigdata-war/src/html/index.html	2014-04-21 14:33:56 UTC (rev 8130)
+++ branches/RDR/bigdata-war/src/html/index.html	2014-04-21 16:35:35 UTC (rev 8131)
@@ -103,6 +103,21 @@
       <div id="query-response" class="box">
       </div>
 
+      <div id="query-pagination" class="box">
+         <span id="current-results"></span>
+         <select id="results-per-page">
+            <option>10</option>
+            <option>25</option>
+            <option>50</option>
+            <option>100</option>
+         </select> per page
+         <div id="page-selector">
+            <button id="previous-page"><</button>
+            Page <input type="text" id="current-page"> of <span id="result-pages"></span>
+            <button id="next-page">></button>
+         </div>
+      </div>
+
       <div id="query-explanation" class="box">
       </div>

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
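The markup above only adds the pager controls; the arithmetic such a pager needs is small. A hedged sketch of that arithmetic (written in Java for illustration, although the workbench itself would implement it in workbench.js; none of these method names come from the commit):

    // Page math for a results pager: total page count, plus the slice of
    // result rows to display for a given 1-based page number.
    public class Pager {

        /** Total pages for n results at perPage results per page. */
        public static int pageCount(final int n, final int perPage) {
            return Math.max(1, (n + perPage - 1) / perPage); // ceiling division.
        }

        /** First result index (inclusive, 0-based) on the given page. */
        public static int firstIndex(final int page, final int perPage) {
            return (page - 1) * perPage;
        }

        /** Last result index (exclusive), never past the end of the results. */
        public static int lastIndex(final int page, final int perPage, final int n) {
            return Math.min(page * perPage, n);
        }

    }

For example, 137 results at 25 per page gives pageCount = 6, and page 6 displays the slice [125, 137), i.e. the final 12 rows.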
From: <tho...@us...> - 2014-04-21 14:33:59
Revision: 8130
http://sourceforge.net/p/bigdata/code/8130
Author: thompsonbry
Date: 2014-04-21 14:33:56 +0000 (Mon, 21 Apr 2014)
Log Message:
-----------
Added logging to RESTServlet. Javadoc inside of QueryServlet.

Modified Paths:
--------------
branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java

Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java
===================================================================
--- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java	2014-04-21 14:33:06 UTC (rev 8129)
+++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/QueryServlet.java	2014-04-21 14:33:56 UTC (rev 8130)
@@ -148,6 +148,11 @@
     protected void doPost(final HttpServletRequest req,
             final HttpServletResponse resp) throws IOException {
 
+        /*
+         * Note: HALoadBalancerServlet MUST be maintained if idempotent methods
+         * are added to doPost() in order to ensure that they are load balanced
+         * rather than always directed to the quorum leader.
+         */
         if (req.getParameter(ATTR_UPDATE) != null) {

Modified: branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java
===================================================================
--- branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java	2014-04-21 14:33:06 UTC (rev 8129)
+++ branches/RDR/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/RESTServlet.java	2014-04-21 14:33:56 UTC (rev 8130)
@@ -30,6 +30,7 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.log4j.Logger;
 import org.openrdf.model.URI;
 import org.openrdf.model.impl.URIImpl;
 
@@ -42,8 +43,8 @@
  */
 public class RESTServlet extends BigdataRDFServlet {
 
-//    private static final transient Logger log = Logger
-//            .getLogger(RESTServlet.class);
+    private static final transient Logger log = Logger
+            .getLogger(RESTServlet.class);
 
     /**
      *
@@ -137,6 +138,9 @@
     protected void doGet(final HttpServletRequest req,
             final HttpServletResponse resp) throws IOException {
 
+        if (log.isInfoEnabled())
+            log.info(req.toString());
+
         /*
          * Look for linked data GET requests.
         *
@@ -201,7 +205,10 @@
     protected void doPost(final HttpServletRequest req,
             final HttpServletResponse resp) throws IOException {
 
-        if (req.getParameter(QueryServlet.ATTR_QUERY) != null
+        if (log.isInfoEnabled())
+            log.info(req.toString());
+
+        if (req.getParameter(QueryServlet.ATTR_QUERY) != null
                 || req.getParameter(QueryServlet.ATTR_UPDATE) != null
                 || req.getParameter(QueryServlet.ATTR_UUID) != null
                 || req.getParameter(QueryServlet.ATTR_ESTCARD) != null
@@ -249,9 +256,14 @@
 
     }
 
-    static boolean hasMimeType(final HttpServletRequest req, String mimeType) {
-        String contentType = req.getContentType();
-        return contentType != null && mimeType.equals(new MiniMime(contentType).getMimeType());
+    static boolean hasMimeType(final HttpServletRequest req,
+            final String mimeType) {
+
+        final String contentType = req.getContentType();
+
+        return contentType != null
+                && mimeType.equals(new MiniMime(contentType).getMimeType());
+
     }
 
     /**
@@ -264,6 +276,9 @@
     protected void doPut(final HttpServletRequest req,
             final HttpServletResponse resp) throws IOException {
 
+        if (log.isInfoEnabled())
+            log.info(req.toString());
+
         m_updateServlet.doPut(req, resp);
 
     }
 
@@ -275,6 +290,9 @@
     protected void doDelete(final HttpServletRequest req,
             final HttpServletResponse resp) throws IOException {
 
+        if (log.isInfoEnabled())
+            log.info(req.toString());
+
         m_deleteServlet.doDelete(req, resp);
 
     }

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
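The reworked hasMimeType() above compares only the bare MIME type extracted by MiniMime, which matters because a Content-Type header frequently carries parameters such as a charset. A small sketch of why the raw header cannot be compared directly (stripParams() is a stand-in for bigdata's MiniMime, whose actual parsing is more general):

    // Demonstrates matching a Content-Type header against a bare MIME type.
    public class MimeMatch {

        /** Drop any "; charset=..." style parameters from a Content-Type. */
        static String stripParams(final String contentType) {
            final int i = contentType.indexOf(';');
            return (i == -1 ? contentType : contentType.substring(0, i)).trim();
        }

        public static void main(String[] args) {
            final String header = "application/sparql-query; charset=UTF-8";
            // A naive equals() on the raw header misses this request:
            System.out.println(header.equals("application/sparql-query")); // false
            // Comparing only the bare MIME type matches as intended:
            System.out.println(stripParams(header)
                    .equals("application/sparql-query"));                  // true
        }

    }

This is why the POST routing above can reliably recognize a SPARQL query sent with MIME_SPARQL_QUERY regardless of the charset the client appends.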
From: <tho...@us...> - 2014-04-21 14:33:09
Revision: 8129 http://sourceforge.net/p/bigdata/code/8129 Author: thompsonbry Date: 2014-04-21 14:33:06 +0000 (Mon, 21 Apr 2014) Log Message: ----------- Rolling forward to jetty 9.1.4 for HA load balancer support. See #624 (HA Load Balancer) Modified Paths: -------------- branches/RDR/.classpath branches/RDR/build.properties branches/RDR/pom.xml Added Paths: ----------- branches/RDR/bigdata/lib/jetty/README.TXT branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar Removed Paths: ------------- branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-http-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-io-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-security-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-server-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-util-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.3.v20140225.jar branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.3.v20140225.jar Modified: branches/RDR/.classpath =================================================================== --- branches/RDR/.classpath 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/.classpath 2014-04-21 14:33:06 UTC (rev 8129) @@ -58,8 +58,7 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/fastutil-5.1.5.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-analyzers-3.0.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/lucene/lucene-core-3.0.0.jar"/> - <classpathentry kind="lib" path="bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> - <classpathentry kind="lib" path="bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/jackson-core-2.3.3-20140314.203554-3.jar"/> <classpathentry exported="true" kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/high-scale-lib-v1.1.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-ext-1.1-b3-dev.jar"/> @@ -79,16 +78,17 @@ <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/nxparser-1.2.3.jar"/> - <classpathentry exported="true" kind="lib" 
path="bigdata/lib/jetty/jetty-continuation-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-proxy-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-rewrite-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-security-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-server-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-servlet-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-util-9.1.3.v20140225.jar" sourcepath="/Users/bryan/Documents/workspace/org.eclipse.jetty.project-jetty-9.1-wsbatch"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.3.v20140225.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.3.v20140225.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar" sourcepath="/Users/bryan/Downloads/org.eclipse.jetty.project-jetty-9.1.4.v20140401"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar"/> + <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Added: branches/RDR/bigdata/lib/jetty/README.TXT =================================================================== --- branches/RDR/bigdata/lib/jetty/README.TXT (rev 0) +++ branches/RDR/bigdata/lib/jetty/README.TXT 2014-04-21 14:33:06 UTC (rev 8129) @@ -0,0 +1,7 @@ +You can download the jetty source from: + +http://git.eclipse.org/c/jetty/org.eclipse.jetty.project.git/ + +You can download the jetty binary distributions from: + +http://download.eclipse.org/jetty/ Deleted: branches/RDR/bigdata/lib/jetty/jetty-client-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: 
branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-http-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-http-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-io-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-io-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: 
branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-proxy-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-rewrite-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-security-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-security-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-server-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-server-9.1.4.v20140401.jar 
___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-servlet-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-util-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-util-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) Property changes on: branches/RDR/bigdata/lib/jetty/jetty-webapp-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Deleted: branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.3.v20140225.jar =================================================================== (Binary files differ) Added: branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar =================================================================== (Binary files differ) Index: branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar =================================================================== --- branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar 2014-04-21 14:33:06 UTC (rev 8129) 
Property changes on: branches/RDR/bigdata/lib/jetty/jetty-xml-9.1.4.v20140401.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/RDR/build.properties =================================================================== --- branches/RDR/build.properties 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/build.properties 2014-04-21 14:33:06 UTC (rev 8129) @@ -49,7 +49,7 @@ zookeeper.version=3.4.5 sesame.version=2.6.10 slf4j.version=1.6.1 -jetty.version=9.1.3.v20140225 +jetty.version=9.1.4.v20140401 #jetty.version=7.2.2.v20101205 #servlet.version=2.5 servlet.version=3.1.0 Modified: branches/RDR/pom.xml =================================================================== --- branches/RDR/pom.xml 2014-04-21 14:29:33 UTC (rev 8128) +++ branches/RDR/pom.xml 2014-04-21 14:33:06 UTC (rev 8129) @@ -78,7 +78,7 @@ <zookeeper.version>3.3.3</zookeeper.version> <sesame.version>2.6.10</sesame.version> <slf4j.version>1.6.1</slf4j.version> - <jetty.version>9.1.3.v20140225</jetty.version> + <jetty.version>9.1.4.v20140401</jetty.version> <!--jetty.version>7.2.2.v20101205</jetty.version--> <servlet.version>3.1.0</servlet.version> <lucene.version>3.0.0</lucene.version> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
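For anyone verifying the upgrade, the new jetty.version flows through build.properties and pom.xml as shown above, and the jars on the runtime classpath can be sanity-checked directly. A minimal sketch, assuming the org.eclipse.jetty.util.Jetty.VERSION constant that jetty-util 9.x exposes (the class name JettyVersionCheck is invented here):

import org.eclipse.jetty.util.Jetty;

public class JettyVersionCheck {
    public static void main(String[] args) {
        // Should print "9.1.4.v20140401" once the upgraded jars are picked up.
        System.out.println("Jetty on classpath: " + Jetty.VERSION);
    }
}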
From: <tho...@us...> - 2014-04-21 14:29:37
Revision: 8128 http://sourceforge.net/p/bigdata/code/8128 Author: thompsonbry Date: 2014-04-21 14:29:33 +0000 (Mon, 21 Apr 2014) Log Message: ----------- @Override tags. Modified Paths: -------------- branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java Modified: branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-19 00:57:07 UTC (rev 8127) +++ branches/RDR/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-21 14:29:33 UTC (rev 8128) @@ -2247,6 +2247,7 @@ /** * Invokes {@link #shutdownNow()}. */ + @Override synchronized public void close() { // Note: per contract for close(). @@ -2260,6 +2261,7 @@ } + @Override synchronized public void destroy() { if (log.isInfoEnabled()) @@ -2324,6 +2326,7 @@ * journal until the quorum has met and {@link #init()} has been invoked for * the {@link Quorum}. */ + @Override public boolean isOpen() { return _bufferStrategy != null && _bufferStrategy.isOpen(); @@ -2335,6 +2338,7 @@ * if {@link #closeForWrites(long)} was used to seal the journal against * further writes. */ + @Override public boolean isReadOnly() { if (readOnly) { @@ -2443,12 +2447,14 @@ } + @Override public boolean isStable() { return _bufferStrategy.isStable(); } + @Override public boolean isFullyBuffered() { return _bufferStrategy.isFullyBuffered(); @@ -2497,6 +2503,7 @@ * through a concurrent {@link #abort()} or {@link #commitNow(long)}. The * {@link IRootBlockView} itself is an immutable data structure. */ + @Override final public IRootBlockView getRootBlockView() { // final ReadLock lock = _fieldReadWriteLock.readLock(); @@ -2564,6 +2571,7 @@ } + @Override final public long getLastCommitTime() { // final ReadLock lock = _fieldReadWriteLock.readLock(); @@ -2597,6 +2605,7 @@ * @param committer * The commiter. */ + @Override final public void setCommitter(final int rootSlot, final ICommitter committer) { assertOpen(); Modified: branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java 2014-04-19 00:57:07 UTC (rev 8127) +++ branches/RDR/bigdata/src/java/com/bigdata/journal/Journal.java 2014-04-21 14:29:33 UTC (rev 8128) @@ -3147,6 +3147,7 @@ * {@link #executorService}, then the {@link IConcurrencyManager}, the * {@link ITransactionService} and finally the {@link IResourceLockService}. */ + @Override synchronized public void shutdown() { if (!isOpen()) @@ -3227,6 +3228,7 @@ new ShutdownHelper(executorService, 1000/* logTimeout */, TimeUnit.MILLISECONDS) { + @Override protected void logTimeout() { log.warn("Waiting on task(s)" @@ -3266,6 +3268,7 @@ * Note: The {@link IConcurrencyManager} is shutdown first, then the * {@link ITransactionService} and finally the {@link IResourceManager}. */ + @Override synchronized public void shutdownNow() { if (!isOpen()) @@ -3373,12 +3376,14 @@ } + @Override public <T> Future<T> submit(AbstractTask<T> task) { return concurrencyManager.submit(task); } + @Override @SuppressWarnings("rawtypes") public List<Future> invokeAll( Collection<? extends AbstractTask> tasks, long timeout, @@ -3388,6 +3393,7 @@ } + @Override public <T> List<Future<T>> invokeAll( Collection<? 
extends AbstractTask<T>> tasks) throws InterruptedException { @@ -3396,12 +3402,14 @@ } + @Override public IResourceManager getResourceManager() { return concurrencyManager.getResourceManager(); } + @Override public ILocalTransactionManager getTransactionManager() { // return concurrencyManager.getTransactionManager(); @@ -3416,6 +3424,7 @@ } + @Override public WriteExecutorService getWriteService() { return concurrencyManager.getWriteService(); @@ -3434,6 +3443,7 @@ * @return This implementation returns <code>false</code> since overflow * is NOT supported. */ + @Override public boolean shouldOverflow() { return false; @@ -3443,12 +3453,14 @@ /** * Note: This implementation always returns <code>false</code>. */ + @Override public boolean isOverflowEnabled() { return false; } + @Override public Future<Object> overflow() { throw new UnsupportedOperationException(); @@ -3476,6 +3488,7 @@ * @throws UnsupportedOperationException * since {@link #overflow()} is not supported. */ + @Override public File getIndexSegmentFile(IndexMetadata indexMetadata) { throw new UnsupportedOperationException(); @@ -3486,6 +3499,7 @@ * @throws UnsupportedOperationException * always. */ + @Override public IBigdataFederation<?> getFederation() { throw new UnsupportedOperationException(); @@ -3496,6 +3510,7 @@ * @throws UnsupportedOperationException * always. */ + @Override public DataService getDataService() { throw new UnsupportedOperationException(); @@ -3506,6 +3521,7 @@ * @throws UnsupportedOperationException * always. */ + @Override public UUID getDataServiceUUID() { throw new UnsupportedOperationException(); @@ -3516,6 +3532,7 @@ * Always returns <code>null</code> since index partition moves are not * supported. */ + @Override public StaleLocatorReason getIndexPartitionGone(String name) { return null; @@ -3525,6 +3542,7 @@ /* * global row store. */ + @Override public SparseRowStore getGlobalRowStore() { return getGlobalRowStoreHelper().getGlobalRowStore(); @@ -3541,6 +3559,7 @@ // * @return The global row store view -or- <code>null</code> if no view // * exists as of that timestamp. // */ + @Override public SparseRowStore getGlobalRowStore(final long timestamp) { return getGlobalRowStoreHelper().get(timestamp); @@ -3591,6 +3610,7 @@ * Note: An atomic reference provides us with a "lock" object which doubles * as a reference. We are not relying on its CAS properties. 
*/ + @Override public BigdataFileSystem getGlobalFileSystem() { GlobalFileSystemHelper t = globalFileSystemHelper.get(); @@ -3623,6 +3643,7 @@ } final private AtomicReference<GlobalFileSystemHelper> globalFileSystemHelper = new AtomicReference<GlobalFileSystemHelper>(); + @Override protected void discardCommitters() { super.discardCommitters(); @@ -3651,6 +3672,7 @@ } + @Override public TemporaryStore getTempStore() { return tempStoreFactory.getTempStore(); @@ -3658,6 +3680,7 @@ } private final TemporaryStoreFactory tempStoreFactory; + @Override public IResourceLocator<?> getResourceLocator() { assertOpen(); @@ -3667,6 +3690,7 @@ } private final IResourceLocator<?> resourceLocator; + @Override public IResourceLockService getResourceLockService() { assertOpen(); @@ -3676,6 +3700,7 @@ } private final ResourceLockService resourceLockManager; + @Override public ExecutorService getExecutorService() { assertOpen(); @@ -3785,6 +3810,7 @@ private StartDeferredTasksTask() { } + @Override public void run() { try { @@ -3875,6 +3901,7 @@ } // class StartDeferredTasks + @Override public ScheduledFuture<?> addScheduledTask(final Runnable task, final long initialDelay, final long delay, final TimeUnit unit) { @@ -3896,6 +3923,7 @@ * * @see Options#COLLECT_PLATFORM_STATISTICS */ + @Override final public boolean getCollectPlatformStatistics() { return Boolean.valueOf(properties.getProperty( Options.COLLECT_PLATFORM_STATISTICS, @@ -3907,6 +3935,7 @@ * * @see Options#COLLECT_QUEUE_STATISTICS */ + @Override final public boolean getCollectQueueStatistics() { return Boolean.valueOf(properties.getProperty( Options.COLLECT_QUEUE_STATISTICS, @@ -3918,6 +3947,7 @@ * * @see Options#HTTPD_PORT */ + @Override final public int getHttpdPort() { return Integer.valueOf(properties.getProperty(Options.HTTPD_PORT, Options.DEFAULT_HTTPD_PORT)); @@ -3941,6 +3971,7 @@ */ final private ConcurrentHashMap<String/* name */, BTreeCounters> indexCounters = new ConcurrentHashMap<String, BTreeCounters>(); + @Override public BTreeCounters getIndexCounters(final String name) { if (name == null) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
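For readers wondering why a commit would consist of nothing but @Override tags: the annotation turns a silently wrong method into a compile error, which is a cheap guard when superclass signatures change during large refactorings. A minimal illustration with hypothetical classes (not from the bigdata source):

class Base {
    public void close() { }
}

class Derived extends Base {
    @Override
    public void close() { } // compiles: this really overrides Base.close()

    // @Override                         // with the annotation, the compiler
    // public void close(long timeout) { // would reject this method outright;
    // }                                 // without it, the method silently
    //                                   // becomes an unrelated overload
}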
From: <tob...@us...> - 2014-04-19 00:57:16
Revision: 8127 http://sourceforge.net/p/bigdata/code/8127 Author: tobycraig Date: 2014-04-19 00:57:07 +0000 (Sat, 19 Apr 2014) Log Message: ----------- Added pagination to query results Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-17 15:01:17 UTC (rev 8126) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-19 00:57:07 UTC (rev 8127) @@ -2,6 +2,7 @@ // global variables var DEFAULT_NAMESPACE, NAMESPACE, NAMESPACE_URL, NAMESPACES_READY, NAMESPACE_SHORTCUTS, FILE_CONTENTS, QUERY_RESULTS; +var PAGE_SIZE=10, TOTAL_PAGES, CURRENT_PAGE; /* Modal functions */ @@ -454,7 +455,8 @@ error: queryResultsError } - $('#query-response').show().html('Query running...'); + $('#query-response').show().html('Query running...'); + $('#query-pagination').hide(); $.ajax(NAMESPACE_URL, settings); @@ -475,7 +477,7 @@ $('#query-response-clear').click(function() { $('#query-response, #query-explanation').empty(''); - $('#query-response, #query-explanation, #query-tab .bottom *').hide(); + $('#query-response, #query-pagination, #query-explanation, #query-tab .bottom *').hide(); }); $('#query-export').click(function() { updateExportFileExtension(); showModal('query-export-modal'); }); @@ -606,7 +608,7 @@ function showQueryResults(data) { $('#query-response').empty(); $('#query-export-rdf').hide(); - $('#query-response, #query-tab .bottom *').show(); + $('#query-response, #query-pagination, #query-tab .bottom *').show(); var table = $('<table>').appendTo($('#query-response')); if(this.dataTypes[1] == 'xml') { // RDF @@ -634,29 +636,125 @@ } } else { // JSON - // save data for export + // save data for export and pagination QUERY_RESULTS = data; + if(typeof(data.boolean) != 'undefined') { // ASK query table.append('<tr><td>' + data.boolean + '</td></tr>').addClass('boolean'); return; } + + // see if we have RDF data + var isRDF = false; + if(data.head.vars.length == 3 && data.head.vars[0] == 's' && data.head.vars[1] == 'p' && data.head.vars[2] == 'o') { + isRDF = true; + } else if(data.head.vars.length == 4 && data.head.vars[0] == 's' && data.head.vars[1] == 'p' && data.head.vars[2] == 'o' && data.head.vars[3] == 'c') { + // see if c is used or not + for(var i=0; i<data.results.bindings.length; i++) { + if('c' in data.results.bindings[i]) { + isRDF = false; + break; + } + } + + if(isRDF) { + // remove (unused) c variable from JSON + data.head.vars.pop(); + } + } + + if(isRDF) { + $('#rdf-formats').prop('disabled', false); + } else { + $('#rdf-formats').prop('disabled', true); + if($('#rdf-formats option:selected').length == 1) { + $('#non-rdf-formats option:first').prop('selected', true); + } + } + + // put query variables in table header var thead = $('<thead>').appendTo(table); - var vars = []; - var varsUsed = {} var tr = $('<tr>'); for(var i=0; i<data.head.vars.length; i++) { - tr.append('<td>' + data.head.vars[i] + '</td>'); - vars.push(data.head.vars[i]); + tr.append('<th>' + data.head.vars[i] + '</th>'); } thead.append(tr); table.append(thead); - for(var i=0; i<data.results.bindings.length; i++) { + + setNumberOfPages(); + showPage(1); + + $('#query-response a').click(function(e) { + e.preventDefault(); + explore(this.textContent); + }); + } +} + +function showQueryExplanation(data) { + $('#query-explanation').html(data).show(); +} + +function queryResultsError(jqXHR, textStatus, 
errorThrown) { + $('#query-response, #query-tab .bottom *').show(); + $('#query-response').text('Error! ' + textStatus + ' ' + errorThrown); +} + +/* Pagination */ + +function setNumberOfPages() { + TOTAL_PAGES = Math.ceil(QUERY_RESULTS.results.bindings.length / PAGE_SIZE); + $('#result-pages').html(TOTAL_PAGES); +} + +function setPageSize(n) { + n = parseInt(n, 10); + if(typeof n != 'number' || n % 1 != 0 || n < 1 || n == PAGE_SIZE) { + return; + } + + PAGE_SIZE = n; + setNumberOfPages(); + // TODO: show page containing current first result + showPage(1); +} + +$('#results-per-page').change(function() { setPageSize(this.value); }); +$('#previous-page').click(function() { showPage(CURRENT_PAGE - 1); }); +$('#next-page').click(function() { showPage(CURRENT_PAGE + 1); }); +$('#current-page').keyup(function(e) { + if(e.which == 13) { + var n = parseInt(this.value, 10); + if(typeof n != 'number' || n % 1 != 0 || n < 1 || n > TOTAL_PAGES) { + this.value = CURRENT_PAGE; + } else { + showPage(n); + } + } +}); + +function showPage(n) { + if(typeof n != 'number' || n % 1 != 0 || n < 1 || n > TOTAL_PAGES) { + return; + } + + CURRENT_PAGE = n; + + // clear table results, leaving header + $('#query-response tbody tr').remove(); + + // work out indices for this page + var start = (CURRENT_PAGE - 1) * PAGE_SIZE; + var end = Math.min(CURRENT_PAGE * PAGE_SIZE, QUERY_RESULTS.results.bindings.length); + + // add matching bindings + var table = $('#query-response table'); + for(var i=start; i<end; i++) { var tr = $('<tr>'); - for(var j=0; j<vars.length; j++) { - if(vars[j] in data.results.bindings[i]) { - varsUsed[vars[j]] = true; - var binding = data.results.bindings[i][vars[j]]; + for(var j=0; j<QUERY_RESULTS.head.vars.length; j++) { + if(QUERY_RESULTS.head.vars[j] in QUERY_RESULTS.results.bindings[i]) { + var binding = QUERY_RESULTS.results.bindings[i][QUERY_RESULTS.head.vars[j]]; if(binding.type == 'sid') { var text = getSID(binding); } else { @@ -684,39 +782,13 @@ } } table.append(tr); - } - - // see if we have RDF data - if((vars.length == 3 && vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') || - (vars.length == 4 && vars[0] == 's' && vars[1] == 'p' && vars[2] == 'o') && vars[3] == 'c' && !('c' in varsUsed)) { - if(vars.length == 4) { - // remove (unused) c variable from JSON - QUERY_RESULTS.head.vars.pop() - } - $('#rdf-formats').prop('disabled', false); - } else { - $('#rdf-formats').prop('disabled', true); - if($('#rdf-formats option:selected').length == 1) { - $('#non-rdf-formats option:first').prop('selected', true); - } - } - - $('#query-response a').click(function(e) { - e.preventDefault(); - explore(this.textContent); - }); } -} -function showQueryExplanation(data) { - $('#query-explanation').html(data).show(); + // update current results numbers + $('#current-results').html((start + 1) + ' - ' + end); + $('#current-page').val(n); } -function queryResultsError(jqXHR, textStatus, errorThrown) { - $('#query-response, #query-tab .bottom *').show(); - $('#query-response').text('Error! ' + textStatus + ' ' + errorThrown); -} - /* Explore */ $('#explore-form').submit(function(e) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
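The pagination added above reduces to three pieces of index arithmetic (total pages, page start, page end). A Java transliteration of the same math, equivalent to the Math.ceil and Math.min expressions in workbench.js (class and method names are invented here):

public class PageMath {

    // TOTAL_PAGES = ceil(totalResults / pageSize); integer form of Math.ceil.
    static int totalPages(int totalResults, int pageSize) {
        return (totalResults + pageSize - 1) / pageSize;
    }

    // First row of 1-based page n (inclusive index into the bindings array).
    static int startIndex(int page, int pageSize) {
        return (page - 1) * pageSize;
    }

    // One past the last row of page n (exclusive), clamped to the result count.
    static int endIndex(int page, int pageSize, int totalResults) {
        return Math.min(page * pageSize, totalResults);
    }

    public static void main(String[] args) {
        // 25 results at 10 per page: 3 pages; page 3 shows rows 20..24.
        System.out.println(totalPages(25, 10));  // 3
        System.out.println(startIndex(3, 10));   // 20
        System.out.println(endIndex(3, 10, 25)); // 25
    }
}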
From: <mrp...@us...> - 2014-04-17 15:01:22
Revision: 8126 http://sourceforge.net/p/bigdata/code/8126 Author: mrpersonick Date: 2014-04-17 15:01:17 +0000 (Thu, 17 Apr 2014) Log Message: ----------- added an interface to allow service calls to bypass their normal hash index+join in the query plan Modified Paths: -------------- branches/RDR/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java Added Paths: ----------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java Modified: branches/RDR/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java =================================================================== --- branches/RDR/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2014-04-17 14:54:41 UTC (rev 8125) +++ branches/RDR/bigdata/src/java/com/bigdata/bop/controller/ServiceCallJoin.java 2014-04-17 15:01:17 UTC (rev 8126) @@ -58,6 +58,7 @@ import com.bigdata.rdf.model.BigdataURI; import com.bigdata.rdf.sparql.ast.service.BigdataServiceCall; import com.bigdata.rdf.sparql.ast.service.ExternalServiceCall; +import com.bigdata.rdf.sparql.ast.service.IDoNotJoinService; import com.bigdata.rdf.sparql.ast.service.RemoteServiceCall; import com.bigdata.rdf.sparql.ast.service.ServiceCall; import com.bigdata.rdf.sparql.ast.service.ServiceCallUtility; @@ -585,6 +586,52 @@ : new UnsyncLocalOutputBuffer<IBindingSet>( op.getChunkCapacity(), sink2); + if (serviceCall instanceof IDoNotJoinService) { + + // The iterator draining the subquery + ICloseableIterator<IBindingSet[]> serviceSolutionItr = null; + try { + + /* + * Invoke the service. + * + * Note: Returns [null] IFF SILENT and SERVICE ERROR. + */ + + serviceSolutionItr = doServiceCall(serviceCall, chunk); + + if (serviceSolutionItr != null) { + + while (serviceSolutionItr.hasNext()) { + + final IBindingSet[] bsets = + serviceSolutionItr.next(); + + for (IBindingSet bs : bsets) { + + unsyncBuffer.add(bs); + + } + + } + + } + + } finally { + + // ensure the service call iterator is closed. + if (serviceSolutionItr != null) + serviceSolutionItr.close(); + + } + + unsyncBuffer.flush(); + + // done. + return null; + + } + final JVMHashJoinUtility state = new JVMHashJoinUtility(op, silent ? JoinTypeEnum.Optional : JoinTypeEnum.Normal ); Added: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java (rev 0) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java 2014-04-17 15:01:17 UTC (rev 8126) @@ -0,0 +1,35 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.rdf.sparql.ast.service; + +/** + * Service calls can implement this interface and they will not be routed + * through a hash join in the query plan. They will be responsible for their + * own join internally. + * + * @author mikepersonick + */ +public interface IDoNotJoinService { + +} Property changes on: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/service/IDoNotJoinService.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
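Since IDoNotJoinService is an empty marker interface, opting a service out of the hash join is a one-line change to its declaration. A sketch only, with the rest of the ServiceCall plumbing elided and the class name invented here:

import com.bigdata.rdf.sparql.ast.service.IDoNotJoinService;

/**
 * ServiceCallJoin tests "serviceCall instanceof IDoNotJoinService" and, when
 * true, drains the service's solutions straight to the output buffer instead
 * of building a hash index and joining against it (see the diff above).
 */
public abstract class SelfJoiningServiceCall implements IDoNotJoinService {
    // service-specific evaluation that performs its own join internally
}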
From: <mrp...@us...> - 2014-04-17 14:54:46
Revision: 8125 http://sourceforge.net/p/bigdata/code/8125 Author: mrpersonick Date: 2014-04-17 14:54:41 +0000 (Thu, 17 Apr 2014) Log Message: ----------- fixed some bogus logic related to RDR Modified Paths: -------------- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java Modified: branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java =================================================================== --- branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java 2014-04-17 04:24:36 UTC (rev 8124) +++ branches/RDR/bigdata-rdf/src/java/com/bigdata/rdf/graph/impl/bd/BigdataGASEngine.java 2014-04-17 14:54:41 UTC (rev 8125) @@ -409,27 +409,39 @@ // Bind O for this key-range scan. IVUtility.encode(keyBuilder, u); - } else { + } else if (!inEdges) { /* - * SPO(C) or OSP(C) - * - * Note: For RDR link attribute access, the keys are formed - * differently. Lower case letters are used for variables. - * Upper case letters for constants. - * - * For SPO(C): S:=SID(Spo(c)), P:=linkAttributeType (must - * filter), O:=linkAttributeValue (read it off the index - * when the filter is satisfied). - * - * For OSP(C): OL=SID(Osp(c)), P:=linkAttributeType (must - * filter), S:=linkAttributeValue (read it off the index - * when the filter is satisfied). - * - * TODO RDR should also be supported in the SAIL and RAM GAS - * engine implementations. The statements about statements - * would be modeled as reified statement models. - */ + * SPO(C) or OSP(C) + * + * Note: For RDR link attribute access, the keys are formed + * differently. Lower case letters are used for variables. + * Upper case letters for constants. + * + * For SPO(C): S:=SID(Spo(c)), P:=linkAttributeType (must + * filter), O:=linkAttributeValue (read it off the index + * when the filter is satisfied). + * + * For OSP(C): OL=SID(Osp(c)), P:=linkAttributeType (must + * filter), S:=linkAttributeValue (read it off the index + * when the filter is satisfied). WRONG WRONG WRONG + * linkAttributeValue will always always always be an O + * + * TODO RDR should also be supported in the SAIL and RAM GAS + * engine implementations. The statements about statements + * would be modeled as reified statement models. + * + * TODO This logic is completely wrong for reverse + * traversal. Sids are always stored in SPO order regardless + * of what index they are in. So when we are traversing in + * reverse, this logic will attempt to decode the reverse + * links from the Sid in the OSP index but will instead find + * nothing at all, because first of all the Sid is in the S + * position of the link weight statement and secondly even + * if it were in the O position (which its not), we would + * end up with its forward links instead since the sid is + * always stored in SPO order. + */ keyOrder = getKeyOrder(kb, inEdges); @@ -452,6 +464,33 @@ // Append [u] to the key. IVUtility.encode(keyBuilder, u); + } else { + + keyOrder = getKeyOrder(kb, inEdges); + + ndx = kb.getSPORelation().getIndex(keyOrder); + + keyBuilder = ndx.getIndexMetadata().getKeyBuilder(); + + keyBuilder.reset(); + + /* + * Just have to ignore it. We need to introduce a join to + * handle it. + */ +// if (linkAttrTypeIV != null) { +// +// /* +// * Restrict to the SID region of the index. See +// * SidIV.encode(). +// */ +// keyBuilder.appendSigned(SidIV.toFlags()); +// +// } + + // Append [u] to the key. 
+ IVUtility.encode(keyBuilder, u); + } fromKey = keyBuilder.getKey();
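The "WRONG WRONG WRONG" commentary above comes down to a key-ordering fact: a SID is always encoded in (S,P,O) order, so a reverse-traversal probe keyed on the object can never locate it. A toy model of that point using a sorted map in place of the B+Tree indices (illustrative only; this is not bigdata's actual key encoding):

import java.util.SortedMap;
import java.util.TreeMap;

public class SidOrderDemo {
    public static void main(String[] args) {
        // Pretend SID key, always built from (s, p, o) in that order.
        TreeMap<String, String> index = new TreeMap<>();
        index.put("s1|p1|o1", "linkAttributeValue");

        // Forward traversal: a prefix scan on the subject finds the SID.
        SortedMap<String, String> fwd = index.subMap("s1|", "s1|\uffff");
        // Reverse traversal: a prefix scan on the object finds nothing at all,
        // because the object is the last key component, never the first.
        SortedMap<String, String> rev = index.subMap("o1|", "o1|\uffff");

        System.out.println("forward hits = " + fwd.size()); // 1
        System.out.println("reverse hits = " + rev.size()); // 0
    }
}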
From: <tob...@us...> - 2014-04-17 04:24:41
Revision: 8124 http://sourceforge.net/p/bigdata/code/8124 Author: tobycraig Date: 2014-04-17 04:24:36 +0000 (Thu, 17 Apr 2014) Log Message: ----------- Reduced export buttons to just one, with all RDF/non-RDF options in the dropdown Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-15 17:08:37 UTC (rev 8123) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-17 04:24:36 UTC (rev 8124) @@ -107,20 +107,20 @@ </div> <div class="bottom"> - <button id="query-export-rdf">Export RDF</button> - <button id="query-export-csv">Export CSV</button> - <button id="query-export-json">Export JSON</button> - <button id="query-export-xml">Export XML</button> + <button id="query-export">Export</button> <button id="query-response-clear">Clear</button> </div> </div> - <div id="query-export" class="modal"> + <div id="query-export-modal" class="modal"> <h1>Export</h1> <p> <label for="export-format">Format: </label> - <select id="export-format"></select> + <select id="export-format"> + <optgroup id="rdf-formats" label="RDF"></optgroup> + <optgroup id="non-rdf-formats" label="Other"></optgroup> + </select> </p> <p> <label for="export-filename">Filename: </label> @@ -128,7 +128,7 @@ .<span id="export-filename-extension"></span> </p> <p> - <button id="query-download-rdf">Export</button> + <button id="query-download">Export</button> <button class="modal-cancel">Cancel</button> </p> </div> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-15 17:08:37 UTC (rev 8123) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-17 04:24:36 UTC (rev 8124) @@ -478,58 +478,66 @@ $('#query-response, #query-explanation, #query-tab .bottom *').hide(); }); -$('#query-export-rdf').click(function() { updateExportFileExtension(); showModal('query-export'); }); -$('#query-export-csv').click(exportCSV); -$('#query-export-json').click(exportJSON); -$('#query-export-xml').click(exportXML); +$('#query-export').click(function() { updateExportFileExtension(); showModal('query-export-modal'); }); -var rdf_extensions = { - "application/rdf+xml": ['RDF/XML', 'rdf'], - "application/x-turtle": ['N-Triples', 'nt'], - "application/x-turtle": ['Turtle', 'ttl'], - "text/rdf+n3": ['N3', 'n3'], - "application/trix": ['TriX', 'trix'], - "application/x-trig": ['TRIG', 'trig'], - "text/x-nquads": ['NQUADS', 'nq'] +var export_extensions = { + "application/rdf+xml": ['RDF/XML', 'rdf', true], + "application/x-turtle": ['N-Triples', 'nt', true], + "application/x-turtle": ['Turtle', 'ttl', true], + "text/rdf+n3": ['N3', 'n3', true], + "application/trix": ['TriX', 'trix', true], + "application/x-trig": ['TRIG', 'trig', true], + "text/x-nquads": ['NQUADS', 'nq', true], + + "text/csv": ['CSV', 'csv', false, exportCSV], + "application/sparql-results+json": ['JSON', 'json', false, exportJSON], + // "text/tab-separated-values": ['TSV', 'tsv', false, exportTSV], + "application/sparql-results+xml": ['XML', 'xml', false, exportXML] }; -for(var contentType in rdf_extensions) { - $('#export-format').append('<option value="' + contentType + '">' + rdf_extensions[contentType][0] + '</option>'); +for(var contentType in export_extensions) { + var optgroup = 
export_extensions[contentType][2] ? '#rdf-formats' : '#non-rdf-formats'; + $(optgroup).append('<option value="' + contentType + '">' + export_extensions[contentType][0] + '</option>'); } +$('#export-format option:first').prop('selected', true); + $('#export-format').change(updateExportFileExtension); function updateExportFileExtension() { - $('#export-filename-extension').html(rdf_extensions[$('#export-format').val()][1]); + $('#export-filename-extension').html(export_extensions[$('#export-format').val()][1]); } -$('#query-download-rdf').click(function() { +$('#query-download').click(function() { var dataType = $('#export-format').val(); - var settings = { - type: 'POST', - data: JSON.stringify(QUERY_RESULTS), - contentType: 'application/sparql-results+json', - headers: { 'Accept': dataType }, - success: function(data) { downloadRDFSuccess(data, dataType, $('#export-filename').val()); }, - error: downloadRDFError - }; - $.ajax('/bigdata/sparql?workbench&convert', settings); - $(this).siblings('.modal-cancel').click(); -}); - -function downloadRDFSuccess(data, dataType, filename) { + var filename = $('#export-filename').val(); if(filename == '') { filename = 'export'; } - filename += '.' + rdf_extensions[dataType][1]; - downloadFile(data, dataType, filename); -} + filename += '.' + export_extensions[dataType][1]; + if(export_extensions[dataType][2]) { + // RDF + var settings = { + type: 'POST', + data: JSON.stringify(QUERY_RESULTS), + contentType: 'application/sparql-results+json', + headers: { 'Accept': dataType }, + success: function() { downloadFile(data, dataType, filename); }, + error: downloadRDFError + }; + $.ajax('/bigdata/sparql?workbench&convert', settings); + } else { + // not RDF + export_extensions[dataType][3](filename); + } + $(this).siblings('.modal-cancel').click(); +}); function downloadRDFError(jqXHR, textStatus, errorThrown) { alert(errorThrown); } -function exportXML() { +function exportXML(filename) { var xml = '<?xml version="1.0"?>\n<sparql xmlns="http://www.w3.org/2005/sparql-results#">\n\t<head>\n'; var bindings = []; $('#query-response thead tr td').each(function(i, td) { @@ -561,27 +569,32 @@ xml += '\t\t</result>\n'; }); xml += '\t</results>\n</sparql>\n'; - downloadFile(xml, 'application/sparql-results+xml', 'export.xml'); + downloadFile(xml, 'application/sparql-results+xml', filename); } -function exportJSON() { +function exportJSON(filename) { var json = JSON.stringify(QUERY_RESULTS); - downloadFile(json, 'application/sparql-results+json', 'export.json'); + downloadFile(json, 'application/sparql-results+json', filename); } -function exportCSV() { - // FIXME: escape commas +function exportCSV(filename) { var csv = ''; $('#query-response table tr').each(function(i, tr) { $(tr).find('td').each(function(j, td) { if(j > 0) { csv += ','; } - csv += td.textContent; + var val = td.textContent; + // quote value if it contains " , \n or \r + // replace " with "" + if(val.match(/[",\n\r]/)) { + val = '"' + val.replace('"', '""') + '"'; + } + csv += val; }); csv += '\n'; }); - downloadFile(csv, 'application/csv', 'export.csv'); + downloadFile(csv, 'text/csv', filename); } function downloadFile(data, type, filename) { @@ -680,9 +693,12 @@ // remove (unused) c variable from JSON QUERY_RESULTS.head.vars.pop() } - $('#query-export-rdf').show(); + $('#rdf-formats').prop('disabled', false); } else { - $('#query-export-rdf').hide(); + $('#rdf-formats').prop('disabled', true); + if($('#rdf-formats option:selected').length == 1) { + $('#non-rdf-formats 
option:first').prop('selected', true); + } } $('#query-response a').click(function(e) { @@ -1048,6 +1064,16 @@ return '<' + uri + '>'; } +function unabbreviate(uri) { + if(uri.charAt(0) == '<') { + // not abbreviated + return uri; + } + // get namespace + var namespace = uri.split(':', 1)[0]; + return '<' + uri.replace(namespace, NAMESPACE_SHORTCUTS[namespace]) + '>'; +} + function parseSID(sid) { // var re = /<< <([^<>]*)> <([^<>]*)> <([^<>]*)> >>/; var re = /<< *([^ ]+) +([^ ]+) +([^ ]+) *>>/; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
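The CSV quoting rule added above (quote any value containing a double quote, comma, CR, or LF, doubling embedded quotes) matches RFC 4180, with one caveat worth noting: JavaScript's String.replace with a string pattern rewrites only the first match, so val.replace('"', '""') leaves later quotes undoubled, and a /"/g regex would cover them all. The same rule in Java, where String.replace is global by definition (helper names are invented here):

public class CsvEscape {

    static String csvEscape(String val) {
        boolean needsQuoting = val.indexOf('"') >= 0 || val.indexOf(',') >= 0
                || val.indexOf('\n') >= 0 || val.indexOf('\r') >= 0;
        if (!needsQuoting) {
            return val;
        }
        // String.replace substitutes every occurrence in Java.
        return "\"" + val.replace("\"", "\"\"") + "\"";
    }

    public static void main(String[] args) {
        System.out.println(csvEscape("plain"));      // plain
        System.out.println(csvEscape("a,b"));        // "a,b"
        System.out.println(csvEscape("say \"hi\"")); // "say ""hi"""
    }
}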
From: <tho...@us...> - 2014-04-15 17:08:46
Revision: 8123 http://sourceforge.net/p/bigdata/code/8123 Author: thompsonbry Date: 2014-04-15 17:08:37 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Merge of the HA1/HA5 branch back to the main development branch. This closes out #722 (HA1). #723 remains open with one test that fails in CI. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/StatusServlet.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties 
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-D.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-E.properties Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-B.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-C.properties Property Changed: ---------------- branches/BIGDATA_RELEASE_1_3_0/ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd/ branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config/ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/btc/src/resources/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/lubm/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/ branches/BIGDATA_RELEASE_1_3_0/bigdata-perf/uniprot/src/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/samples/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/BIGDATA_RELEASE_1_3_0/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/LEGAL/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/lib/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/ 
branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/java/it/unimi/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/ branches/BIGDATA_RELEASE_1_3_0/dsi-utils/src/test/it/unimi/dsi/ branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_3_0/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_RELEASE_1_3_0/osgi/ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/config/ Index: branches/BIGDATA_RELEASE_1_3_0 =================================================================== --- branches/BIGDATA_RELEASE_1_3_0 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 /branches/BTREE_BUFFER_BRANCH:2004-2045 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8122 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -137,6 +137,13 @@ } + /** + * Used to zero pad slots in buffered writes. + * + * @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a> + */ + static private final byte[] s_zeros = new byte[256]; + /** * Buffer a write. * @@ -188,6 +195,19 @@ } // copy the caller's record into the buffer. m_data.put(data); + + // if data_len < slot_len then clear remainder of buffer + int padding = slot_len - data_len; + while (padding > 0) { + if (padding > s_zeros.length) { + m_data.put(s_zeros); + padding -= s_zeros.length; + } else { + m_data.put(s_zeros, 0, padding); + break; + } + } + // update the file offset by the size of the allocation slot m_endAddr += slot_len; // update the buffer position by the size of the allocation slot. @@ -250,8 +270,9 @@ final ByteBuffer m_data = tmp.buffer(); // reset the buffer state. 
- m_data.position(0); - m_data.limit(m_data.capacity()); + //m_data.position(0); + //m_data.limit(m_data.capacity()); + m_data.clear(); m_startAddr = -1; m_endAddr = 0; Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -310,7 +310,7 @@ /** * When a record is used as a read cache then the readCount is - * maintained as a metric on its access. �This could be used to + * maintained as a metric on its access. This could be used to * determine eviction/compaction. * <p> * Note: volatile to guarantee visibility of updates. Might do better @@ -509,7 +509,8 @@ * @param isHighlyAvailable * when <code>true</code> the whole record checksum is maintained * for use when replicating the write cache along the write - * pipeline. + * pipeline. This needs to be <code>true</code> for HA1 as well + * since we need to write the HALog. * @param bufferHasData * when <code>true</code> the caller asserts that the buffer has * data (from a replicated write), in which case the position Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -1151,6 +1151,7 @@ done = WriteCache.transferTo(cache/* src */, curCompactingCache/* dst */, serviceMap, 0/*threshold*/); if (done) { + // Everything was compacted. Send just the address metadata (empty cache block). sendAddressMetadata(cache); if (log.isDebugEnabled()) @@ -1231,7 +1232,7 @@ * been allocated on the leader in the same order in which the leader * made those allocations. This information is used to infer the order * in which the allocators for the different allocation slot sizes are - * created. This method will synchronous send those address notices and + * created. This method will synchronously send those address notices and * and also makes sure that the followers see the recycled addresses * records so they can keep both their allocators and the actual * allocations synchronized with the leader. @@ -1244,13 +1245,15 @@ * @throws InterruptedException * @throws ExecutionException * @throws IOException + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ private void sendAddressMetadata(final WriteCache cache) throws IllegalStateException, InterruptedException, ExecutionException, IOException { - if (quorum == null || !quorum.isHighlyAvailable() - || !quorum.getClient().isLeader(quorumToken)) { + if (quorum == null) { //|| !quorum.isHighlyAvailable() +// || !quorum.getClient().isLeader(quorumToken)) { return; } @@ -1344,20 +1347,15 @@ private void writeCacheBlock(final WriteCache cache) throws InterruptedException, ExecutionException, IOException { - /* - * IFF HA + /** + * IFF HA and this is the quorum leader. 
* - * TODO isHA should be true even if the quorum is not highly - * available since there still could be other services in the write - * pipeline (e.g., replication to an offline HAJournalServer prior - * to changing over into an HA3 quorum or off-site replication). The - * unit tests need to be updated to specify [isHighlyAvailable] for - * ALL quorum based test runs. + * Note: This is true for HA1 as well. The code path enabled by this + * is responsible for writing the HALog files. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ - final boolean isHA = quorum != null && quorum.isHighlyAvailable(); - - // IFF HA and this is the quorum leader. - final boolean isHALeader = isHA + final boolean isHALeader = quorum != null && quorum.getClient().isLeader(quorumToken); /* @@ -1438,15 +1436,25 @@ * then clean up the documentation here (see the commented * out version of this line below). */ - quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); - - // ASYNC MSG RMI + NIO XFER. - remoteWriteFuture = quorumMember.replicate(null/* req */, pkg.getMessage(), - pkg.getData().duplicate()); - - counters.get().nsend++; + quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); /* + * TODO Do we want to always support the replication code path + * when a quorum exists (that is, also for HA1) in case there + * are pipeline listeners that are not HAJournalServer + * instances? E.g., for offsite replication? + */ + if (quorum.replicationFactor() > 1) { + + // ASYNC MSG RMI + NIO XFER. + remoteWriteFuture = quorumMember.replicate(null/* req */, + pkg.getMessage(), pkg.getData().duplicate()); + + counters.get().nsend++; + + } + + /* * The quorum leader logs the write cache block here. For the * followers, the write cache blocks are currently logged by * HAJournalServer. Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -2473,18 +2473,18 @@ } - /** - * Return <code>true</code> if the journal is configured for high - * availability. - * - * @see QuorumManager#isHighlyAvailable() - */ - public boolean isHighlyAvailable() { +// /** +// * Return <code>true</code> if the journal is configured for high +// * availability. +// * +// * @see Quorum#isHighlyAvailable() +// */ +// public boolean isHighlyAvailable() { +// +// return quorum == null ? false : quorum.isHighlyAvailable(); +// +// } - return quorum == null ? false : quorum.isHighlyAvailable(); - - } - /** * {@inheritDoc} * <p> @@ -3428,8 +3428,10 @@ if (quorum == null) return; - if (!quorum.isHighlyAvailable()) + if (!quorum.isHighlyAvailable()) { + // Gather and 2-phase commit are not used in HA1. return; + } /** * CRITICAL SECTION. We need obtain a distributed consensus for the @@ -3542,6 +3544,25 @@ // reload the commit record from the new root block. store._commitRecord = store._getCommitRecord(); + if (quorum != null) { + /** + * Write the root block on the HALog file, closing out that + * file. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> + */ + final QuorumService<HAGlue> localService = quorum.getClient(); + if (localService != null) { + // Quorum service not asynchronously closed. 
+ try { + // Write the closing root block on the HALog file. + localService.logRootBlock(newRootBlock); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + if (txLog.isInfoEnabled()) txLog.info("COMMIT: commitTime=" + commitTime); @@ -3846,9 +3867,9 @@ // Prepare the new root block. cs.newRootBlock(); - if (quorum == null) { + if (quorum == null || quorum.replicationFactor() == 1) { - // Non-HA mode. + // Non-HA mode (including HA1). cs.commitSimple(); } else { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/RWStrategy.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -146,6 +146,7 @@ } + @Override public ByteBuffer read(final long addr) { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/WORMStrategy.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -272,7 +272,7 @@ * which use this flag to conditionally track the checksum of the entire * write cache buffer). */ - private final boolean isHighlyAvailable; + private final boolean isQuorumUsed; /** * The {@link UUID} which identifies the journal (this is the same for each @@ -970,11 +970,11 @@ com.bigdata.journal.Options.HALOG_COMPRESSOR, com.bigdata.journal.Options.DEFAULT_HALOG_COMPRESSOR); - isHighlyAvailable = quorum != null && quorum.isHighlyAvailable(); + isQuorumUsed = quorum != null; // && quorum.isHighlyAvailable(); final boolean useWriteCacheService = fileMetadata.writeCacheEnabled && !fileMetadata.readOnly && fileMetadata.closeTime == 0L - || isHighlyAvailable; + || isQuorumUsed; if (useWriteCacheService) { /* @@ -1049,7 +1049,7 @@ final long fileExtent) throws InterruptedException { - super(baseOffset, buf, useChecksum, isHighlyAvailable, + super(baseOffset, buf, useChecksum, isQuorumUsed, bufferHasData, opener, fileExtent); } @@ -1379,6 +1379,7 @@ * to get the data from another node based on past experience for that * record. 
*/ + @Override public ByteBuffer read(final long addr) { try { Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/FixedAllocator.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -839,6 +839,7 @@ m_statsBucket.allocate(size); } + return value; } else { StringBuilder sb = new StringBuilder(); @@ -1300,4 +1301,33 @@ return count; } + /** + * Determines if the provided physical address is within an allocated slot + * @param addr + * @return + */ + public boolean verifyAllocatedAddress(long addr) { + if (log.isTraceEnabled()) + log.trace("Checking Allocator " + m_index + ", size: " + m_size); + + final Iterator<AllocBlock> blocks = m_allocBlocks.iterator(); + final long range = m_size * m_bitSize * 32; + while (blocks.hasNext()) { + final int startAddr = blocks.next().m_addr; + if (startAddr != 0) { + final long start = RWStore.convertAddr(startAddr); + final long end = start + range; + + if (log.isTraceEnabled()) + log.trace("Checking " + addr + " between " + start + " - " + end); + + if (addr >= start && addr < end) + return true; + } else { + break; + } + } + return false; + } + } Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWStore.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -688,7 +688,7 @@ throws InterruptedException { super(buf, useChecksum, m_quorum != null - && m_quorum.isHighlyAvailable(), bufferHasData, opener, + /*&& m_quorum.isHighlyAvailable()*/, bufferHasData, opener, fileExtent, m_bufferedWrite); @@ -1080,16 +1080,17 @@ private RWWriteCacheService newWriteCacheService() { try { - final boolean highlyAvailable = m_quorum != null - && m_quorum.isHighlyAvailable(); +// final boolean highlyAvailable = m_quorum != null +// && m_quorum.isHighlyAvailable(); - final boolean prefixWrites = highlyAvailable; + final boolean prefixWrites = m_quorum != null; // highlyAvailable return new RWWriteCacheService(m_writeCacheBufferCount, m_minCleanListSize, m_readCacheBufferCount, prefixWrites, m_compactionThreshold, m_hotCacheSize, m_hotCacheThreshold, convertAddr(m_fileSize), m_reopener, m_quorum, this) { - + + @Override 
@SuppressWarnings("unchecked") public WriteCache newWriteCache(final IBufferAccess buf, final boolean useChecksum, @@ -6962,7 +6963,7 @@ if (log.isDebugEnabled()) log.debug("writeRaw: " + offset); - + // Guard IO against concurrent file extension. final Lock lock = m_extensionLock.readLock(); @@ -7067,6 +7068,22 @@ } } + /** + * Can be used to determine if an address is within an allocated slot. + * + * @param addr + * @return whether addr is within slot allocated area + */ + public boolean verifyAllocatedAddress(final long addr) { + for (int index = 0; index < m_allocs.size(); index++) { + final FixedAllocator xfa = m_allocs.get(index); + if (xfa.verifyAllocatedAddress(addr)) + return true; + } + + return false; + } + public StoreState getStoreState() { final RWStoreState ret = new RWStoreState(this); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/rwstore/RWWriteCacheService.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -80,8 +80,9 @@ final long fileExtent) throws InterruptedException { - final boolean highlyAvailable = getQuorum() != null - && getQuorum().isHighlyAvailable(); +// final boolean highlyAvailable = getQuorum() != null +// && getQuorum().isHighlyAvailable(); + final boolean highlyAvailable = getQuorum() != null; return new FileChannelScatteredWriteCache(buf, true/* useChecksum */, highlyAvailable, Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/joinGraph:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/joinGraph:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/joinGraph:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/util:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/bop/util:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/bop/util:4486-4522 \ No newline at end of property Modified: 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -35,6 +35,7 @@ import java.util.HashMap; import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.TimeUnit; import junit.framework.AssertionFailedError; @@ -48,7 +49,6 @@ import com.bigdata.quorum.QuorumActor; import com.bigdata.rwstore.RWWriteCacheService; import com.bigdata.util.ChecksumUtility; -import com.bigdata.util.InnerCause; /** * Test suite for the {@link WriteCacheService} using scattered writes on a @@ -138,6 +138,19 @@ actor.castVote(0); fixture.awaitDeque(); + // Await quorum meet. + assertCondition(new Runnable() { + @Override + public void run() { + try { + assertEquals(0L, quorum.token()); + } catch (Exception e) { + fail(); + } + } + + }, 5000/*timeout*/, TimeUnit.MILLISECONDS); + file = File.createTempFile(getName(), ".rw.tmp"); opener = new ReopenFileChannel(file, "rw"); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -521,7 +521,7 @@ final int nbuffers = 1; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -574,7 +574,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; final int k = 1; final long lastCommitTime = 0L; @@ -619,7 +619,7 @@ final int nbuffers = 2; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -672,7 +672,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -681,8 +681,7 @@ final String logicalServiceId = "logicalService_"+getName(); final MockQuorum<HAPipelineGlue, MyMockQuorumMember<HAPipelineGlue>> quorum = new MockQuorum<HAPipelineGlue, MyMockQuorumMember<HAPipelineGlue>>( k, fixture); - try { - + try { fixture.start(); quorum.start(new MyMockQuorumMember<HAPipelineGlue>(fixture,logicalServiceId)); @@ -718,7 +717,7 @@ final int nbuffers = 6; final boolean useChecksums = true; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -771,7 +770,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = true; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -2120,6 +2119,19 @@ + ", isHighlyAvailable=" + isHighlyAvailable); } + // Await quorum meet. 
+ assertCondition(new Runnable() { + @Override + public void run() { + try { + assertEquals(0L, quorum.token()); + } catch (Exception e) { + fail(); + } + } + + }, 5000/*timeout*/, TimeUnit.MILLISECONDS); + File file = null; ReopenFileChannel opener = null; WriteCacheService writeCacheService = null; Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166 =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/jsr166:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/jsr166:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/jsr166:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/util/httpd ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/util/httpd:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/test/com/bigdata/util/httpd:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/test/com/bigdata/util/httpd:6766-7380 /branches/INT64_BRANCH/bigdata/src/test/com/bigdata/util/httpd:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-compatibility ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata-compatibility:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-compatibility:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-compatibility:6766-7380 /branches/INT64_BRANCH/bigdata-compatibility:4486-4522 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/attr ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/attr:8025-8122 
/branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/attr:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/attr:6766-7380 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/attr:2004-2045 \ No newline at end of property Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/disco ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/disco:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/disco:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/disco:6766-7380 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/disco:2004-2045 \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -4659,7 +4659,7 @@ throw new IllegalStateException("Server is not running"); return tmp.getConnectors()[0].getLocalPort(); - + } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -1096,6 +1096,8 @@ + haLogBytesOnDisk// + ", journalSize=" + journalSize// + + ", thresholdPercentLogSize=" + + thresholdPercentLogSize// + ", percentLogSize=" + actualPercentLogSize// + "%, takeSnapshot=" + takeSnapshot // Index: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/util/config ___________________________________________________________________ Modified: svn:mergeinfo ## -1,3 +1,4 ## +/branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/util/config:8025-8122 /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata-jini/src/java/com/bigdata/util/config:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata-jini/src/java/com/bigdata/util/config:6766-7380 /branches/BTREE_BUFFER_BRANCH/bigdata-jini/src/java/com/bigdata/util/config:2004-2045 \ No newline at end of property Index: 
branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha 2014-04-15 17:08:37 UTC (rev 8123) Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha ___________________________________________________________________ Modified: svn:ignore ## -1,3 +1,4 ## log4j.properties logging.properties results.txt +TestRWStoreAddress.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -43,6 +43,7 @@ import com.bigdata.ha.msg.HARootBlockRequest; import com.bigdata.journal.CommitCounterUtility; import com.bigdata.journal.IHABufferStrategy; +import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.Journal; import com.bigdata.rdf.sail.webapp.client.ConnectOptions; import com.bigdata.rdf.sail.webapp.client.RemoteRepository; @@ -62,6 +63,13 @@ super(name); } + @Override + protected int replicationFactor() { + + return 3; + + } + /** * Issue HTTP request to a service to take a snapshot. * @@ -146,10 +154,12 @@ * The current commit counter on the server. This is the commit point * that should be restored. */ - final long commitCounterM = serverA - .getRootBlock(new HARootBlockRequest(null/* storeUUID */)) - .getRootBlock().getCommitCounter(); + + final IRootBlockView serverARootBlock = serverA.getRootBlock( + new HARootBlockRequest(null/* storeUUID */)).getRootBlock(); + final long commitCounterM = serverARootBlock.getCommitCounter(); + final File snapshotFile = SnapshotManager.getSnapshotFile( getSnapshotDirA(), commitCounterN); @@ -198,10 +208,29 @@ */ rest.restore(false/* listCommitPoints */, Long.MAX_VALUE/* haltingCommitCounter */); + /* + * FIXME For some reason, we need to close and reopen the + * journal before it can be used. See HARestore. + */ + if (true) { + jnl.close(); + + // reopen. + jnl = new Journal(p); + } + + // Verify can dump journal after restore. + dumpJournal(jnl); + // Verify journal now at the expected commit point. assertEquals(commitCounterM, jnl.getRootBlockView() .getCommitCounter()); + if (!serverARootBlock.equals(jnl.getRootBlockView())) { + fail("Root blocks differ: serverA=" + serverARootBlock + + ", restored=" + jnl.getRootBlockView()); + } + /* * Compute digest of the restored journal. The digest should * agree with the digest of the Journal on A since we rolled @@ -214,14 +243,17 @@ new HADigestRequest(null/* storeUUID */)) .getDigest(); - final MessageDigest digest = MessageDigest - .getInstance("MD5"); + final byte[] digest2; + { + final MessageDigest digest = MessageDigest + .getInstance("MD5"); - // digest of restored journal. - ((IHABufferStrategy) (jnl.getBufferStrategy())) - .computeDigest(null/* snapshot */, digest); + // digest of restored journal. 
+ ((IHABufferStrategy) (jnl.getBufferStrategy())) + .computeDigest(null/* snapshot */, digest); - final byte[] digest2 = digest.digest(); + digest2 = digest.digest(); + } if (!BytesUtil.bytesEqual(digestA, digest2)) { @@ -238,20 +270,6 @@ } - /* - * FIXME For some reason, we need to close and reopen the - * journal before it can be used. See HARestore. - */ - if (true) { - jnl.close(); - - // reopen. - jnl = new Journal(p); - } - - // Verify can dump journal after restore. - dumpJournal(jnl); - } finally { if (jnl != null) { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -87,7 +87,6 @@ import com.bigdata.jini.util.JiniUtil; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.StoreState; -import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.AbstractQuorumClient; import com.bigdata.quorum.AsynchronousQuorumCloseException; @@ -110,7 +109,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class AbstractHA3JournalServerTestCase extends +public abstract class AbstractHA3JournalServerTestCase extends AbstractHAJournalServerTestCase implements DiscoveryListener { /** Quorum client used to monitor (or act on) the logical service quorum. */ @@ -133,7 +132,7 @@ * Implementation listens for the death of the child process and can be used * to decide when the child process is no longer executing. */ - private static class ServiceListener implements IServiceListener { + static class ServiceListener implements IServiceListener { private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; @@ -152,13 +151,14 @@ this.haGlue = haGlue; } - @SuppressWarnings("unused") - public HAGlue getHAGlue() { +// @SuppressWarnings("unused") +// public HAGlue getHAGlue() { +// +// return haGlue; +// +// } - return haGlue; - - } - + @Override public void add(final ProcessHelper processHelper) { if (processHelper == null) @@ -218,31 +218,41 @@ * The {@link Remote} interfaces for these services (if started and * successfully discovered). */ - private HAGlue serverA = null, serverB = null, serverC = null; + protected HAGlue serverA = null; + protected HAGlue serverB = null; + + protected HAGlue serverC = null; + /** * {@link UUID}s for the {@link HAJournalServer}s. */ - private UUID serverAId = UUID.randomUUID(), serverBId = UUID.randomUUID(), - serverCId = UUID.randomUUID(); + private UUID serverAId = UUID.randomUUID(); + private UUID serverBId = UUID.randomUUID(); + + private UUID serverCId = UUID.randomUUID(); + /** * The HTTP ports at which the services will respond. 
* * @see <a href="http://trac.bigdata.com/ticket/730" > Allow configuration * of embedded NSS jetty server using jetty-web.xml </a> */ - private final int A_JETTY_PORT = 8090, B_JETTY_PORT = A_JETTY_PORT + 1, - C_JETTY_PORT = B_JETTY_PORT + 1; + protected final int A_JETTY_PORT = 8090; + protected final int B_JETTY_PORT = A_JETTY_PORT + 1; + protected final int C_JETTY_PORT = B_JETTY_PORT + 1; /** * These {@link IServiceListener}s are used to reliably detect that the * corresponding process starts and (most importantly) that it is really * dies once it has been shutdown or destroyed. */ - private ServiceListener serviceListenerA = null, serviceListenerB = null; + protected ServiceListener serviceListenerA = null; - private ServiceListener serviceListenerC = null; + protected ServiceListener serviceListenerB = null; + + protected ServiceListener serviceListenerC = null; private LookupDiscoveryManager lookupDiscoveryManager = null; @@ -1143,14 +1153,14 @@ } - private void safeShutdown(final HAGlue haGlue, final File serviceDir, + void safeShutdown(final HAGlue haGlue, final File serviceDir, final ServiceListener serviceListener) { safeShutdown(haGlue, serviceDir, serviceListener, false/* now */); } - private void safeShutdown(final HAGlue haGlue, final File serviceDir, + protected void safeShutdown(final HAGlue haGlue, final File serviceDir, final ServiceListener serviceListener, final boolean now) { if (haGlue == null) @@ -1369,6 +1379,32 @@ } /** + * Return the zookeeper client configuration file. + */ + final protected String getZKConfigFile() { + + return "zkClient.config"; + + } + + /** + * The as-configured replication factor. + * <p> + * Note: This is defined in the HAJournal.config file, which is where the + * {@link HAJournalServer} gets the correct value. We also need to have the + * replicationFactor on hand for the test suite so we can setup the quorum + * in the test fixture correctly. However, it is difficult to reach the + * appropriate HAJournal.config file from the text fixture during + * {@link #setUp()}. Therefore, for the test setup, this is achieved by + * overriding this abstract method in the test class. + */ + protected int replicationFactor() { + + return 3; + + } + + /** * Return Zookeeper quorum that can be used to reflect (or act on) the * distributed quorum state for the logical service. * @@ -1382,7 +1418,7 @@ KeeperException, IOException { final Configuration config = ConfigurationProvider - .getInstance(new String[] { SRC_PATH + "zkClient.config" }); + .getInstance(new String[] { SRC_PATH + getZKConfigFile() }); zkClientConfig = new ZookeeperClientConfig(config); @@ -1393,7 +1429,7 @@ // Note: Save reference. this.zookeeper = new ZooKeeper(zoohosts, sessionTimeout, new Watcher() { @Override - public void process(WatchedEvent event) { + public void process(final WatchedEvent event) { if (log.isInfoEnabled()) log.info(event); } @@ -1443,9 +1479,19 @@ logicalServiceZPath = logicalServiceZPathPrefix + "/" + logicalServiceId; - final int replicationFactor = (Integer) config.getEntry( - ZookeeperClientConfig.Options.NAMESPACE, - ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); + /** + * Note: This is defined in the HAJournal.config file, which is where + * the HAJournalServer gets the correct value. + * + * However, we also need to have the replicationFactor on hand for the + * test suite so we can setup the quorum in the test fixture correctly. 
+ */ + final int replicationFactor = replicationFactor(); +// { +// replicationFactor = (Integer) config.getEntry( +// ConfigurationOptions.COMPONENT, +// ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); +// } // if (!zka.awaitZookeeperConnected(10, TimeUnit.SECONDS)) { // @@ -1551,7 +1597,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ - abstract private class StartServerTask implements Callable<HAGlue> { + abstract class StartServerTask implements Callable<HAGlue> { private final String name; private final String configName; Copied: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java (from rev 8122, branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java) =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 17:08:37 UTC (rev 8123) @@ -0,0 +1,489 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +package com.bigdata.journal.jini.ha; + +import java.io.File; +import java.io.IOException; +import java.rmi.Remote; +import java.security.DigestException; +import java.security.NoSuchAlgorithmException; +import java.util.LinkedList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent... [truncated message content] |
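A note on the slot padding introduced in the merge above: r8123 adds a static s_zeros array to BufferedWrite and zero-fills the tail of each allocation slot after the record is copied in, so stale buffer contents can never leak to disk or into the HALog (see ticket #721). The standalone sketch below shows just that chunked padding technique; the class name, the putPadded() signature, the buffer sizes and the main() driver are illustrative assumptions for this note, not code from the commit.

import java.nio.ByteBuffer;

public class SlotPaddingSketch {

    // Shared source of zero bytes, as in the diff above; the 256-byte
    // array is reused in chunks so no per-write allocation is needed.
    private static final byte[] s_zeros = new byte[256];

    // Copy data into buf, then zero-fill the remainder of the
    // allocation slot (slotLen bytes in total).
    static void putPadded(final ByteBuffer buf, final byte[] data,
            final int slotLen) {
        if (data.length > slotLen)
            throw new IllegalArgumentException("record larger than slot");
        buf.put(data);
        int padding = slotLen - data.length;
        while (padding > 0) {
            if (padding > s_zeros.length) {
                buf.put(s_zeros);             // a full 256-byte chunk
                padding -= s_zeros.length;
            } else {
                buf.put(s_zeros, 0, padding); // the final partial chunk
                break;
            }
        }
    }

    public static void main(final String[] args) {
        final ByteBuffer buf = ByteBuffer.allocate(1024);
        putPadded(buf, new byte[] { 1, 2, 3 }, 64 /* slot size */);
        System.out.println("position=" + buf.position()); // prints 64
    }
}

The same merge also replaces the manual position(0)/limit(capacity()) reset with ByteBuffer.clear(), which performs exactly that reset in a single call.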
From: <tho...@us...> - 2014-04-15 14:16:19
|
Revision: 8122 http://sourceforge.net/p/bigdata/code/8122 Author: thompsonbry Date: 2014-04-15 14:16:12 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Reconciled changes in the main development branch into the HA1/HA5 branch in preparation for merge back to the main development branch. See #722 and #723. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java Removed Paths: ------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/BufferedWrite.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -142,7 +142,7 @@ * * @see <a href="http://trac.bigdata.com/ticket/721#comment:10"> HA1 </a> */ - private final byte[] s_zeros = new byte[256]; + static private final byte[] s_zeros = new byte[256]; /** * 
Buffer a write. Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCache.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -310,7 +310,7 @@ /** * When a record is used as a read cache then the readCount is - * maintained as a metric on its access. ???This could be used to + * maintained as a metric on its access. This could be used to * determine eviction/compaction. * <p> * Note: volatile to guarantee visibility of updates. Might do better Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/io/writecache/WriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1165,7 +1165,7 @@ */ if (flush) { /* - * Send out the full cache block. FIXME Why are we not calling sendAddressMetadata() here? + * Send out the full cache block. */ writeCacheBlock(curCompactingCache); addClean(curCompactingCache, true/* addFirst */); @@ -1245,6 +1245,8 @@ * @throws InterruptedException * @throws ExecutionException * @throws IOException + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ private void sendAddressMetadata(final WriteCache cache) throws IllegalStateException, InterruptedException, @@ -1345,19 +1347,14 @@ private void writeCacheBlock(final WriteCache cache) throws InterruptedException, ExecutionException, IOException { -// /* -// * IFF HA -// * -// * TODO isHA should be true even if the quorum is not highly -// * available since there still could be other services in the write -// * pipeline (e.g., replication to an offline HAJournalServer prior -// * to changing over into an HA3 quorum or off-site replication). The -// * unit tests need to be updated to specify [isHighlyAvailable] for -// * ALL quorum based test runs. -// */ -// final boolean isHA = quorum != null && quorum.isHighlyAvailable(); - - // IFF HA and this is the quorum leader. + /** + * IFF HA and this is the quorum leader. + * + * Note: This is true for HA1 as well. The code path enabled by this + * is responsible for writing the HALog files. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> + */ final boolean isHALeader = quorum != null && quorum.getClient().isLeader(quorumToken); @@ -1441,6 +1438,12 @@ */ quorumMember.logWriteCacheBlock(pkg.getMessage(), pkg.getData().duplicate()); + /* + * TODO Do we want to always support the replication code path + * when a quorum exists (that is, also for HA1) in case there + * are pipeline listeners that are not HAJournalServer + * instances? E.g., for offsite replication? + */ if (quorum.replicationFactor() > 1) { // ASYNC MSG RMI + NIO XFER. 
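The writeCacheBlock() hunks above are easier to follow when condensed: after this change, the HALog write path is taken by any quorum leader, including a singleton HA1 (k=1) service, while replication down the write pipeline only happens when replicationFactor() > 1. The sketch below captures that decision logic only; Quorum, QuorumMember and their methods here are simplified stand-ins for the bigdata interfaces (the real leader test is quorum.getClient().isLeader(quorumToken)), so treat it as a reading aid, not the actual API.

public class WriteCacheBlockSketch {

    interface Quorum {
        int replicationFactor();      // k: 1 for HA1, 3 for HA3, 5 for HA5
        boolean isLeader(long token); // stand-in for getClient().isLeader(token)
    }

    interface QuorumMember {
        void logWriteCacheBlock(byte[] block); // append the block to the HALog
        void replicate(byte[] block);          // send the block down the pipeline
    }

    static void writeCacheBlock(final Quorum quorum,
            final QuorumMember member, final long token, final byte[] block) {

        // HA1 change: the leader test no longer requires
        // isHighlyAvailable(), so a k=1 service also takes this path.
        final boolean isHALeader = quorum != null && quorum.isLeader(token);

        if (isHALeader) {

            // Always write the HALog; this is what enables HA1
            // online backup and point-in-time restore.
            member.logWriteCacheBlock(block);

            // Only replicate when there can be a follower.
            if (quorum.replicationFactor() > 1) {
                member.replicate(block);
            }
        }

        // ... the local disk write of the cache block follows here ...
    }
}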
Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -3429,13 +3429,8 @@ return; if (!quorum.isHighlyAvailable()) { - // FIXME: Find the reason why this delay is needed to pass HA1 snapshot tests -// try { -// Thread.sleep(1000); -// } catch (InterruptedException e) { -// e.printStackTrace(); -// } - return; + // Gather and 2-phase commit are not used in HA1. + return; } /** @@ -3550,9 +3545,11 @@ store._commitRecord = store._getCommitRecord(); if (quorum != null) { - /* + /** * Write the root block on the HALog file, closing out that * file. + * + * @see <a href="http://trac.bigdata.com/ticket/721"> HA1 </a> */ final QuorumService<HAGlue> localService = quorum.getClient(); if (localService != null) { @@ -3872,7 +3869,7 @@ if (quorum == null || quorum.replicationFactor() == 1) { - // Non-HA mode. + // Non-HA mode (including HA1). cs.commitSimple(); } else { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -521,7 +521,7 @@ final int nbuffers = 1; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -619,7 +619,7 @@ final int nbuffers = 2; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -672,7 +672,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -717,7 +717,7 @@ final int nbuffers = 6; final boolean useChecksums = true; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; @@ -770,7 +770,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = true; - final boolean isHighlyAvailable = true; // for HA1! false; + final boolean isHighlyAvailable = true; // No write pipeline. final int k = 1; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -4574,7 +4574,7 @@ // } - log.warn("Starting NSS from " + jettyXml); + log.warn("Starting NSS"); // Start the server. 
jettyServer.start(); @@ -4658,9 +4658,8 @@ if (tmp == null) throw new IllegalStateException("Server is not running"); - final int port = tmp.getConnectors()[0].getLocalPort(); - haLog.warn("Returning NSSPort: " + port); - return port; + return tmp.getConnectors()[0].getLocalPort(); + } /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3BackupTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -63,6 +63,13 @@ super(name); } + @Override + protected int replicationFactor() { + + return 3; + + } + /** * Issue HTTP request to a service to take a snapshot. * Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -87,7 +87,6 @@ import com.bigdata.jini.util.JiniUtil; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.StoreState; -import com.bigdata.journal.jini.ha.HAJournalServer.ConfigurationOptions; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; import com.bigdata.quorum.AbstractQuorumClient; import com.bigdata.quorum.AsynchronousQuorumCloseException; @@ -110,7 +109,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class AbstractHA3JournalServerTestCase extends +public abstract class AbstractHA3JournalServerTestCase extends AbstractHAJournalServerTestCase implements DiscoveryListener { /** Quorum client used to monitor (or act on) the logical service quorum. */ @@ -133,7 +132,7 @@ * Implementation listens for the death of the child process and can be used * to decide when the child process is no longer executing. */ - public static class ServiceListener implements IServiceListener { + static class ServiceListener implements IServiceListener { private volatile HAGlue haGlue; private volatile ProcessHelper processHelper; @@ -152,13 +151,14 @@ this.haGlue = haGlue; } - @SuppressWarnings("unused") - public HAGlue getHAGlue() { +// @SuppressWarnings("unused") +// public HAGlue getHAGlue() { +// +// return haGlue; +// +// } - return haGlue; - - } - + @Override public void add(final ProcessHelper processHelper) { if (processHelper == null) @@ -1378,11 +1378,33 @@ } - protected String getZKConfigFile() { - return "zkClient.config"; + /** + * Return the zookeeper client configuration file. + */ + final protected String getZKConfigFile() { + + return "zkClient.config"; + } /** + * The as-configured replication factor. + * <p> + * Note: This is defined in the HAJournal.config file, which is where the + * {@link HAJournalServer} gets the correct value. We also need to have the + * replicationFactor on hand for the test suite so we can setup the quorum + * in the test fixture correctly. However, it is difficult to reach the + * appropriate HAJournal.config file from the text fixture during + * {@link #setUp()}. 
Therefore, for the test setup, this is achieved by + * overriding this abstract method in the test class. + */ + protected int replicationFactor() { + + return 3; + + } + + /** * Return Zookeeper quorum that can be used to reflect (or act on) the * distributed quorum state for the logical service. * @@ -1407,7 +1429,7 @@ // Note: Save reference. this.zookeeper = new ZooKeeper(zoohosts, sessionTimeout, new Watcher() { @Override - public void process(WatchedEvent event) { + public void process(final WatchedEvent event) { if (log.isInfoEnabled()) log.info(event); } @@ -1457,9 +1479,19 @@ logicalServiceZPath = logicalServiceZPathPrefix + "/" + logicalServiceId; - final int replicationFactor = (Integer) config.getEntry( - ZookeeperClientConfig.Options.NAMESPACE, - ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); + /** + * Note: This is defined in the HAJournal.config file, which is where + * the HAJournalServer gets the correct value. + * + * However, we also need to have the replicationFactor on hand for the + * test suite so we can setup the quorum in the test fixture correctly. + */ + final int replicationFactor = replicationFactor(); +// { +// replicationFactor = (Integer) config.getEntry( +// ConfigurationOptions.COMPONENT, +// ConfigurationOptions.REPLICATION_FACTOR, Integer.TYPE); +// } // if (!zka.awaitZookeeperConnected(10, TimeUnit.SECONDS)) { // @@ -1565,7 +1597,7 @@ * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ - public abstract class StartServerTask implements Callable<HAGlue> { + abstract class StartServerTask implements Callable<HAGlue> { private final String name; private final String configName; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA5JournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.io.File; @@ -16,17 +39,13 @@ import com.bigdata.ha.HAGlue; import com.bigdata.jini.start.IServiceListener; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownATask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownBTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownCTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.SafeShutdownTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ServiceListener; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartATask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartBTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartCTask; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.StartServerTask; import com.bigdata.quorum.AsynchronousQuorumCloseException; +/** + * Test suite for HA5. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class AbstractHA5JournalServerTestCase extends AbstractHA3JournalServerTestCase { @@ -53,10 +72,6 @@ protected final int D_JETTY_PORT = C_JETTY_PORT + 1; protected final int E_JETTY_PORT = D_JETTY_PORT + 1; - protected String getZKConfigFile() { - return "zkClient5.config"; // 5 stage pipeline - } - /** * These {@link IServiceListener}s are used to reliably detect that the * corresponding process starts and (most importantly) that it is really @@ -88,6 +103,13 @@ return new File(getServiceDirE(), "HALog"); } + @Override + protected int replicationFactor() { + + return 5; + + } + /** * Start A then B then C. As each service starts, this method waits for that * service to appear in the pipeline in the proper position. @@ -141,7 +163,7 @@ } /** - * Start of 3 HA services (this happens in the ctor). + * Start of 5 HA services (this happens in the ctor). * * @param sequential * True if the startup should be sequential or false if @@ -362,6 +384,7 @@ super(name); } + @Override protected void destroyAll() throws AsynchronousQuorumCloseException, InterruptedException, TimeoutException { /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHAJournalServerTestCase.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1259,35 +1259,34 @@ } - /** - * The effective name for this test as used to name the directories in which - * we store things. - * - * TODO If there are method name collisions across the different test - * classes then the test suite name can be added to this. Also, if there are - * file naming problems, then this value can be munged before it is - * returned. - */ - private final String effectiveTestFileName = getClass().getSimpleName() - + "." + getName(); +// /** +// * The effective name for this test as used to name the directories in which +// * we store things. 
+// * +// * TODO If there are method name collisions across the different test +// * classes then the test suite name can be added to this. Also, if there are +// * file naming problems, then this value can be munged before it is +// * returned. +// */ +// private final String effectiveTestFileName = getClass().getSimpleName() +// + "." + getName(); +// +// /** +// * The directory that is the parent of each {@link HAJournalServer}'s +// * individual service directory. +// */ +// protected File getTestDir() { +// return new File(TGT_PATH, getEffectiveTestFileName()); +// } +// +// /** +// * The effective name for this test as used to name the directories in which +// * we store things. +// */ +// protected String getEffectiveTestFileName() { +// +// return effectiveTestFileName; +// +// } - /** - * The directory that is the parent of each {@link HAJournalServer}'s - * individual service directory. - */ - protected File getTestDir() { - return new File(TGT_PATH, getEffectiveTestFileName()); - } - - /** - * The effective name for this test as used to name the directories in which - * we store things. - */ - protected String getEffectiveTestFileName() { - - return effectiveTestFileName; - - } - - } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-D.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -67,7 +67,7 @@ private static haPort = ConfigMath.add(9090,3); // The #of services in the write pipeline. - private static replicationFactor = 5; + private static replicationFactor = 5; // Note: overridden in the HA5 test suites. // The logical service identifier shared by all members of the quorum. private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/HAJournal-E.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -67,7 +67,7 @@ private static haPort = ConfigMath.add(9090,4); // The #of services in the write pipeline. - private static replicationFactor = 5; + private static replicationFactor = 5; // Note: overridden in the HA5 test suites. // The logical service identifier shared by all members of the quorum. 
private static logicalServiceId = System.getProperty("test.logicalServiceId","CI-HAJournal-1"); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/StressTestHA3JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -75,6 +75,13 @@ super(name); } + @Override + protected int replicationFactor() { + + return 3; + + } + /** * Complex hack to override the {@link HAJournal} properties. * Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -74,6 +74,11 @@ // Basic tests for a single HAJournalServer (quorum does not meet) suite.addTestSuite(TestHAJournalServer.class); + // HA1 test suite. + suite.addTestSuite(TestHA1JournalServer.class); + suite.addTestSuite(TestHA1SnapshotPolicy.class); + suite.addTestSuite(TestHA1SnapshotPolicy2.class); + // HA2 test suite (k=3, but only 2 services are running). suite.addTestSuite(TestHA2JournalServer.class); @@ -108,17 +113,16 @@ // Verify ability to override the HAJournal implementation class. suite.addTestSuite(TestHAJournalServerOverride.class); - // Test suite of longer running stress tests for an HA3 cluster. - suite.addTestSuite(StressTestHA3JournalServer.class); - - // Test suite of longer running stress tests for an HA5 cluster. + // HA5 test suite. suite.addTestSuite(TestHA5JournalServer.class); suite.addTestSuite(TestHA5JournalServerWithHALogs.class); - // Test suite of longer running stress tests for an HA1 cluster. - suite.addTestSuite(TestHA1JournalServer.class); - suite.addTestSuite(TestHA1SnapshotPolicy.class); - suite.addTestSuite(TestHA1SnapshotPolicy2.class); + /* + * Stress tests. + */ + + // Test suite of longer running stress tests for an HA3 cluster. + suite.addTestSuite(StressTestHA3JournalServer.class); return suite; Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.concurrent.TimeUnit; @@ -3,10 +26,13 @@ import java.util.concurrent.TimeoutException; -import com.bigdata.ha.HAGlue; -import com.bigdata.ha.HAStatusEnum; - import net.jini.config.Configuration; +import com.bigdata.ha.HAGlue; +/** + * Test suite for HA1. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA1JournalServer extends AbstractHA3JournalServerTestCase { @@ -27,15 +53,11 @@ "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", // "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline - } - public TestHA1JournalServer() { } @@ -43,40 +65,47 @@ super(name); } + @Override + protected int replicationFactor() { + + return 1; + + } + public void testStartA() throws Exception { - doStartA(); + doStartA(); } - + protected void doStartA() throws Exception { try { - quorum.awaitQuorum(awaitQuorumTimeout, - TimeUnit.MILLISECONDS); - - fail("HA1 requires quorum of 1!"); + quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); + + fail("HA1 requires quorum of 1!"); } catch (TimeoutException te) { - // expected + // expected } // Start 1 service. final HAGlue serverA = startA(); - + // this should succeed final long token = quorum.awaitQuorum(awaitQuorumTimeout, TimeUnit.MILLISECONDS); - + assertEquals(token, awaitFullyMetQuorum()); - + final HAGlue leader = quorum.getClient().getLeader(token); - + assertEquals(serverA, leader); } - + public void testSimpleTransaction() throws Exception { - doStartA(); - + + doStartA(); + serverA.awaitHAReady(2, TimeUnit.SECONDS); - + /* * Awaiting HAReady is not sufficient since the service may still * writing the initial transaction. @@ -85,30 +114,27 @@ * status of a new journal being ready too soon to process an NSS * request */ - - awaitCommitCounter(1, new HAGlue[] { serverA}); - - // Thread.sleep(100); - - // serverA. 
- - log.warn("Calling SimpleTransaction"); - simpleTransaction(); - - awaitCommitCounter(2, new HAGlue[] { serverA}); + + awaitCommitCounter(1, new HAGlue[] { serverA }); + + simpleTransaction(); + + awaitCommitCounter(2, new HAGlue[] { serverA }); + } - + public void testMultiTransaction() throws Exception { - doStartA(); - - awaitCommitCounter(1, new HAGlue[] { serverA}); + doStartA(); + + awaitCommitCounter(1, new HAGlue[] { serverA }); // Thread.sleep(1000); - + final int NTRANS = 10; - for (int t = 0; t < NTRANS; t++) { - simpleTransaction(); - } + for (int t = 0; t < NTRANS; t++) { + simpleTransaction(); + } - awaitCommitCounter(NTRANS+1, new HAGlue[] { serverA}); + awaitCommitCounter(NTRANS + 1, new HAGlue[] { serverA }); + } + } -} Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -30,10 +30,13 @@ super(name); } - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline + @Override + protected int replicationFactor() { + + return 1; + } - + /** * {@inheritDoc} * <p> @@ -60,7 +63,7 @@ "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", // "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", // "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA1SnapshotPolicy2.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,13 +1,37 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; -import java.util.concurrent.TimeUnit; - import net.jini.config.Configuration; import com.bigdata.ha.HAGlue; -import com.bigdata.ha.halog.HALogWriter; -import com.bigdata.ha.msg.HARootBlockRequest; +/** + * Test suite for HA1 online backups and point in time restore. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA1SnapshotPolicy2 extends AbstractHA3BackupTestCase { public TestHA1SnapshotPolicy2() { @@ -17,11 +41,11 @@ super(name); } - /** How long to wait for snapshots to appear. */ - private final long awaitSnapshotMillis = 5000; - - protected String getZKConfigFile() { - return "zkClient1.config"; // 1 stage pipeline + @Override + protected int replicationFactor() { + + return 1; + } /** @@ -46,7 +70,7 @@ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy(0L,1,0)", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=1" + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor() }; } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -181,9 +181,6 @@ assertDigestsEquals(new HAGlue[] { serverA, serverB }); // Verify can not write on follower. - log.warn("ServerA port: " + serverA.getNSSPort()); - log.warn("ServerB port: " + serverB.getNSSPort()); - assertWriteRejected(serverB); // Start 3rd service. Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServer.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.util.concurrent.Callable; @@ -10,9 +33,12 @@ import com.bigdata.ha.HAGlue; import com.bigdata.ha.HAStatusEnum; import com.bigdata.ha.msg.HARootBlockRequest; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.LargeLoadTask; +/** + * HA5 test suite. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA5JournalServer extends AbstractHA5JournalServerTestCase { /** @@ -31,11 +57,18 @@ "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.NoSnapshotPolicy()", // "com.bigdata.journal.jini.ha.HAJournalServer.HAJournalClass=\""+HAJournalTest.class.getName()+"\"", "com.bigdata.journal.jini.ha.HAJournalServer.onlineDisasterRecovery=true", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=5", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } + @Override + protected int replicationFactor() { + + return 5; + + } + public TestHA5JournalServer() { } @@ -49,9 +82,11 @@ * @throws Exception */ public void testStartABC_DE() throws Exception { - doStartABC_DE(); + + doStartABC_DE(); + } - + protected void doStartABC_DE() throws Exception { // Start 3 services. @@ -171,117 +206,115 @@ * HA5 is fully met after 5 services are started simultaneously */ public void testABCDESimultaneous() throws Exception { - - final ABCDE startup = new ABCDE(false); - - awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(false); + + awaitFullyMetQuorum(); + startup.assertDigestsEqual(); } - + /** * HA5 is fully met after 5 services are started sequentially */ public void testABCDESequential() throws Exception { - - final ABCDE startup = new ABCDE(true); - - awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(true); + + awaitFullyMetQuorum(); + startup.assertDigestsEqual(); } - + /** * HA5 remains met with 1 service failure */ public void testABCDEShutdownC() throws Exception { - - final ABCDE startup = new ABCDE(true); - - final long token = awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(true); + + final long token = awaitFullyMetQuorum(); + startup.assertDigestsEqual(); - + shutdownC(); - - awaitPipeline(new HAGlue[] {serverA, serverB, serverD, serverE}); - + + awaitPipeline(new HAGlue[] { serverA, serverB, serverD, serverE }); + assertEquals(token, awaitMetQuorum()); } - + /** * HA5 remains met with 2 service failures */ public void testABCDEShutdownBD() throws Exception { - - final ABCDE startup = new ABCDE(true); - - final long token = awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(true); + + final long token = awaitFullyMetQuorum(); + startup.assertDigestsEqual(); - + shutdownB(); shutdownD(); - - awaitPipeline(new HAGlue[] {serverA, serverC, serverE}); - + + awaitPipeline(new HAGlue[] { serverA, serverC, serverE }); + assertEquals(token, awaitMetQuorum()); } - + /** - * HA5 breaks with 3 service failures and re-meets when one - * is restarted + * HA5 breaks with 3 service failures and re-meets when one is restarted */ public void testABCDEShutdownBCD() throws Exception { - - final ABCDE startup = new ABCDE(true); - - final long token = awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(true); + + final long token = awaitFullyMetQuorum(); + startup.assertDigestsEqual(); - + shutdownB(); shutdownC(); shutdownD(); - + // Non-deterministic pipeline order // awaitPipeline(new HAGlue[] {serverA, serverE}); - + try { - awaitMetQuorum(); - fail("Quorum should not be met"); + awaitMetQuorum(); + fail("Quorum should not be met"); } catch (TimeoutException te) { - // expected + // expected } - + startC(); - + assertFalse(token == awaitMetQuorum()); } - + /** - * HA5 breaks when leader fails, meets on new token - * then fully meets on same token when previous leader - * is restarted + * HA5 
breaks when leader fails, meets on new token then fully meets on same + * token when previous leader is restarted */ public void testABCDEShutdownLeader() throws Exception { - - final ABCDE startup = new ABCDE(true); - - final long token = awaitFullyMetQuorum(); - + + final ABCDE startup = new ABCDE(true); + + final long token = awaitFullyMetQuorum(); + startup.assertDigestsEqual(); - + shutdownA(); - + // pipeline order is non-deterministic - + final long token2 = awaitMetQuorum(); - - assertFalse(token==token2); - + + assertFalse(token == token2); + startA(); - + assertTrue(token2 == awaitFullyMetQuorum()); } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA5JournalServerWithHALogs.java 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,3 +1,26 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ package com.bigdata.journal.jini.ha; import java.io.File; @@ -3,13 +26,16 @@ import java.util.Calendar; +import net.jini.config.Configuration; + import com.bigdata.ha.HAGlue; import com.bigdata.ha.halog.HALogReader; import com.bigdata.ha.halog.IHALogReader; import com.bigdata.journal.CommitCounterUtility; -import com.bigdata.journal.jini.ha.AbstractHA3JournalServerTestCase.ABC; -import net.jini.config.Configuration; -import junit.framework.TestCase; - +/** + * HA5 test suite. 
+ * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ public class TestHA5JournalServerWithHALogs extends AbstractHA5JournalServerTestCase { @@ -46,6 +72,13 @@ } + @Override + protected int replicationFactor() { + + return 5; + + } + /** * {@inheritDoc} * <p> @@ -67,7 +100,7 @@ return new String[]{ "com.bigdata.journal.jini.ha.HAJournalServer.restorePolicy=new com.bigdata.journal.jini.ha.DefaultRestorePolicy()", "com.bigdata.journal.jini.ha.HAJournalServer.snapshotPolicy=new com.bigdata.journal.jini.ha.DefaultSnapshotPolicy("+neverRun+",0)", - "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor=5", + "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="+replicationFactor(), }; } Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/log4j-template-A.properties 2014-04-15 14:16:12 UTC (rev 8122) @@ -11,7 +11,7 @@ #log4j.logger.com.bigdata.service.jini.lookup=ALL #log4j.logger.com.bigdata.quorum=INFO log4j.logger.com.bigdata.quorum.zk=INFO -log4j.logger.com.bigdata.io.writecache=INFO +#log4j.logger.com.bigdata.io.writecache=INFO #log4j.logger.com.bigdata.zookeeper=INFO #log4j.logger.com.bigdata.zookeeper.ZooHelper=ALL log4j.logger.com.bigdata.rdf.internal.LexiconConfiguration=FATAL Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -30,9 +30,6 @@ private static fedname = "benchmark"; - // The #of services in the write pipeline. - private static replicationFactor = 3; - /* The logical service identifier shared by all members of the quorum. * * Note: The test fixture ignores this value. For the avoidance of Deleted: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient1.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,91 +0,0 @@ -/* Zookeeper client only configuration. 
- */ -import java.io.File; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.UUID; - -import com.bigdata.util.NV; -import com.bigdata.util.config.NicUtil; -import com.bigdata.journal.Options; -import com.bigdata.journal.BufferMode; -import com.bigdata.journal.jini.ha.HAJournal; -import com.bigdata.jini.lookup.entry.*; -import com.bigdata.service.IBigdataClient; -import com.bigdata.service.AbstractTransactionService; -import com.bigdata.service.jini.*; -import com.bigdata.service.jini.lookup.DataServiceFilter; -import com.bigdata.service.jini.master.ServicesTemplate; -import com.bigdata.jini.start.config.*; -import com.bigdata.jini.util.ConfigMath; - -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Id; - -/* - * Globals. - */ -bigdata { - - private static fedname = "benchmark"; - - // The #of services in the write pipeline. - private static replicationFactor = 1; - - /* The logical service identifier shared by all members of the quorum. - * - * Note: The test fixture ignores this value. For the avoidance of - * doubt, the value is commented out. - */ - //private static logicalServiceId = "CI-HAJournal-1"; - - // zookeeper - static private sessionTimeout = (int)ConfigMath.s2ms(20); - -} - -/* - * Zookeeper client configuration. - */ -org.apache.zookeeper.ZooKeeper { - - /* Root znode for the federation instance. */ - zroot = "/" + bigdata.fedname; - - /* A comma separated list of host:port pairs, where the port is - * the CLIENT port for the zookeeper server instance. - */ - // standalone. - servers = "localhost:2081"; - // ensemble -// servers = bigdata.zoo1+":2181" -// + ","+bigdata.zoo2+":2181" -// + ","+bigdata.zoo3+":2181" -// ; - - /* Session timeout (optional). */ - sessionTimeout = bigdata.sessionTimeout; - - /* - * ACL for the zookeeper nodes created by the bigdata federation. - * - * Note: zookeeper ACLs are not transmitted over secure channels - * and are placed into plain text Configuration files by the - * ServicesManagerServer. - */ - acl = new ACL[] { - - new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone")) - - }; - - /* - * Note: Normally on the HAJournalServer component. Hacked in the test - * suite setup to look at the ZooKeeper component instead. - */ - - logicalServiceId = bigdata.logicalServiceId; - - replicationFactor = bigdata.replicationFactor; -} Deleted: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config 2014-04-15 13:01:24 UTC (rev 8121) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/zkClient5.config 2014-04-15 14:16:12 UTC (rev 8122) @@ -1,91 +0,0 @@ -/* Zookeeper client only configuration. 
- */
-import java.io.File;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
-import com.bigdata.util.NV;
-import com.bigdata.util.config.NicUtil;
-import com.bigdata.journal.Options;
-import com.bigdata.journal.BufferMode;
-import com.bigdata.journal.jini.ha.HAJournal;
-import com.bigdata.jini.lookup.entry.*;
-import com.bigdata.service.IBigdataClient;
-import com.bigdata.service.AbstractTransactionService;
-import com.bigdata.service.jini.*;
-import com.bigdata.service.jini.lookup.DataServiceFilter;
-import com.bigdata.service.jini.master.ServicesTemplate;
-import com.bigdata.jini.start.config.*;
-import com.bigdata.jini.util.ConfigMath;
-
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-
-/*
- * Globals.
- */
-bigdata {
-
-   private static fedname = "benchmark";
-
-   // The #of services in the write pipeline.
-   private static replicationFactor = 5;
-
-   /* The logical service identifier shared by all members of the quorum.
-    *
-    * Note: The test fixture ignores this value. For the avoidance of
-    * doubt, the value is commented out.
-    */
-   //private static logicalServiceId = "CI-HAJournal-1";
-
-   // zookeeper
-   static private sessionTimeout = (int)ConfigMath.s2ms(20);
-
-}
-
-/*
- * Zookeeper client configuration.
- */
-org.apache.zookeeper.ZooKeeper {
-
-   /* Root znode for the federation instance. */
-   zroot = "/" + bigdata.fedname;
-
-   /* A comma separated list of host:port pairs, where the port is
-    * the CLIENT port for the zookeeper server instance.
-    */
-   // standalone.
-   servers = "localhost:2081";
-   // ensemble
-//   servers = bigdata.zoo1+":2181"
-//       + ","+bigdata.zoo2+":2181"
-//       + ","+bigdata.zoo3+":2181"
-//       ;
-
-   /* Session timeout (optional). */
-   sessionTimeout = bigdata.sessionTimeout;
-
-   /*
-    * ACL for the zookeeper nodes created by the bigdata federation.
-    *
-    * Note: zookeeper ACLs are not transmitted over secure channels
-    * and are placed into plain text Configuration files by the
-    * ServicesManagerServer.
-    */
-   acl = new ACL[] {
-
-      new ACL(ZooDefs.Perms.ALL, new Id("world", "anyone"))
-
-   };
-
-   /*
-    * Note: Normally on the HAJournalServer component. Hacked in the test
-    * suite setup to look at the ZooKeeper component instead.
-    */
-
-   logicalServiceId = bigdata.logicalServiceId;
-
-   replicationFactor = bigdata.replicationFactor;
-}

Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java
===================================================================
--- branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-15 13:01:24 UTC (rev 8121)
+++ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/SD.java 2014-04-15 14:16:12 UTC (rev 8122)
@@ -184,7 +184,7 @@
      * withstand a failure.
      */
     static public final URI ReplicationFactor = new URIImpl(BDFNS
-            + "replicationCount");
+            + "replicationFactor");

     /**
      * An {@link IBigdataFederation}.
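
The common thread through the hunks above: the per-pipeline Zookeeper client configurations (zkClient1.config, zkClient5.config) are deleted, and the replication factor is instead supplied by a single abstract method that each HA1/HA3/HA5 test suite overrides, feeding both the quorum fixture and the HAJournalServer.replicationFactor configuration override. A minimal sketch of that template-method arrangement (hypothetical class names, not the actual test classes):

    abstract class AbstractHAFixture {

        // Pipeline size (k). Each concrete HA1/HA3/HA5 suite overrides this.
        protected abstract int replicationFactor();

        // Per-service configuration overrides. The replication factor is
        // derived from the method above rather than hard-coded in a
        // per-pipeline zkClient config file.
        protected String[] getOverrides() {
            return new String[] {
                "com.bigdata.journal.jini.ha.HAJournalServer.replicationFactor="
                        + replicationFactor() };
        }
    }

    class HA5Suite extends AbstractHAFixture {
        @Override
        protected int replicationFactor() {
            return 5; // five-service write pipeline
        }
    }

This keeps a single zkClient.config for all pipeline sizes and makes it impossible for a suite's fixture and its server overrides to disagree about k.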
From: <tho...@us...> - 2014-04-15 13:01:37
Revision: 8121 http://sourceforge.net/p/bigdata/code/8121 Author: thompsonbry Date: 2014-04-15 13:01:24 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Caught up the HA1/HA5 branch with changes in the main development branch prior to bringing back the HA1/HA5 branch to the main development branch. See #722 (HA1) See #723 (HA5) Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/cache/StressTestGlobalLRU.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithRedirect.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/service/ndx/pipeline/TestMasterTaskWithSplits.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HARestore.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3JournalServer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/ASTSearchOptimizer.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/eval/SearchServiceFactory.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/sparql/ast/optimizers/ASTBindingAssigner.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/store/BDS.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestUnions.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/MultiTenancyServlet.java branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/BIGDATA_MGC_HA1_HA5/build.xml branches/BIGDATA_MGC_HA1_HA5/src/resources/HAJournal/HAJournal.config branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/startHAServices branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile 
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3CancelQuery.java branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig 
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_831.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.srx branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/ticket_874b.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/831.ttl branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.rq branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/874.ttl branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/HARestore branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA Removed Paths: ------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ 
branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/BIGDATA_MGC_HA1_HA5/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdata/ branches/BIGDATA_MGC_HA1_HA5/src/resources/etc/default/bigdataHA Property Changed: ---------------- branches/BIGDATA_MGC_HA1_HA5/ branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/joinGraph/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/bop/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/jsr166/ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/util/httpd/ branches/BIGDATA_MGC_HA1_HA5/bigdata-compatibility/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/attr/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/disco/ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/java/com/bigdata/util/config/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/btc/src/resources/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/lubm/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/ branches/BIGDATA_MGC_HA1_HA5/bigdata-perf/uniprot/src/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/bop/rdf/aggregate/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/changesets/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/error/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/internal/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/relation/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/java/com/bigdata/rdf/util/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/samples/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/bop/rdf/aggregate/ 
branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/internal/ branches/BIGDATA_MGC_HA1_HA5/bigdata-rdf/src/test/com/bigdata/rdf/relation/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/bench/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/changesets/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/bench/ branches/BIGDATA_MGC_HA1_HA5/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/LEGAL/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/lib/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/java/it/unimi/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/ branches/BIGDATA_MGC_HA1_HA5/dsi-utils/src/test/it/unimi/dsi/ branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/java/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_MGC_HA1_HA5/lgpl-utils/src/test/it/unimi/dsi/fastutil/bytes/custom/ branches/BIGDATA_MGC_HA1_HA5/osgi/ branches/BIGDATA_MGC_HA1_HA5/src/resources/bin/config/ Index: branches/BIGDATA_MGC_HA1_HA5 =================================================================== --- branches/BIGDATA_MGC_HA1_HA5 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE:6769-6785 /branches/BIGDATA_RELEASE_1_2_0:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0:8025-8120 /branches/BTREE_BUFFER_BRANCH:2004-2045 /branches/DEV_BRANCH_27_OCT_2009:2270-2546,2548-2782 /branches/INT64_BRANCH:4486-4522 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/lib/jetty ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/lib/jetty:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/lib/jetty:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/lib/jetty:8025-8120 /branches/INT64_BRANCH/bigdata/lib/jetty:4486-4522 /branches/MGC_1_3_0/bigdata/lib/jetty:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/lib/jetty:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/aggregate ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 
## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/aggregate:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/aggregate:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/aggregate:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/aggregate:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/joinGraph ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/joinGraph:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/joinGraph:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/joinGraph:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/joinGraph:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/bop/util ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/bop/util:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/bop/util:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/bop/util:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/bop/util:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/bop/util:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/bop/util:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/ProcessReaderHelper.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -84,11 +84,11 @@ */ public String readLine() throws 
IOException, InterruptedException { - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); while(getActiveProcess().isAlive()) { - if(t.isInterrupted()) { + if(Thread.interrupted()) { throw new InterruptedException(); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/counters/win/TypeperfCollector.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.LineNumberReader; +import java.nio.channels.ClosedByInterruptException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Arrays; @@ -39,6 +40,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.CancellationException; import org.apache.log4j.Logger; @@ -52,6 +54,7 @@ import com.bigdata.counters.IRequiredHostCounters; import com.bigdata.util.CSVReader; import com.bigdata.util.CSVReader.Header; +import com.bigdata.util.InnerCause; /** * Collects per-host performance counters on a Windows platform using @@ -68,19 +71,19 @@ */ public class TypeperfCollector extends AbstractProcessCollector { - static protected final Logger log = Logger.getLogger(TypeperfCollector.class); + static private final Logger log = Logger.getLogger(TypeperfCollector.class); - /** - * True iff the {@link #log} level is INFO or less. - */ - final protected static boolean INFO = log.isInfoEnabled(); +// /** +// * True iff the {@link #log} level is INFO or less. +// */ +// final protected static boolean INFO = log.isInfoEnabled(); +// +// /** +// * True iff the {@link #log} level is DEBUG or less. +// */ +// final protected static boolean DEBUG = log.isDebugEnabled(); /** - * True iff the {@link #log} level is DEBUG or less. - */ - final protected static boolean DEBUG = log.isDebugEnabled(); - - /** * Updated each time a new row of data is read from the process and reported * as the last modified time for counters based on that process and * defaulted to the time that we begin to collect performance data. @@ -175,6 +178,7 @@ } + @Override public Double getValue() { final Double value = (Double) vals.get(path); @@ -189,6 +193,7 @@ } + @Override public long lastModified() { return lastModified; @@ -199,7 +204,8 @@ * @throws UnsupportedOperationException * always. */ - public void setValue(Double value, long timestamp) { + @Override + public void setValue(final Double value, final long timestamp) { throw new UnsupportedOperationException(); @@ -225,6 +231,7 @@ * * @throws IOException */ + @Override public List<String> getCommand() { // make sure that our counters have been declared. @@ -243,7 +250,7 @@ // counter names need to be double quoted for the command line. 
command.add("\"" + decl.getCounterNameForWindows() + "\""); - if(INFO) log.info("Will collect: \"" + if(log.isInfoEnabled()) log.info("Will collect: \"" + decl.getCounterNameForWindows() + "\" as " + decl.getPath()); @@ -255,6 +262,7 @@ } + @Override public AbstractProcessReader getProcessReader() { return new ProcessReader(); @@ -290,9 +298,10 @@ } + @Override public void run() { - if(INFO) + if(log.isInfoEnabled()) log.info(""); try { @@ -300,27 +309,34 @@ // run read(); - } catch (InterruptedException e) { + } catch (Exception e) { - // Note: This is a normal exit. - if(INFO) - log.info("Interrupted - will terminate"); + if (InnerCause.isInnerCause(e, InterruptedException.class)|| + InnerCause.isInnerCause(e, ClosedByInterruptException.class)|| + InnerCause.isInnerCause(e, CancellationException.class) + ) { - } catch (Exception e) { + // Note: This is a normal exit. + if (log.isInfoEnabled()) + log.info("Interrupted - will terminate"); - // Unexpected error. - log.fatal(e.getMessage(), e); + } else { + // Unexpected error. + log.fatal(e.getMessage(), e); + + } + } - if(INFO) + if(log.isInfoEnabled()) log.info("Terminated"); } private void read() throws Exception { - if(INFO) + if(log.isInfoEnabled()) log.info(""); long nsamples = 0; @@ -345,33 +361,34 @@ */ csvReader.setTailDelayMillis(100/* ms */); - try { +// try { - // read headers from the file. - csvReader.readHeaders(); + // read headers from the file. + csvReader.readHeaders(); - } catch (IOException ex) { +// } catch (IOException ex) { +// +// /* +// * Note: An IOException thrown out here often indicates an +// * asynchronous close of of the reader. A common and benign +// * cause of that is closing the input stream because the service +// * is shutting down. +// */ +// +// if (!Thread.interrupted()) +// throw ex; +// +// throw new InterruptedException(); +// +// } - /* - * Note: An IOException thrown out here often indicates an - * asynchronous close of of the reader. A common and benign - * cause of that is closing the input stream because the service - * is shutting down. - */ - - if (!Thread.currentThread().isInterrupted()) - throw ex; - - throw new InterruptedException(); - - } - /* * replace the first header definition so that we get clean * timestamps. */ csvReader.setHeader(0, new Header("Timestamp") { - public Object parseValue(String text) { + @Override + public Object parseValue(final String text) { try { return f.parse(text); @@ -390,7 +407,7 @@ */ { - if(INFO) + if(log.isInfoEnabled()) log.info("setting up headers."); int i = 1; @@ -400,7 +417,7 @@ final String path = decl.getPath(); // String path = hostPathPrefix + decl.getPath(); - if (INFO) + if (log.isInfoEnabled()) log.info("setHeader[i=" + i + "]=" + path); csvReader.setHeader(i++, new Header(path)); @@ -409,13 +426,20 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("starting row reads"); - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); - while (!t.isInterrupted() && csvReader.hasNext()) { + while (true) { + if (Thread.interrupted()) + throw new InterruptedException(); + + if (!csvReader.hasNext()) { + break; + } + try { final Map<String, Object> row = csvReader.next(); @@ -455,7 +479,7 @@ } - if(INFO) + if(log.isInfoEnabled()) log.info("done."); } @@ -466,6 +490,7 @@ * Declares the performance counters to be collected from the Windows * platform. 
*/ + @Override public CounterSet getCounters() { // if (root == null) { Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/htree/raba ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/htree/raba:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/htree/raba:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/htree/raba:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/htree/raba:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/htree/raba:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Index: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 2014-04-15 13:01:24 UTC (rev 8121) Property changes on: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/jsr166 ___________________________________________________________________ Modified: svn:mergeinfo ## -1,5 +1,6 ## /branches/BIGDATA_OPENRDF_2_6_9_UPDATE/bigdata/src/java/com/bigdata/jsr166:6769-6785 /branches/BIGDATA_RELEASE_1_2_0/bigdata/src/java/com/bigdata/jsr166:6766-7380 +/branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/jsr166:8025-8120 /branches/INT64_BRANCH/bigdata/src/java/com/bigdata/jsr166:4486-4522 /branches/MGC_1_3_0/bigdata/src/java/com/bigdata/jsr166:7609-7752 /branches/QUADS_QUERY_BRANCH/bigdata/src/java/com/bigdata/jsr166:4525-4531,4533-4548,4550-4584,4586-4609,4611-4632,4634-4643,4646-4672,4674-4685,4687-4693,4697-4735,4737-4782,4784-4792,4794-4796,4798-4801 \ No newline at end of property Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/resources/AsynchronousOverflowTask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -2848,7 +2848,7 @@ /** * Note: This task is interrupted by {@link OverflowManager#shutdownNow()}. - * Therefore is tests {@link Thread#isInterrupted()} and returns immediately + * Therefore it tests {@link Thread#isInterrupted()} and returns immediately * if it has been interrupted. * * @return The return value is always null. @@ -3374,7 +3374,10 @@ static protected boolean isNormalShutdown( final ResourceManager resourceManager, final Throwable t) { - if(Thread.currentThread().isInterrupted()) return true; + if (Thread.interrupted()) { + // Note: interrupt status of thread was cleared. 
+ return true; + } if (!resourceManager.isRunning() || !resourceManager.getConcurrencyManager() Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/FullTextIndex.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -955,35 +955,137 @@ } + /** + * Perform a range count on a full text query. + */ public int count(final FullTextQuery query) { - final Hit[] a = _search(query); + if (cache.containsKey(query)) { + + if (log.isInfoEnabled()) + log.info("found hits in cache"); + + return cache.get(query).length; + + } else { + + if (log.isInfoEnabled()) + log.info("did not find hits in cache"); + + } + + // tokenize the query. + final TermFrequencyData<V> qdata = tokenize(query); + + // No terms after stopword extraction + if (qdata == null) { + + cache.put(query, new Hit[] {}); + + return 0; + + } + + /* + * We can run an optimized version of this (just a quick range count) + * but only if the caller does not care about exact match and has + * not specified a regex. + */ + if (qdata.distinctTermCount() == 1 && + !query.isMatchExact() && query.getMatchRegex() == null) { + + final boolean prefixMatch = query.isPrefixMatch(); + + final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, + prefixMatch, md.getLocalTermWeight(), this); + + return (int) task1.getRangeCount(); + + } else { + + final Hit<V>[] a = _search(query); + + return a.length; + + } - return a.length; - } - public Hit<V>[] _search(final FullTextQuery q) { + protected TermFrequencyData<V> tokenize(final FullTextQuery query) { - final String query = q.getQuery(); - final String languageCode = q.getLanguageCode(); - final boolean prefixMatch = q.isPrefixMatch(); - final double minCosine = q.getMinCosine(); - final double maxCosine = q.getMaxCosine(); - final int minRank = q.getMinRank(); - final int maxRank = q.getMaxRank(); - final boolean matchAllTerms = q.isMatchAllTerms(); - final boolean matchExact = q.isMatchExact(); - final String regex = q.getMatchRegex(); - long timeout = q.getTimeout(); - final TimeUnit unit = q.getTimeUnit(); + final String q = query.getQuery(); + final String languageCode = query.getLanguageCode(); + final boolean prefixMatch = query.isPrefixMatch(); + // tokenize the query. + final TermFrequencyData<V> qdata; + { + + final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this); + + /* + * If we are using prefix match ('*' operator) then we don't want to + * filter stopwords from the search query. + */ + final boolean filterStopwords = !prefixMatch; + + index(buffer, // + null, // docId // was Long.MIN_VALUE + Integer.MIN_VALUE, // fieldId + languageCode,// + new StringReader(q), // + filterStopwords// + ); + + if (buffer.size() == 0) { + + /* + * There were no terms after stopword extration. 
+ */ + + log.warn("No terms after stopword extraction: query=" + query); + + return null; + + } + + qdata = buffer.get(0); + + qdata.normalize(); + + } + + return qdata; + + } + + public Hit<V>[] _search(final FullTextQuery query) { + + final String queryStr = query.getQuery(); + final String languageCode = query.getLanguageCode(); + final boolean prefixMatch = query.isPrefixMatch(); + final double minCosine = query.getMinCosine(); + final double maxCosine = query.getMaxCosine(); + final int minRank = query.getMinRank(); + final int maxRank = query.getMaxRank(); + final boolean matchAllTerms = query.isMatchAllTerms(); + final boolean matchExact = query.isMatchExact(); + final String regex = query.getMatchRegex(); + long timeout = query.getTimeout(); + final TimeUnit unit = query.getTimeUnit(); + final long begin = System.currentTimeMillis(); // if (languageCode == null) // throw new IllegalArgumentException(); - if (query == null) + if (queryStr == null) throw new IllegalArgumentException(); if (minCosine < 0d || minCosine > 1d) @@ -1002,7 +1104,7 @@ throw new IllegalArgumentException(); if (log.isInfoEnabled()) - log.info("languageCode=[" + languageCode + "], text=[" + query + log.info("languageCode=[" + languageCode + "], text=[" + queryStr + "], minCosine=" + minCosine + ", maxCosine=" + maxCosine + ", minRank=" + minRank @@ -1018,7 +1120,7 @@ } - final FullTextQuery cacheKey = q; + final FullTextQuery cacheKey = query; Hit<V>[] a; @@ -1034,145 +1136,24 @@ if (log.isInfoEnabled()) log.info("did not find hits in cache"); - // tokenize the query. - final TermFrequencyData<V> qdata; - { - - final TokenBuffer<V> buffer = new TokenBuffer<V>(1, this); - - /* - * If we are using prefix match ('*' operator) then we don't want to - * filter stopwords from the search query. - */ - final boolean filterStopwords = !prefixMatch; - - index(buffer, // - null, // docId // was Long.MIN_VALUE - Integer.MIN_VALUE, // fieldId - languageCode,// - new StringReader(query), // - filterStopwords// - ); - - if (buffer.size() == 0) { - - /* - * There were no terms after stopword extration. - */ - - log.warn("No terms after stopword extraction: query=" + query); - - a = new Hit[] {}; - - cache.put(cacheKey, a); - - return a; - - } - - qdata = buffer.get(0); - - qdata.normalize(); - - } - - final IHitCollector<V> hits; - - if (qdata.distinctTermCount() == 1) { - - final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); - - final String termText = e.getKey(); + // tokenize the query. + final TermFrequencyData<V> qdata = tokenize(query); + + // No terms after stopword extraction + if (qdata == null) { - final ITermMetadata md = e.getValue(); - - final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, prefixMatch, md - .getLocalTermWeight(), this); - - hits = new SingleTokenHitCollector<V>(task1); - - } else { - - final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>( - qdata.distinctTermCount()); - - int i = 0; - for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { - - final String termText = e.getKey(); - - final ITermMetadata md = e.getValue(); - - tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(), prefixMatch, md - .getLocalTermWeight(), this)); - - } - - hits = new MultiTokenHitCollector<V>(tasks); - - } - - // run the queries. 
- { - - final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>( - qdata.distinctTermCount()); - - int i = 0; - for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { - - final String termText = e.getKey(); - - final ITermMetadata md = e.getValue(); - - tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(), - prefixMatch, md.getLocalTermWeight(), this, hits)); - - } - - final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>( - getExecutorService(), timeout, unit); - - try { - - final long start = System.currentTimeMillis(); - - executionHelper.submitTasks(tasks); - - if (log.isInfoEnabled()) { - final long readTime = System.currentTimeMillis() - start; - log.info("read time: " + readTime); - } - - } catch (InterruptedException ex) { - - if (log.isInfoEnabled()) { - // TODO Should we wrap and toss this interrupt instead? - log.info("Interrupted - only partial results will be returned."); - } - - /* - * Yes, let's toss it. We were getting into a situation - * where the ExecutionHelper above received an interrupt - * but we still went through the heavy-weight filtering - * operations below (matchExact or matchRegex). - */ - throw new RuntimeException(ex); - - } catch (ExecutionException ex) { - - throw new RuntimeException(ex); - - } - - } - - a = hits.getHits(); - + cache.put(cacheKey, a = new Hit[] {}); + + return a; + + } + + a = executeQuery(qdata, prefixMatch, timeout, unit); + if (a.length == 0) { log.info("No hits: languageCode=[" + languageCode + "], query=[" - + query + "]"); + + queryStr + "]"); cache.put(cacheKey, a); @@ -1223,14 +1204,14 @@ */ if (matchExact) { - a = matchExact(a, query); + a = matchExact(a, queryStr); } if (a.length == 0) { log.warn("No hits after matchAllTerms pruning: languageCode=[" + languageCode + "], query=[" - + query + "]"); + + queryStr + "]"); cache.put(cacheKey, a); @@ -1260,7 +1241,7 @@ if (a.length == 0) { log.warn("No hits after regex pruning: languageCode=[" + languageCode + "], query=[" - + query + "], regex=[" + regex + "]"); + + queryStr + "], regex=[" + regex + "]"); cache.put(cacheKey, a); @@ -1299,6 +1280,27 @@ } + /* + * Take a slice of the hits based on min/max cosine and min/max rank. 
+ */ + a = slice(query, a); + + final long elapsed = System.currentTimeMillis() - begin; + + if (log.isInfoEnabled()) + log.info("Done: " + a.length + " hits in " + elapsed + "ms"); + + return a; + + } + + protected Hit<V>[] slice(final FullTextQuery query, Hit<V>[] a) { + + final double minCosine = query.getMinCosine(); + final double maxCosine = query.getMaxCosine(); + final int minRank = query.getMinRank(); + final int maxRank = query.getMaxRank(); + // if (log.isDebugEnabled()) { // log.debug("before min/max cosine/rank pruning:"); // for (Hit<V> h : a) @@ -1422,13 +1424,106 @@ } - final long elapsed = System.currentTimeMillis() - begin; + return a; - if (log.isInfoEnabled()) - log.info("Done: " + a.length + " hits in " + elapsed + "ms"); + } + + protected Hit<V>[] executeQuery(final TermFrequencyData<V> qdata, + final boolean prefixMatch, final long timeout, final TimeUnit unit) { + + final IHitCollector<V> hits; + + if (qdata.distinctTermCount() == 1) { + + final Map.Entry<String, ITermMetadata> e = qdata.getSingletonEntry(); + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); - return a; + final CountIndexTask<V> task1 = new CountIndexTask<V>(termText, 0, 1, + prefixMatch, md.getLocalTermWeight(), this); + + hits = new SingleTokenHitCollector<V>(task1); + + } else { + + final List<CountIndexTask<V>> tasks = new ArrayList<CountIndexTask<V>>( + qdata.distinctTermCount()); + + int i = 0; + for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + tasks.add(new CountIndexTask<V>(termText, i++, qdata.terms.size(), + prefixMatch, md.getLocalTermWeight(), this)); + + } + + hits = new MultiTokenHitCollector<V>(tasks); + + } + // run the queries. + { + + final List<Callable<Object>> tasks = new ArrayList<Callable<Object>>( + qdata.distinctTermCount()); + + int i = 0; + for (Map.Entry<String, ITermMetadata> e : qdata.terms.entrySet()) { + + final String termText = e.getKey(); + + final ITermMetadata md = e.getValue(); + + tasks.add(new ReadIndexTask<V>(termText, i++, qdata.terms.size(), + prefixMatch, md.getLocalTermWeight(), this, hits)); + + } + + final ExecutionHelper<Object> executionHelper = new ExecutionHelper<Object>( + getExecutorService(), timeout, unit); + + try { + + final long start = System.currentTimeMillis(); + + executionHelper.submitTasks(tasks); + + if (log.isInfoEnabled()) { + final long readTime = System.currentTimeMillis() - start; + log.info("read time: " + readTime); + } + + } catch (InterruptedException ex) { + + if (log.isInfoEnabled()) { + // TODO Should we wrap and toss this interrupt instead? + log.info("Interrupted - only partial results will be returned."); + } + + /* + * Yes, let's toss it. We were getting into a situation + * where the ExecutionHelper above received an interrupt + * but we still went through the heavy-weight filtering + * operations below (matchExact or matchRegex). 
+ */ + throw new RuntimeException(ex); + + } catch (ExecutionException ex) { + + throw new RuntimeException(ex); + + } + + } + + return hits.getHits(); + } /** Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/search/ReadIndexTask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -10,8 +10,6 @@ import com.bigdata.btree.ISimpleSplitHandler; import com.bigdata.btree.ITuple; import com.bigdata.btree.ITupleIterator; -import com.bigdata.btree.keys.IKeyBuilder; -import com.bigdata.btree.keys.SuccessorUtil; /** * Procedure reads on the terms index, aggregating data on a per-{@link Hit} @@ -131,12 +129,12 @@ log.debug("queryTerm=" + queryTerm + ", termWeight=" + queryTermWeight); - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); while (itr.hasNext()) { // don't test for interrupted on each result -- too much work. - if (nhits % 1000 == 0 && t.isInterrupted()) { + if (nhits % 1000 == 0 && Thread.interrupted()) { // if (log.isInfoEnabled()) log.warn("Interrupted: queryTerm=" + queryTerm + ", nhits=" Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/ndx/pipeline/AbstractSubtask.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -337,8 +337,8 @@ public boolean hasNext() throws InterruptedException { - // The thread in which this method runs. - final Thread t = Thread.currentThread(); +// // The thread in which this method runs. +// final Thread t = Thread.currentThread(); // when we start looking for a chunk. final long begin = System.nanoTime(); @@ -349,7 +349,7 @@ master.halted(); // interrupted? - if (t.isInterrupted()) { + if (Thread.interrupted()) { throw master.halt(new InterruptedException(toString())); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/service/proxy/ClientAsynchronousIterator.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -268,10 +268,13 @@ */ private class ReaderTask implements Callable<Void> { + @Override public Void call() throws Exception { - final Thread t = Thread.currentThread(); +// final Thread t = Thread.currentThread(); + boolean interrupted = false; + try { /* @@ -299,10 +302,11 @@ if (trace) System.err.print('~'); - if (t.isInterrupted()) { + if (Thread.interrupted()) { // thread interrupted, so we are done. - break; + interrupted = true; + break; // break out of while(true) } @@ -344,10 +348,11 @@ */ // don't call blocking method next() if we were interrupted. - if (t.isInterrupted()) { + if (Thread.interrupted()) { // thread interrupted, so we are done. 
- break; + interrupted = true; + break; // break out of while(true) } @@ -392,7 +397,7 @@ } if (INFO) - log.info("Reader is done."); + log.info("Reader is done: interrupted" + interrupted); return null; @@ -448,7 +453,8 @@ } } - + + @Override public void close() { if (future == null) { Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java 2014-04-15 12:51:02 UTC (rev 8120) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/java/com/bigdata/util/CSVReader.java 2014-04-15 13:01:24 UTC (rev 8121) @@ -71,9 +71,9 @@ */ public class CSVReader implements Iterator<Map<String, Object>> { - protected static final Logger log = Logger.getLogger(CSVReader.class); + private static final Logger log = Logger.getLogger(CSVReader.class); - protected static final boolean INFO = log.isInfoEnabled(); +// protected static final boolean INFO = log.isInfoEnabled(); /** * The #of characters to buffer in the reader. @@ -168,7 +168,7 @@ } - public Header(String name) { + public Header(final String name) { if (name == null) throw new IllegalArgumentException(); @@ -191,13 +191,13 @@ * * @return The parsed value. */ - public Object parseValue(String text) { + public Object parseValue(final String text) { for (int i = 0; i < formats.length; i++) { try { - Format f = formats[i]; + final Format f = formats[i]; if (f instanceof DateFormat) { @@ -229,23 +229,41 @@ /** * Equal if the headers have the same data. */ - public boolean equals(Header o) { - - if(this==o) return true; - - return name.equals(o.name); - + @Override + public boolean equals(final Object o) { + + if (this == o) + return true; + + if (!(o instanceof Header)) { + + return false; + + } + + return name.equals(((Header) o).name); + } +// public boolean equals(final Header o) { +// +// if(this==o) return true; +// +// return name.equals(o.name); +// +// } + /** * Based on the header name. 
*/ + @Override public int hashCode() { return name.hashCode(); } + @Override public String toString() { return name; @@ -293,7 +311,8 @@ */ protected Header[] headers; - public CSVReader(InputStream is, String charSet) throws IOException { + public CSVReader(final InputStream is, final String charSet) + throws IOException { if (is == null) throw new IllegalArgumentException(); @@ -306,7 +325,7 @@ } - public CSVReader(Reader r) throws IOException { + public CSVReader(final Reader r) throws IOException { if (r == null) throw new IllegalArgumentException(); @@ -340,9 +359,9 @@ } - public boolean setSkipBlankLines(boolean skipBlankLines) { + public boolean setSkipBlankLines(final boolean skipBlankLines) { - boolean tmp = this.skipBlankLines; + final boolean tmp = this.skipBlankLines; this.skipBlankLines = skipBlankLines; @@ -356,9 +375,9 @@ } - public boolean setTrimWhitespace(boolean trimWhitespace) { + public boolean setTrimWhitespace(final boolean trimWhitespace) { - boolean tmp = this.trimWhitespace; + final boolean tmp = this.trimWhitespace; this.trimWhitespace = trimWhitespace; @@ -384,10 +403,11 @@ } - public long setTailDelayMillis(long tailDelayMillis) { - - if(tailDelayMillis<0) throw new IllegalArgumentException(); - + public long setTailDelayMillis(final long tailDelayMillis) { + + if (tailDelayMillis < 0) + throw new IllegalArgumentException(); + long tmp = this.tailDelayMillis; this.tailDelayMillis = tailDelayMillis; @@ -396,9 +416,11 @@ } + @Override public boolean hasNext() { - if(exhausted) return false; + if (exhausted) + return false; if (line != null) { @@ -406,17 +428,19 @@ } - final Thread currentThread = Thread.currentThread(); +// final Thread currentThread = Thread.currentThread(); try { while (true) { - if(currentThread.isInterrupted()) { + if (Thread.interrupted()) { - if(INFO) + if (log.isInfoEnabled()) log.info("Interrupted"); + exhausted = true; + return false; } @@ -469,6 +493,7 @@ } + @Override public Map<String, Object> next() { if ... [truncated message content] |
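The edit repeated throughout r8121 replaces Thread.currentThread().isInterrupted() with Thread.interrupted() and then throws InterruptedException explicitly on a positive check. The two calls differ in one important way: isInterrupted() merely observes the thread's interrupt status, while the static Thread.interrupted() observes it and clears it as a side effect. A minimal, self-contained sketch (not part of the commit) demonstrating the difference:

public class InterruptCheckDemo {
    public static void main(String[] args) {
        // Set the current thread's own interrupt status.
        Thread.currentThread().interrupt();

        // isInterrupted() only observes the flag; it remains set afterwards.
        System.out.println(Thread.currentThread().isInterrupted()); // true
        System.out.println(Thread.currentThread().isInterrupted()); // still true

        // Thread.interrupted() observes the flag AND clears it.
        System.out.println(Thread.interrupted()); // true, and the flag is now cleared
        System.out.println(Thread.interrupted()); // false

        // Because a positive Thread.interrupted() check consumes the status,
        // the converted call sites in the commit immediately throw
        // InterruptedException so the interrupt is surfaced to the caller
        // rather than silently dropped.
    }
}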
From: <tho...@us...> - 2014-04-15 12:51:05
Revision: 8120 http://sourceforge.net/p/bigdata/code/8120 Author: thompsonbry Date: 2014-04-15 12:51:02 +0000 (Tue, 15 Apr 2014) Log Message: ----------- Added test case that demonstrates #888 and a test case that demonstrates a workaround for #888. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java 2014-04-14 18:44:26 UTC (rev 8119) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/TestNamedGraphs.java 2014-04-15 12:51:02 UTC (rev 8120) @@ -641,4 +641,45 @@ } + /** + * Note: This is a duplicate of <a href="http://trac.bigdata.com/ticket/792> + * GRAPH ?g { FILTER NOT EXISTS { ?s ?p ?o } } not respecting ?g </a> + * + * @see <a href="http://trac.bigdata.com/ticket/888> GRAPH ignored by FILTER + * NOT EXISTS </a> + */ + public void test_named_graphs_ticket_888() throws Exception { + + if(!store.isQuads()) + return; + + new TestHelper( + "named-graphs-ticket-888",// testURI + "named-graphs-ticket-888.rq", // queryURI + "named-graphs-ticket-888.trig", // dataURI + "named-graphs-ticket-888.srx" // resultURI + ).runTest(); + + } + + /** + * Unit test of a work around for {@link #test_named_graphs_ticket_888()}. + * + * @see <a href="http://trac.bigdata.com/ticket/888> GRAPH ignored by FILTER + * NOT EXISTS </a> + */ + public void test_named_graphs_ticket_888b() throws Exception { + + if(!store.isQuads()) + return; + + new TestHelper( + "named-graphs-ticket-888",// testURI + "named-graphs-ticket-888b.rq", // queryURI + "named-graphs-ticket-888.trig", // dataURI + "named-graphs-ticket-888.srx" // resultURI + ).runTest(); + + } + } Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.rq 2014-04-15 12:51:02 UTC (rev 8120) @@ -0,0 +1,9 @@ +PREFIX : <http://sample.com/> + +SELECT DISTINCT ?g +WHERE { + GRAPH ?g { + ?s ?p ?o . + FILTER NOT EXISTS { <http://sample.com/I> <http://sample.com/live> "a_live". 
}
+  }
+}

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.srx	2014-04-15 12:51:02 UTC (rev 8120)
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+<sparql
+    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+    xmlns:xs="http://www.w3.org/2001/XMLSchema#"
+    xmlns="http://www.w3.org/2005/sparql-results#" >
+  <head>
+    <variable name="g"/>
+  </head>
+  <results>
+    <result>
+      <binding name="g">
+        <uri>http://graph.com/test_1</uri>
+      </binding>
+    </result>
+  </results>
+</sparql>
\ No newline at end of file

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888.trig	2014-04-15 12:51:02 UTC (rev 8120)
@@ -0,0 +1,14 @@
+@prefix : <http://sample.com/> .
+
+<http://graph.com/test> {
+  :I :am "a" .
+  :I :live "a_live" .
+  :I :work "a_work" .
+}
+
+<http://graph.com/test_1> {
+  :I :am "a" .
+  :I :am "b" .
+  :I :work "a_work" .
+  :I :work "b_work" .
+}

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/named-graphs-ticket-888b.rq	2014-04-15 12:51:02 UTC (rev 8120)
@@ -0,0 +1,7 @@
+PREFIX : <http://sample.com/>
+
+SELECT DISTINCT ?g
+WHERE {
+  GRAPH ?g { ?s ?p ?o }
+  FILTER NOT EXISTS { GRAPH ?g { <http://sample.com/I> <http://sample.com/live> "a_live". }}
+}
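The failing query in r8120 places the FILTER NOT EXISTS inside the GRAPH ?g block, while the workaround (named-graphs-ticket-888b.rq) hoists the filter out and re-scopes its inner pattern with an explicit GRAPH ?g wrapper, so the existence test is evaluated against the graph under consideration. A rough sketch of exercising the fixture and the workaround query outside the test harness, written against the plain OpenRDF/Sesame API (not anything in this commit) and assuming a SPARQL 1.1-capable store; per the .srx fixture, only http://graph.com/test_1 should come back:

import java.io.File;

import org.openrdf.query.QueryLanguage;
import org.openrdf.query.TupleQueryResult;
import org.openrdf.repository.Repository;
import org.openrdf.repository.RepositoryConnection;
import org.openrdf.repository.sail.SailRepository;
import org.openrdf.rio.RDFFormat;
import org.openrdf.sail.memory.MemoryStore;

public class Ticket888Sketch {
    public static void main(String[] args) throws Exception {
        final Repository repo = new SailRepository(new MemoryStore());
        repo.initialize();
        final RepositoryConnection cxn = repo.getConnection();
        try {
            // Load the quads; the TriG format preserves the named graph contexts.
            cxn.add(new File("named-graphs-ticket-888.trig"),
                    "http://sample.com/", RDFFormat.TRIG);

            // The workaround form: NOT EXISTS is explicitly re-scoped to ?g.
            final String query =
                "PREFIX : <http://sample.com/>\n" +
                "SELECT DISTINCT ?g WHERE {\n" +
                "  GRAPH ?g { ?s ?p ?o }\n" +
                "  FILTER NOT EXISTS { GRAPH ?g { :I :live \"a_live\" } }\n" +
                "}";

            final TupleQueryResult result = cxn.prepareTupleQuery(
                    QueryLanguage.SPARQL, query).evaluate();
            try {
                while (result.hasNext()) {
                    // Expected binding: http://graph.com/test_1
                    System.out.println(result.next().getValue("g"));
                }
            } finally {
                result.close();
            }
        } finally {
            cxn.close();
        }
    }
}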
From: <dme...@us...> - 2014-04-14 18:44:30
Revision: 8119
          http://sourceforge.net/p/bigdata/code/8119
Author:   dmekonnen
Date:     2014-04-14 18:44:26 +0000 (Mon, 14 Apr 2014)
Log Message:
-----------
added environment variable export comment for vagrant commands

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt	2014-04-14 12:59:10 UTC (rev 8118)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt	2014-04-14 18:44:26 UTC (rev 8119)
@@ -53,7 +53,7 @@
 
    % easy_install boto
 
-If while running the python scripts the error message appears “ImportError: No module named boto”, you will need to set the
+If while running the python scripts the error message appears "ImportError: No module named boto", you will need to set the
 PYTHONPATH environment variable, for example:
 
    % export PYTHONPATH=/usr/local/lib/python2.7/site-packages
@@ -72,6 +72,7 @@
 
 SSH to a specific node:
 
+   % source aws.rc   # all vagrant commands will depend on exported AWS environment variables
    % vagrant ssh bigdataA
From: <dme...@us...> - 2014-04-14 12:59:13
Revision: 8118 http://sourceforge.net/p/bigdata/code/8118 Author: dmekonnen Date: 2014-04-14 12:59:10 +0000 (Mon, 14 Apr 2014) Log Message: ----------- Initial commit for BigdataHA cluster deployment automation. Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb
    branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,12 @@
+site :opscode
+
+cookbook "apt"
+cookbook 'java', '~> 1.22.0'
+cookbook 'ant'
+cookbook 'subversion'
+cookbook 'lvm'
+cookbook "hadoop"
+cookbook "emacs"
+# cookbook "ganglia"
+
+metadata

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,8 @@
+# CHANGELOG for systap-aws-bigdata-ha
+
+This file is used to list changes made in each version of systap-aws-bigdata-ha.
+
+## 0.1.0:
+
+* Initial release of systap-aws-bigdata-ha
+

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,3 @@
+source 'https://rubygems.org'
+
+gem 'berkshelf'

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,94 @@
+REQUIREMENTS
+============
+This Vagrant resource has been tested against the following versions of required resources:
+
+  Vagrant: 1.4.3
+  Vagrant Plugins:
+    * nugrant (1.4.2)
+    * vagrant-aws (0.4.1)
+    * vagrant-berkshelf (1.3.7)
+
+     Chef: 11.10.4
+Berkshelf: 2.0.10
+   Python: 2.7.5
+     Ruby: 1.9.3p448 (2013-06-27 revision 41675) [x86_64-darwin12.3.0]
+     Boto: 2.27.0
+
+
+
+CONFIGURATION
+=============
+
+AWS
+---
+Your organization's AWS access credentials are essential to launching the cluster. Please retrieve them before attempting to bring up the cluster:
+
+  * AWS Access Key ID
+  * AWS Secret Access Key
+  * AWS Keypair Name
+  * The SSH Private Key file corresponding to the keypair
+  * AWS Security Group for the cluster nodes to join [must minimally allow public TCP access to ports 22 and 8080]
+
+
+All AWS settings reside in the "aws.rc" file. You must edit this file and set AWS values accordingly.
+
+
+Vagrant
+-------
+Vagrant will need the required plugins (see above); if not already installed, they may be added with:
+
+   % vagrant plugin install nugrant
+   % vagrant plugin install vagrant-aws
+   % vagrant plugin install vagrant-berkshelf
+
+
+Boto: AWS API
+-------------
+The "Boto" Python library for the AWS API must be installed in order to instantiate the cluster. If not already installed:
+
+   % pip install boto
+
+alternatively:
+
+   % easy_install boto
+
+
+If while running the python scripts the error message appears “ImportError: No module named boto”, you will need to set the
+PYTHONPATH environment variable, for example:
+
+   % export PYTHONPATH=/usr/local/lib/python2.7/site-packages
+
+
+
+LAUNCHING BIGDATA HA CLUSTER
+============================
+
+The cluster may be brought up with:
+
+   % ./bin/createCluster.sh
+
+Launching the cluster may take up to 10 minutes. When complete, the cluster creation script will present the status page URL for each node.
+
+
+SSH to a specific node:
+
+   % vagrant ssh bigdataA
+
+
+Stop & Start the cluster:
+
+   % vagrant halt
+   % vagrant up
+
+
+Terminating the cluster:
+
+   % vagrant destroy
+
+
+Troubleshooting
+---------------
+If a host is slow to start up, there can be an initial connection failure. For example, the bigdataA "status" page may not
+appear if bigdataB or bigdataC is slow to start up. In this case log into bigdataA ("vagrant ssh bigdataA") and restart
+the service ("sudo /etc/init.d/bigdataHA restart") and the host will connect as expected.
+

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,5 @@
+# encoding: utf-8
+
+require 'bundler'
+require 'bundler/setup'
+require 'berkshelf/thor'

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile	2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,227 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+ENV['VAGRANT_DEFAULT_PROVIDER'] = 'aws'
+
+Vagrant.require_plugin "vagrant-berkshelf"
+
+Vagrant.configure("2") do |config|
+  # All Vagrant configuration is done here. The most common configuration
+  # options are documented and commented below. For a complete reference,
+  # please see the online documentation at vagrantup.com.
+ +config.vm.define :bigdataA do |bigdataA| + bigdataA.vm.box = "dummy" + bigdataA.vm.hostname = ENV['BIGDATA_HA_HOST_A'] + + bigdataA.berkshelf.enabled = true + + bigdataA.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_A'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataA.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataA.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataA + + +config.vm.define :bigdataB do |bigdataB| + bigdataB.vm.box = "dummy" + bigdataB.vm.hostname = ENV['BIGDATA_HA_HOST_B'] + + bigdataB.berkshelf.enabled = true + + bigdataB.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_B'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataB.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataB.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataB + + +config.vm.define :bigdataC do 
|bigdataC| + bigdataC.vm.box = "dummy" + bigdataC.vm.hostname = ENV['BIGDATA_HA_HOST_C'] + + bigdataC.berkshelf.enabled = true + + bigdataC.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_C'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataC.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataC.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataC + +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,32 @@ +# Who runs bigdata? +default['systap-bigdataHA'][:bigdata_user] = "bigdata" +default['systap-bigdataHA'][:bigdata_group] = "bigdata" + +# Where to find and build bigdata code +default['systap-bigdataHA'][:svn] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" +default['systap-bigdataHA'][:source] = "/home/ubuntu/bigdata-code" + +# Name of the federation of services (controls the Apache River GROUPS). +default['systap-bigdataHA'][:fedname] = 'my-cluster-1' + +# Path for local storage for this federation of services. +default['systap-bigdataHA'][:fed_dir] = '/var/lib/bigdata' + +# Where the bigdata-ha.jnl file will live: +default['systap-bigdataHA'][:data_dir] = node['systap-bigdataHA'][:fed_dir] + "/data" + +# Where the log files will live: +default['systap-bigdataHA'][:log_dir] = node['systap-bigdataHA'][:fed_dir] + "/logs" + +# Name of the replication cluster to which this HAJournalServer will belong. +default['systap-bigdataHA'][:logical_service_id] = 'HA-Replication-Cluster-1' + +# Where to find the Apache River service registrars (can also use multicast). +default['systap-bigdataHA'][:river_locator1] = 'bigdataA' +default['systap-bigdataHA'][:river_locator2] = 'bigdataB' +default['systap-bigdataHA'][:river_locator3] = 'bigdataC' + +# Where to find the Apache Zookeeper ensemble. 
+default['systap-bigdataHA'][:zk_server1] = 'bigdataA' +default['systap-bigdataHA'][:zk_server2] = 'bigdataB' +default['systap-bigdataHA'][:zk_server3] = 'bigdataC' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,45 @@ +# +# Set your organization's AWS access credentials here: +# +export AWS_ACCESS_KEY_ID="YOUR AWS ACCESS KEY ID" +export AWS_SECRET_ACCESS_KEY="YOUR AWS SECRET ACCESS KEY" +export AWS_SSH_PRIVATE_KEY="/path/to/your/private_key.pem" +export AWS_KEYPAIR_NAME="YOUR AWS KEYPAIR NAME" + + +# +# Add a single security group here (a list will be supported later). +# The security group must minimally allow outside access to ports 22 and 8080. +# +# SSH TCP 22 0.0.0.0/0 +# Custom TCP Rule TCP 8080 0.0.0.0/0 +# +export AWS_SECURITY_GROUPS="YOUR AWS SECURITY GROUP" + + +# +# Adjust as desired: +# +export AWS_REGION="us-east-1" + + +# +# Ubuntu 12.04 settings: +# +export AWS_AMI="ami-59a4a230" +export AWS_AMI_USERNAME="ubuntu" + + +# +# The SSD configuration assumes the m3.xlarge size. The SSD recipe should work for +# SSD devices (not more than two) of any size, but has not been tested. +# +export AWS_INSTANCE_TYPE="m3.xlarge" + + +# +# Default host names, adjust as desired: +# +export BIGDATA_HA_HOST_A="bigdataA" +export BIGDATA_HA_HOST_B="bigdataB" +export BIGDATA_HA_HOST_C="bigdataC" Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,11 @@ +#! /bin/sh + +export PYTHONPATH=/usr/local/lib/python2.7/site-packages + +source aws.rc +python ./bin/createSecurityGroup.py +source .aws_security_group +rm .aws_security_group +vagrant up +echo "Vagrant up completed. Setting host names..." +python ./bin/setHosts.py Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,22 @@ +#! /usr/bin/python + +import os +from boto import ec2 +from boto.manage.cmdshell import sshclient_from_instance +import paramiko +from datetime import datetime + + +if __name__ == '__main__': + + # create a security group fo this cluster only. 
Just create it now so that it can be associated with the new + # instance at create time. Add rules to this group once the instance IP addresses are known. + + ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] ) + + group_name = "BDHA " + str( datetime.utcnow() ) + + group = ec2conn.create_security_group( group_name, "BigdataHA Security Group" ) + + envFile = open( ".aws_security_group", "w" ) + envFile.write( 'export AWS_SECURITY_GROUP_PRIVATE="' + group_name + '"') Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,91 @@ +#! /usr/bin/python + +import os +import sys +from boto import ec2 +from boto.manage.cmdshell import sshclient_from_instance +import paramiko + +bigdataA = os.environ["BIGDATA_HA_HOST_A"] +bigdataB = os.environ["BIGDATA_HA_HOST_B"] +bigdataC = os.environ["BIGDATA_HA_HOST_C"] + +hostMap = {} +bigdataHosts = [None] * 3 + +def createHostAdditions( instances ): + hostsAdd = "\n" + for instance in instances: + data = instance.__dict__ + if bigdataA in data['tags']['Name']: + bigdataHosts[0] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataA + "\\n" + hostMap[ bigdataA ] = data[ 'private_ip_address' ] + elif bigdataB in data['tags']['Name']: + bigdataHosts[1] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataB + "\\n" + hostMap[ bigdataB ] = data[ 'private_ip_address' ] + elif bigdataC in data['tags']['Name']: + bigdataHosts[2] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataC + "\\n" + hostMap[ bigdataC ] = data[ 'private_ip_address' ] + + return hostsAdd + +def createZookeeperSubstitution( index, host, ipAddress ): + return "sudo sed -i 's|server." + index + "=" + host + "|server." + index + "=" + ipAddress + "|' /etc/zookeeper/conf/zoo.cfg" + +def createBigdataHASubstitution( host, ipAddress ): + return "sudo sed -i 's|" + host + "|" + ipAddress + "|' /etc/defaults/bigdataHA" + +if __name__ == '__main__': + + ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] ) + runningFilter = {'instance-state-name':'running'} # only running states + reservations = ec2conn.get_all_instances( filters=runningFilter ) + instances = [i for r in reservations for i in r.instances] + + hostsAdd = createHostAdditions( instances ) + + # Create an SSH client for our instance + # key_path is the path to the SSH private key associated with instance + # user_name is the user to login as on the instance (e.g. ubuntu, ec2-user, etc.) 
+ key_path = os.environ["AWS_SSH_PRIVATE_KEY"] + + private_security_group_name = os.environ["AWS_SECURITY_GROUP_PRIVATE"] + group = ec2conn.get_all_security_groups( private_security_group_name )[0] + + i = 1 + for host in bigdataHosts: + ssh_client = sshclient_from_instance( host, key_path, user_name='ubuntu' ) + # ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Run the command. Returns a tuple consisting of: + # The integer status of the command + # A string containing the output of the command + # A string containing the stderr output of the command + status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo \"" + hostsAdd + "\" >> /etc/hosts'" ) + status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo " + str(i) + " > /var/lib/zookeeper/myid'" ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "1", bigdataA, hostMap[ bigdataA ] ) ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "2", bigdataB, hostMap[ bigdataB ] ) ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "3", bigdataC, hostMap[ bigdataC ] ) ) + + name = host.__dict__['tags']['Name'] + hostAddress = host.__dict__['private_ip_address'] + # status, stdin, stderr = ssh_client.run( createBigdataHASubstitution( name, hostAddress ) ) + + hostAddress = hostAddress + "/32" + group.authorize( ip_protocol="tcp", from_port="0", to_port="65535", cidr_ip=hostAddress, src_group=None ) + + i += 1 + # + # startHAServices does not exit as expected, so remote restart commands will hang. + # As a work around, we restart the host: + # + # status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/bigdataHA restart" ) + # status, stdin, stderr = ssh_client.run( "sudo service bigdataHA restart" ) + host.reboot() + + print "The hosts are now rebooting, this may take several minutes. \nOnce back up, you may confirm status by visiting:\n" + for host in bigdataHosts: + print "\thttp://" + host.__dict__['ip_address'] + ":8080/status\n" Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,96 @@ +# Put files/directories that should be ignored in this file when uploading +# or sharing to the community site. +# Lines that start with '# ' are comments. + +# OS generated files # +###################### +.DS_Store +Icon? 
+nohup.out +ehthumbs.db +Thumbs.db + +# SASS # +######## +.sass-cache + +# EDITORS # +########### +\#* +.#* +*~ +*.sw[a-z] +*.bak +REVISION +TAGS* +tmtags +*_flymake.* +*_flymake +*.tmproj +.project +.settings +mkmf.log + +## COMPILED ## +############## +a.out +*.o +*.pyc +*.so +*.com +*.class +*.dll +*.exe +*/rdoc/ + +# Testing # +########### +.watchr +.rspec +spec/* +spec/fixtures/* +test/* +features/* +Guardfile +Procfile + +# SCM # +####### +.git +*/.git +.gitignore +.gitmodules +.gitconfig +.gitattributes +.svn +*/.bzr/* +*/.hg/* +*/.svn/* + +# Berkshelf # +############# +Berksfile +Berksfile.lock +cookbooks/* +tmp + +# Cookbooks # +############# +CONTRIBUTING +CHANGELOG* + +# Strainer # +############ +Colanderfile +Strainerfile +.colander +.strainer + +# Vagrant # +########### +.vagrant +Vagrantfile + +# Travis # +########## +.travis.yml Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,6 @@ +require 'minitest/spec' +describe_recipe 'systap-bigdata::test' do + it "is running the tomcat server" do + service('tomcat').must_be_running + end +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,15 @@ +name 'systap-bigdataHA' +maintainer 'Daniel Mekonnen' +maintainer_email 'daniel<no-spam-at>systap.com' +license 'All rights reserved' +description 'Installs/Configures Systap Bigdata High Availability' +long_description IO.read(File.join(File.dirname(__FILE__), 'README.txt')) +version '0.1.0' + +depends 'apt' +depends 'java', '>= 1.22.0' +depends 'ant' +depends 'subversion' +depends 'lvm' +depends 'hadoop' +depends 'emacs' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,142 @@ +# +# Cookbook Name:: systap-bigdataHA +# Recipe:: default +# +# Copyright 2014, Systap +# +# + + +group "bigdata" do + action :create + append true +end + +user "#{node['systap-bigdataHA'][:bigdata_user]}" do + gid "#{node['systap-bigdataHA'][:bigdata_group]}" + supports :manage_home => true + shell "/bin/false" + home "#{node['systap-bigdataHA'][:fed_dir]}" + system true + action :create +end + +# directory node['systap-bigdataHA'][:fed_dir] do +execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do + user "root" + group "root" + cwd "#{node['systap-bigdataHA'][:fed_dir]}" + command "chown 
-R #{node['systap-bigdataHA'][:bigdata_user]}:#{node['systap-bigdataHA'][:bigdata_group]} ." +end + +execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0 #{node['systap-bigdataHA'][:source]}" +end + +execute "ant deploy-artifact" do + user 'ubuntu' + group 'ubuntu' + cwd "#{node['systap-bigdataHA'][:source]}" + command "ant deploy-artifact" +end + +execute "deflate REL tar" do + user 'bigdata' + group 'bigdata' + cwd "#{node['systap-bigdataHA'][:fed_dir]}/.." + command "tar xvf #{node['systap-bigdataHA'][:source]}/REL.bigdata-1.3.0-*.tgz" +end + +execute "copy over the /etc/init.d/bigdataHA file" do + user 'root' + group 'root' + cwd "#{node['systap-bigdataHA'][:fed_dir]}/etc/init.d" + command "cp bigdataHA /etc/init.d/bigdataHA; chmod 00755 /etc/init.d/bigdataHA" +end + +# +# Copy the /etc/init.d/bigdataHA template: +# +# template "/etc/init.d/bigdataHA" do +# source "init.d/bigdataHA.erb" +# user 'root' +# group 'root' +# mode 00755 +# end + +# +# Create the log directory for bigdata: +# +directory node['systap-bigdataHA'][:log_dir] do + owner "bigdata" + group "bigdata" + mode 00755 + action :create +end + +# +# Install the log4jHA.properties file: +# +template "#{node['systap-bigdataHA'][:fed_dir]}/var/config/logging/log4jHA.properties" do + source "log4jHA.properties.erb" + owner 'bigdata' + group 'bigdata' + mode 00644 +end + +# +# Install the log4jHA.properties file: +# +template "#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF/jetty.xml" do + source "jetty.xml.erb" + owner 'bigdata' + group 'bigdata' + mode 00644 +end + + +# +# Set the absolute path to the RWStore.properties file +# +execute "set absolute path to RWStore.properties" do + cwd "#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF" + command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF/RWStore.properties|' web.xml" +end + +# +# Copy the /etc/default/bigdataHA template: +# +template "/etc/default/bigdataHA" do + source "default/bigdataHA.erb" + user 'root' + group 'root' + mode 00644 +end + +service "bigdataHA" do + supports :restart => true, :status => true + action [ :enable, :start ] +end + +# +# Install the zoo.cfg file: +# +template "/etc/zookeeper/conf/zoo.cfg" do + source "zoo.cfg.erb" + owner 'root' + group 'root' + mode 00644 +end + +# +# the hadoop cookbook overlooks the log4j.properties file presently, but a future version may get this right: +# +execute "copy the distribution log4j.properties file" do + user 'root' + group 'root' + cwd "/etc/zookeeper/conf.chef" + command "cp ../conf.dist/log4j.properties ." 
+end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,32 @@ +# http://jamie.mccrindle.org/2013/07/installing-oracle-java-7-using-chef.html +# +# Cookbook Name:: java7 +# Recipe:: default +# + +apt_repository "webupd8team" do + uri "http://ppa.launchpad.net/webupd8team/java/ubuntu" + components ['main'] + distribution node['lsb']['codename'] + keyserver "keyserver.ubuntu.com" + key "EEA14886" + deb_src true +end + +execute "remove openjdk-6" do + command "apt-get -y remove --purge openjdk-6-jdk openjdk-6-jre openjdk-6-jre-headless openjdk-6-jre-lib" +end + + +# could be improved to run only on update +execute "accept-license" do + command "echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections" +end + +package "oracle-java7-installer" do + action :install +end + +package "oracle-java7-set-default" do + action :install +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,29 @@ +# +# Cookbook Name:: planx-aws-bigdata-rdr +# Recipe:: ssd +# +include_recipe "lvm" + +# +# SSD Setup +# +directory node['systap-bigdataHA'][:data_dir] do + owner "root" + group "root" + mode 00755 + action :create + recursive true +end + + +lvm_volume_group 'vg' do + action :create + physical_volumes ['/dev/xvdb', '/dev/xvdc'] + + logical_volume 'lv_bigdata' do + size '100%VG' + filesystem 'ext4' + mount_point location: node['systap-bigdataHA'][:data_dir], options: 'noatime,nodiratime' + # stripes 4 + end +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,51 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +BD_USER="<%= node['systap-bigdataHA'][:bigdata_user] %>" +BD_GROUP="<%= node['systap-bigdataHA'][:bigdata_group] %>" + +binDir=<%= node['systap-bigdataHA'][:fed_dir] %>/bin +pidFile=<%= node['systap-bigdataHA'][:fed_dir] %>/var/lock/pid + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. 
+##

+# Name of the bigdata federation of services. Override for real install.
+export FEDNAME=<%= node['systap-bigdataHA'][:fedname] %>
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication clusters, then just
+# give each such cluster its own name.
+export LOGICAL_SERVICE_ID=<%= node['systap-bigdataHA'][:logical_service_id] %>
+
+# Local directory where the service will store its state.
+export FED_DIR=<%= node['systap-bigdataHA'][:fed_dir] %>
+export DATA_DIR=<%= node['systap-bigdataHA'][:data_dir] %>
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="${FEDNAME}"
+export LOCATORS="jini://<%= node['systap-bigdataHA'][:river_locator1] %>/,jini://<%= node['systap-bigdataHA'][:river_locator2] %>/,jini://<%= node['systap-bigdataHA'][:river_locator3] %>/"
+
+# Apache ZooKeeper - NO default.
+export ZK_SERVERS="<%= node['systap-bigdataHA'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['systap-bigdataHA'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['systap-bigdataHA'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>"
+
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export JETTY_PORT=8080
+#export JETTY_XML=var/jetty/jetty.xml
+#export JETTY_RESOURCE_BASE=var/jetty/html
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=

Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb 2014-04-14 12:59:10 UTC (rev 8118)
@@ -0,0 +1,133 @@
+#!/bin/bash

+# init.d style script for bigdata HA services. The script can be used
+# to 'start' or 'stop' services.
+#
+# Environment:
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Misc.
+#
+# See http://tldp.org/LDP/abs/html/index.html
+#
+# Note: Blank lines are significant in shell scripts.
+#
+# Note: Children must do "exit 0" to indicate success.
+#
+# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix

+# Source function library (just used for 'action'). If you don't have this
+# it SHOULD automatically use the inline definition for "action()".
+if [ -f "/etc/init.d/functions" ]; then
+    . /etc/init.d/functions
+else
+# Run some action. Log its output. No fancy colors. First argument is the
+# label for the log file. Remaining arguments are the command to execute
+# and its arguments, if any.
+    action() {
+        local STRING rc
+        STRING=$1
+        echo -n "$STRING "
+        shift
+        sudo -u bigdata -g bigdata "$@" && echo -n "[OK]" || echo -n "[FAILED]"
+        rc=$?
+        echo
+        return $rc
+    }
+fi

+# Where the scripts live.
+cd `dirname $0`

+##
+# Highly Recommended OS Tuning.
+##

+# Do not swap out applications while there is free memory.
+#/sbin/sysctl -w vm.swappiness=0

+# Setup the environment.
+source /etc/default/bigdataHA

+if [ -z "$binDir" ]; then
+    echo $"$0 : environment not setup: binDir is undefined."
+ exit 1; +fi +if [ -z "$pidFile" ]; then + echo $"$0 : environment not setup: pidFile is undefined" + exit 1; +fi + +# +# See how we were called. +# +case "$1" in + start) + + cd <%= node['systap-bigdataHA'][:fed_dir] %> +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + action $"`date` : `hostname` : bringing down services: " kill $pid + rm -f "$pidFile" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,60 @@ +<?xml version="1.0"?> +<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> +<!-- See http://www.eclipse.org/jetty/documentation/current/ --> +<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> +<Configure id="Server" class="org.eclipse.jetty.server.Server"> + + <!-- =========================================================== --> + <!-- Server Thread Pool --> + <!-- =========================================================== --> + <Set name="ThreadPool"> + <!-- Default queued blocking threadpool --> + <New class="org.eclipse.jetty.util.thread.QueuedThreadPool"> + <Set name="minThreads">10</Set> + <Set name="maxThreads">64</Set> + </New> + </Set> + + <!-- =========================================================== --> + <!-- Set connectors --> + <!-- =========================================================== --> + + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.nio.SelectChannelConnector"> + <Set name="host"><Property name="jetty.host" 
/></Set> + <Set name="port"><Property name="jetty.port" default="8080"/></Set> + </New> + </Arg> + </Call> + + <!-- =========================================================== --> + <!-- Set handler Collection Structure --> + <!-- =========================================================== --> + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> + <!-- The location of the top-level of the bigdata webapp. --> + <Set name="resourceBase"> + <Property name="jetty.resourceBase" default="<%= node['systap-bigdataHA'][:fed_dir] %>/var/jetty" /> + </Set> + <Set name="contextPath">/</Set> + <Set name="descriptor"><%= node['systap-bigdataHA'][:fed_dir] %>/var/jetty/WEB-INF/web.xml</Set> + <Set name="parentLoaderPriority">true</Set> + <Set name="extractWAR">false</Set> + <Set name="welcomeFiles"> + <Array type="java.lang.String"> + <Item>index.html</Item> + </Array> + </Set> + </New> + </Item> + </Array> + </Set> + </New> + </Set> + +</Configure> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,85 @@ +## +# This is the default log4j configuration for distribution and CI tests. +## + +# Note: logging at INFO or DEBUG will significantly impact throughput! +log4j.rootCategory=WARN, dest2 + +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.rdf.store.DataLoader=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO + +# This will only work if you have the slf4j bridge setup. +#log4j.org.eclipse.jetty.util.log.Log=INFO + +# This can provide valuable information about open connections. +log4j.logger.com.bigdata.txLog=INFO + +# HA related loggers (debugging only) +log4j.logger.com.bigdata.ha=INFO +#log4j.logger.com.bigdata.haLog=INFO +##log4j.logger.com.bigdata.rwstore=ALL +#log4j.logger.com.bigdata.journal=INFO +##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL +log4j.logger.com.bigdata.journal.jini.ha=INFO +##log4j.logger.com.bigdata.service.jini.lookup=ALL +log4j.logger.com.bigdata.quorum=INFO +log4j.logger.com.bigdata.quorum.zk=INFO +#log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain +##log4j.logger.com.bigdata.io.writecache=ALL + +# dest2 includes the thread name and elapsed milliseconds. +# Note: %r is elapsed milliseconds. +# Note: %t is the thread name. 
+# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +#log4j.appender.dest2=org.apache.log4j.ConsoleAppender +log4j.appender.dest2=org.apache.log4j.RollingFileAppender +log4j.appender.dest2.File=<%= node['systap-bigdataHA'][:log_dir] %>/HAJournalServer.log +log4j.appender.dest2.MaxFileSize=500MB +log4j.appender.dest2.MaxBackupIndex=20 +log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n + +## destPlain +#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender +#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout +#log4j.appender.destPlain.layout.ConversionPattern= + +## +# Summary query evaluation log (tab delimited file). +#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +log4j.additivity.com.bigdata.bop.engine.QueryLog=false +log4j.appender.queryLog=org.apache.log4j.FileAppender +log4j.appender.queryLog.Threshold=ALL +log4j.appender.queryLog.File=<%= node['systap-bigdataHA'][:log_dir] %>/queryLog.csv +log4j.appender.queryLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryLog.BufferedIO=false +log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryLog.layout.ConversionPattern=%m + +## +# BOp run state trace (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false +log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender +log4j.appender.queryRunStateLog.Threshold=ALL +log4j.appender.queryRunStateLog.File=<%= node['systap-bigdataHA'][:log_dir] %>/queryRunState.log +log4j.appender.queryRunStateLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. 
+log4j.appender.queryRunStateLog.BufferedIO=false +log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,15 @@ +clientPort=<%= node['zookeeper'][:zoocfg][:clientPort] %> +dataDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +dataLogDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 +# the ensemble +server.1=<%= node['systap-bigdataHA'][:zk_server1] %>:2888:3888 +server.2=<%= node['systap-bigdataHA'][:zk_server2] %>:2888:3888 +server.3=<%= node['systap-bigdataHA'][:zk_server3] %>:2888:3888 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
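The zoo.cfg template above assembles a three-member ZooKeeper ensemble from Chef node attributes. A quick way to verify the ensemble after convergence is ZooKeeper's four-letter-word commands: "ruok" answers "imok" on a live server, and "stat" reports whether a member is the leader or a follower. A minimal sketch, assuming the default client port 2181 and the hypothetical host names zk1, zk2 and zk3 standing in for the templated zk_server attributes:

    #!/bin/bash
    # Probe each ensemble member; a healthy 3-node ensemble answers "imok"
    # three times and reports exactly one "Mode: leader".
    for host in zk1 zk2 zk3; do      # stand-ins for zk_server1..zk_server3
        echo -n "$host: "
        echo ruok | nc "$host" 2181; echo
        echo stat | nc "$host" 2181 | grep Mode
    done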
From: <mar...@us...> - 2014-04-14 12:50:48
|
Revision: 8117 http://sourceforge.net/p/bigdata/code/8117 Author: martyncutcher Date: 2014-04-14 12:50:44 +0000 (Mon, 14 Apr 2014) Log Message: ----------- Add HA1 and HA5 tests to suite Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-14 12:41:29 UTC (rev 8116) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestAll.java 2014-04-14 12:50:44 UTC (rev 8117) @@ -108,6 +108,15 @@ // Test suite of longer running stress tests for an HA3 cluster. suite.addTestSuite(StressTestHA3JournalServer.class); + // Test suite of longer running stress tests for an HA5 cluster. + suite.addTestSuite(TestHA5JournalServer.class); + suite.addTestSuite(TestHA5JournalServerWithHALogs.class); + + // Test suite of longer running stress tests for an HA1 cluster. + suite.addTestSuite(TestHA1JournalServer.class); + suite.addTestSuite(TestHA1SnapshotPolicy.class); + suite.addTestSuite(TestHA1SnapshotPolicy2.class); + return suite; } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
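Since TestAll follows the JUnit 3 suite() convention used throughout bigdata, the expanded suite can in principle be driven directly with the stock text runner. A hedged sketch only: the classpath is a placeholder, and the HA suites additionally require the jini/zookeeper fixtures configured for the test harness.

    # Hypothetical invocation; $CP must hold the bigdata test classes,
    # JUnit 3.x, and the jini/zookeeper dependencies.
    java -cp "$CP" junit.textui.TestRunner com.bigdata.journal.jini.ha.TestAll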
From: <tho...@us...> - 2014-04-14 12:41:35
|
Revision: 8116 http://sourceforge.net/p/bigdata/code/8116 Author: thompsonbry Date: 2014-04-14 12:41:29 +0000 (Mon, 14 Apr 2014) Log Message: ----------- Fix for the HA1 tests in the write cache service test suite. Modified Paths: -------------- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java 2014-04-14 09:59:13 UTC (rev 8115) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestRWWriteCacheService.java 2014-04-14 12:41:29 UTC (rev 8116) @@ -35,6 +35,7 @@ import java.util.HashMap; import java.util.Map.Entry; import java.util.Random; +import java.util.concurrent.TimeUnit; import junit.framework.AssertionFailedError; @@ -48,7 +49,6 @@ import com.bigdata.quorum.QuorumActor; import com.bigdata.rwstore.RWWriteCacheService; import com.bigdata.util.ChecksumUtility; -import com.bigdata.util.InnerCause; /** * Test suite for the {@link WriteCacheService} using scattered writes on a @@ -138,6 +138,19 @@ actor.castVote(0); fixture.awaitDeque(); + // Await quorum meet. + assertCondition(new Runnable() { + @Override + public void run() { + try { + assertEquals(0L, quorum.token()); + } catch (Exception e) { + fail(); + } + } + + }, 5000/*timeout*/, TimeUnit.MILLISECONDS); + file = File.createTempFile(getName(), ".rw.tmp"); opener = new ReopenFileChannel(file, "rw"); Modified: branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java =================================================================== --- branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-14 09:59:13 UTC (rev 8115) +++ branches/BIGDATA_MGC_HA1_HA5/bigdata/src/test/com/bigdata/io/writecache/TestWORMWriteCacheService.java 2014-04-14 12:41:29 UTC (rev 8116) @@ -521,7 +521,7 @@ final int nbuffers = 1; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // for HA1! false; // No write pipeline. final int k = 1; @@ -574,7 +574,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; final int k = 1; final long lastCommitTime = 0L; @@ -619,7 +619,7 @@ final int nbuffers = 2; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // for HA1! false; // No write pipeline. final int k = 1; @@ -672,7 +672,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = false; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // for HA1! false; // No write pipeline. 
final int k = 1; @@ -681,8 +681,7 @@ final String logicalServiceId = "logicalService_"+getName(); final MockQuorum<HAPipelineGlue, MyMockQuorumMember<HAPipelineGlue>> quorum = new MockQuorum<HAPipelineGlue, MyMockQuorumMember<HAPipelineGlue>>( k, fixture); - try { - + try { fixture.start(); quorum.start(new MyMockQuorumMember<HAPipelineGlue>(fixture,logicalServiceId)); @@ -718,7 +717,7 @@ final int nbuffers = 6; final boolean useChecksums = true; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // for HA1! false; // No write pipeline. final int k = 1; @@ -771,7 +770,7 @@ */ final double largeRecordRate = 0d; final boolean useChecksums = true; - final boolean isHighlyAvailable = false; + final boolean isHighlyAvailable = true; // for HA1! false; // No write pipeline. final int k = 1; @@ -2120,6 +2119,19 @@ + ", isHighlyAvailable=" + isHighlyAvailable); } + // Await quorum meet. + assertCondition(new Runnable() { + @Override + public void run() { + try { + assertEquals(0L, quorum.token()); + } catch (Exception e) { + fail(); + } + } + + }, 5000/*timeout*/, TimeUnit.MILLISECONDS); + File file = null; ReopenFileChannel opener = null; WriteCacheService writeCacheService = null; This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-14 09:59:16
|
Revision: 8115 http://sourceforge.net/p/bigdata/code/8115 Author: dmekonnen Date: 2014-04-14 09:59:13 +0000 (Mon, 14 Apr 2014) Log Message: ----------- update at line 1515 so that the "if" test logic matches the error message on line 1516. Note that the test in HARestore.java (563) prevents the SnapshotManager test from failing. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-14 09:52:38 UTC (rev 8114) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-04-14 09:59:13 UTC (rev 8115) @@ -1512,7 +1512,7 @@ if (!src.exists()) throw new FileNotFoundException(src.getAbsolutePath()); - if (!dst.exists() && dst.length() == 0) + if (dst.exists() && dst.length() != 0) throw new IOException("Output file exists and is not empty: " + dst.getAbsolutePath()); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
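The inverted guard was easy to misread, so it is worth spelling out the intended precondition: the copy must refuse to overwrite a destination file that already exists and is non-empty. In shell terms the corrected test is exactly "[ -s FILE ]", which is true only when the file exists with a size greater than zero. A minimal sketch with a hypothetical path:

    dst=/tmp/bigdata-ha.jnl   # hypothetical destination, not from the patch
    # Mirror of the fixed Java condition: fail when dst exists and is not empty.
    if [ -s "$dst" ]; then
        echo "Output file exists and is not empty: $dst" >&2
        exit 1
    fi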
From: <dme...@us...> - 2014-04-14 09:52:41
|
Revision: 8114 http://sourceforge.net/p/bigdata/code/8114 Author: dmekonnen Date: 2014-04-14 09:52:38 +0000 (Mon, 14 Apr 2014) Log Message: ----------- updates to support running HARestore upon reboot. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-04-14 09:52:38 UTC (rev 8114) @@ -0,0 +1,10 @@ +#!/bin/bash + +source /etc/default/bigdataHA + +SERVICE_DIR="$FED_DIR/$FEDNAME/$LOGICAL_SERVICE_ID/HAJournalServer" +LIB_DIR="$FED_DIR/lib" + +java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar -Dlog4j.configuration=file:var/config/logging/log4j.properties com.bigdata.journal.jini.ha.HARestore -o $DATA_DIR/bigdata-ha.jnl $SERVICE_DIR/snapshot $SERVICE_DIR/HALog + + Property changes on: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-13 04:05:32 UTC (rev 8113) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-14 09:52:38 UTC (rev 8114) @@ -70,7 +70,13 @@ cd $FED_DIR +if [ ! -d "$DATA_DIR/lost+found" ]; then + mount /dev/vg/lv_bigdata $DATA_DIR + action $"`date` : `hostname` : restoring bigdata journal file: " sudo -u $BD_USER -g $BD_GROUP bin/HARestore +fi + + # # See how we were called. # This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
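Two details here are worth unpacking. First, the init.d hook uses lost+found as a cheap mounted-volume probe: mkfs.ext4 creates that directory at the root of every filesystem it makes, so its absence under $DATA_DIR means the logical volume from the lvm recipe has not been mounted yet. Second, bin/HARestore rebuilds the journal from the most recent snapshot plus the subsequent HALogs, so the same restore can be rehearsed by hand. A sketch of the manual equivalent, run as root and assuming the variables defined in /etc/default/bigdataHA:

    #!/bin/bash
    source /etc/default/bigdataHA

    if [ ! -d "$DATA_DIR/lost+found" ]; then
        # lost+found only exists at the root of a made ext4 filesystem,
        # so its absence implies the data volume is not mounted.
        mount /dev/vg/lv_bigdata "$DATA_DIR"
    fi

    # Rebuild the journal from the latest snapshot + HALogs before any
    # service starts writing.
    cd "$FED_DIR"
    sudo -u "$BD_USER" -g "$BD_GROUP" bin/HARestore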
From: <dme...@us...> - 2014-04-13 04:05:36
|
Revision: 8113 http://sourceforge.net/p/bigdata/code/8113 Author: dmekonnen Date: 2014-04-13 04:05:32 +0000 (Sun, 13 Apr 2014) Log Message: ----------- sudo command adjusted Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-13 03:10:21 UTC (rev 8112) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-13 04:05:32 UTC (rev 8113) @@ -89,7 +89,7 @@ fi fi if [ ! -f "$pidFile" ]; then - action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP source /etc/default/bigdataHA ; $binDir/startHAServices + action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP bash -c "source /etc/default/bigdataHA ; $binDir/startHAServices" else echo $"`date` : `hostname` : running as $pid" fi This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-04-13 03:10:26
|
Revision: 8112
          http://sourceforge.net/p/bigdata/code/8112
Author:   dmekonnen
Date:     2014-04-13 03:10:21 +0000 (Sun, 13 Apr 2014)
Log Message:
-----------
Using sudo to execute commands as another user creates a new environment/shell, hence the need to "source /etc/default/bigdataHA" again so that the environment variables are available to startHAServices.

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-11 21:16:50 UTC (rev 8111)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-04-13 03:10:21 UTC (rev 8112)
@@ -89,7 +89,7 @@
         fi
     fi
     if [ ! -f "$pidFile" ]; then
-        action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP $binDir/startHAServices
+        action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP source /etc/default/bigdataHA ; $binDir/startHAServices
     else
         echo $"`date` : `hostname` : running as $pid"
     fi

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
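The root cause described here is sudo's default env_reset policy: the command runs with a scrubbed environment, so exports made by the init script (FEDNAME, FED_DIR and friends) never reach startHAServices. Note also that in the one-liner above the ";" is parsed by the init script's own shell, so $binDir/startHAServices actually runs outside sudo altogether, and sudo cannot execute the shell builtin "source" as a command in any case, which is why r8113 (above) wraps the whole sequence in bash -c. A minimal demonstration of the environment scrubbing, assuming a stock sudoers and the bigdata user:

    export FEDNAME=demoFed

    # Lost: sudo resets the environment before running the child shell.
    sudo -u bigdata bash -c 'echo "FEDNAME=[$FEDNAME]"'    # prints FEDNAME=[]

    # Recovered: re-source the defaults file inside the new shell.
    sudo -u bigdata bash -c 'source /etc/default/bigdataHA; echo "FEDNAME=[$FEDNAME]"'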
From: <tob...@us...> - 2014-04-11 21:16:53
|
Revision: 8111 http://sourceforge.net/p/bigdata/code/8111 Author: tobycraig Date: 2014-04-11 21:16:50 +0000 (Fri, 11 Apr 2014) Log Message: ----------- User can now choose a custom filename when exporting data Modified Paths: -------------- branches/RDR/bigdata-war/src/html/index.html branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/index.html =================================================================== --- branches/RDR/bigdata-war/src/html/index.html 2014-04-11 18:58:09 UTC (rev 8110) +++ branches/RDR/bigdata-war/src/html/index.html 2014-04-11 21:16:50 UTC (rev 8111) @@ -117,10 +117,20 @@ </div> <div id="query-export" class="modal"> - <select> - </select> - <button id="query-download-rdf">Export</button> - <button class="modal-cancel">Cancel</button> + <h1>Export</h1> + <p> + <label for="export-format">Format: </label> + <select id="export-format"></select> + </p> + <p> + <label for="export-filename">Filename: </label> + <input type="text" id="export-filename" value="export"> + .<span id="export-filename-extension"></span> + </p> + <p> + <button id="query-download-rdf">Export</button> + <button class="modal-cancel">Cancel</button> + </p> </div> <div class="tab" id="explore-tab"> Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-11 18:58:09 UTC (rev 8110) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-11 21:16:50 UTC (rev 8111) @@ -478,7 +478,7 @@ $('#query-response, #query-explanation, #query-tab .bottom *').hide(); }); -$('#query-export-rdf').click(function() { showModal('query-export'); }); +$('#query-export-rdf').click(function() { updateExportFileExtension(); showModal('query-export'); }); $('#query-export-csv').click(exportCSV); $('#query-export-json').click(exportJSON); $('#query-export-xml').click(exportXML); @@ -494,25 +494,34 @@ }; for(var contentType in rdf_extensions) { - $('#query-export select').append('<option value="' + contentType + '">' + rdf_extensions[contentType][0] + '</option>'); + $('#export-format').append('<option value="' + contentType + '">' + rdf_extensions[contentType][0] + '</option>'); } +$('#export-format').change(updateExportFileExtension); + +function updateExportFileExtension() { + $('#export-filename-extension').html(rdf_extensions[$('#export-format').val()][1]); +} + $('#query-download-rdf').click(function() { - var dataType = $(this).siblings('select').val(); + var dataType = $('#export-format').val(); var settings = { type: 'POST', data: JSON.stringify(QUERY_RESULTS), contentType: 'application/sparql-results+json', headers: { 'Accept': dataType }, - success: function(data) { downloadRDFSuccess(data, dataType); }, + success: function(data) { downloadRDFSuccess(data, dataType, $('#export-filename').val()); }, error: downloadRDFError }; $.ajax('/bigdata/sparql?workbench&convert', settings); $(this).siblings('.modal-cancel').click(); }); -function downloadRDFSuccess(data, dataType) { - var filename = 'export.' + rdf_extensions[dataType][1]; +function downloadRDFSuccess(data, dataType, filename) { + if(filename == '') { + filename = 'export'; + } + filename += '.' + rdf_extensions[dataType][1]; downloadFile(data, dataType, filename); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
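Outside the browser, the conversion service behind this dialog can be exercised directly: the JavaScript above POSTs the cached SPARQL JSON results to the ?workbench&convert endpoint and selects the output serialization via the Accept header. A hedged curl sketch follows; the host, port, results.json file, and the choice of application/rdf+xml as one of the offered Accept types are assumptions:

    # Convert saved SPARQL JSON results into an RDF serialization.
    curl -X POST 'http://localhost:8080/bigdata/sparql?workbench&convert' \
         -H 'Content-Type: application/sparql-results+json' \
         -H 'Accept: application/rdf+xml' \
         --data-binary @results.json \
         -o export.rdf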
From: <tob...@us...> - 2014-04-11 18:58:15
|
Revision: 8110 http://sourceforge.net/p/bigdata/code/8110 Author: tobycraig Date: 2014-04-11 18:58:09 +0000 (Fri, 11 Apr 2014) Log Message: ----------- Fixed RDF export button not hiding for incompatible data Modified Paths: -------------- branches/RDR/bigdata-war/src/html/js/workbench.js Modified: branches/RDR/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-11 18:53:18 UTC (rev 8109) +++ branches/RDR/bigdata-war/src/html/js/workbench.js 2014-04-11 18:58:09 UTC (rev 8110) @@ -672,6 +672,8 @@ QUERY_RESULTS.head.vars.pop() } $('#query-export-rdf').show(); + } else { + $('#query-export-rdf').hide(); } $('#query-response a').click(function(e) { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |