This list is closed; nobody may subscribe to it.
| Year | Jan | Feb | Mar | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec |
|------|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|
| 2010 |     |     |     |     |     |     | 139 | 94  | 232 | 143 | 138 | 55  |
| 2011 | 127 | 90  | 101 | 74  | 148 | 241 | 169 | 121 | 157 | 199 | 281 | 75  |
| 2012 | 107 | 122 | 184 | 73  | 14  | 49  | 26  | 103 | 133 | 61  | 51  | 55  |
| 2013 | 59  | 72  | 99  | 62  | 92  | 19  | 31  | 138 | 47  | 83  | 95  | 111 |
| 2014 | 125 | 60  | 119 | 136 | 270 | 83  | 88  | 30  | 47  | 27  | 23  |     |
| 2015 |     |     |     |     |     |     |     |     | 3   |     |     |     |
| 2016 |     |     | 4   | 1   |     |     |     |     |     |     |     |     |
From: <dme...@us...> - 2014-06-02 05:14:22

Revision: 8434
          http://sourceforge.net/p/bigdata/code/8434
Author:   dmekonnen
Date:     2014-06-02 05:14:19 +0000 (Mon, 02 Jun 2014)

Log Message:
-----------
fixed run list

Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss
    branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss	2014-06-02 05:13:06 UTC (rev 8433)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss	2014-06-02 05:14:19 UTC (rev 8434)
@@ -41,7 +41,7 @@
   config.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"
   chef.run_list = [
-    "recipe[bigdata::tomcat]"
+    "recipe[bigdata::nss]"
   ]
 end

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn	2014-06-02 05:13:06 UTC (rev 8433)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn	2014-06-02 05:14:19 UTC (rev 8434)
@@ -45,7 +45,7 @@
   config.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"
   chef.run_list = [
-    "recipe[bigdata::tomcat]"
+    "recipe[bigdata::nss]"
   ]
 end
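In plain terms, r8434 points the Chef run list in the two NSS Vagrant samples at the NanoSparqlServer (nss) recipe instead of the Tomcat recipe. Below is a minimal standalone sketch of a Vagrantfile using that run list; only the `recipe[bigdata::nss]` entry and the inline Chef bootstrap command come from the commit, while the box name and JSON attributes are illustrative assumptions.

```ruby
# Minimal sketch of a chef_solo-provisioned NSS box (not the project's actual
# Vagrantfile.nss). Only the run_list entry and the inline bootstrap line are
# taken from r8434; the box name and attribute values below are placeholders.
Vagrant.configure("2") do |config|
  config.vm.box = "precise64"   # assumed box, mirroring the HA3 sample

  config.vm.provision :shell,
    inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"

  config.vm.provision :chef_solo do |chef|
    chef.json = {
      :bigdata => { :install_flavor => "nss" }   # hypothetical attribute value
    }
    # The actual fix: run the NSS recipe rather than the Tomcat one.
    chef.run_list = [
      "recipe[bigdata::nss]"
    ]
  end
end
```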
From: <dme...@us...> - 2014-06-02 05:13:12

Revision: 8433
          http://sourceforge.net/p/bigdata/code/8433
Author:   dmekonnen
Date:     2014-06-02 05:13:06 +0000 (Mon, 02 Jun 2014)

Log Message:
-----------
removing renamed directory

Removed Paths:
-------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/
From: <dme...@us...> - 2014-06-02 05:11:13
Revision: 8432 http://sourceforge.net/p/bigdata/code/8432 Author: dmekonnen Date: 2014-06-02 05:11:05 +0000 (Mon, 02 Jun 2014) Log Message: ----------- Updates for cluster HA3 deployment to a VirtualBox provider Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-02 05:11:05 UTC (rev 8432) @@ -91,14 +91,18 @@ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1' # Where to find the Apache River service registrars (can also use multicast). - default['bigdata'][:river_locator1] = 'bigdataA' - default['bigdata'][:river_locator2] = 'bigdataB' - default['bigdata'][:river_locator3] = 'bigdataC' + default['bigdata'][:river_locator1] = '33.33.33.10' + default['bigdata'][:river_locator2] = '33.33.33.11' + default['bigdata'][:river_locator3] = '33.33.33.12' # Where to find the Apache Zookeeper ensemble. 
default['bigdata'][:zk_server1] = 'bigdataA' default['bigdata'][:zk_server2] = 'bigdataB' default['bigdata'][:zk_server3] = 'bigdataC' + + # set the JVM_OPTS as used by startHAService + default['bigdata'][:java_options] = "-server -Xmx4G -XX:MaxDirectMemorySize=3000m" + # default['bigdata'][:java_options] = "-server -Xmx4G -XX:MaxDirectMemorySize=3000m -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1046" end Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432) @@ -4,7 +4,7 @@ license 'GNU GPLv2' description 'Installs/Configures Systap Bigdata High Availability' long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) -version '0.1.4' +version '0.1.5' depends 'apt' depends 'java', '>= 1.22.0' Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-06-02 05:11:05 UTC (rev 8432) @@ -106,7 +106,7 @@ user 'root' group 'root' cwd "#{node['bigdata'][:home]}/etc/init.d" - command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\&1 \&\"|' bigdataHA" + command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\\&1\"|' bigdataHA" end execute "Change SystemProperty to Property in the 'host' attribute of jetty.xml" do @@ -184,7 +184,7 @@ # Copy the /etc/default/bigdataHA template: # template "/etc/default/bigdataHA" do - source "default/bigdataHA.erb" + source "etc/default/bigdataHA.erb" user 'root' group 'root' mode 00644 Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb 2014-06-02 05:11:05 UTC (rev 8432) @@ -0,0 +1,62 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +BD_USER="<%= node['bigdata'][:user] %>" +BD_GROUP="<%= node['bigdata'][:group] %>" + +binDir=<%= node['bigdata'][:home] %>/bin +pidFile=<%= node['bigdata'][:home] %>/var/lock/pid + +## +# ServiceStarter JVM options. +# +# The ServiceStarter is launched as a JVM with the following JVM options. +# The other services (including the HAJournalServer) will run inside of +# this JVM. This is where you specify the size of the Java heap and the +# size of the direct memory heap (used for the write cache buffers and +# some related things). +## +export JVM_OPTS="<%= node['bigdata'][:java_options] %>" + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. 
+## + +# Name of the bigdata gederation of services. Override for real install. +export FEDNAME=<%= node['bigdata'][:fedname] %> + +# This is different for each HA replication cluster in the same federation +# of services. If you have multiple such replication cluster, then just +# given each such cluster its own name. +export LOGICAL_SERVICE_ID=<%= node['bigdata'][:logical_service_id] %> + +# Local directory where the service will store its state. +export FED_DIR=<%= node['bigdata'][:home] %> +export DATA_DIR=<%= node['bigdata'][:data_dir] %> + +# Apache River - NO default for "LOCATORS". +export GROUPS="${FEDNAME}" +export LOCATORS="jini://<%= node['bigdata'][:river_locator1] %>/,jini://<%= node['bigdata'][:river_locator2] %>/,jini://<%= node['bigdata'][:river_locator3] %>/" + +# Apache ZooKeeper - NO default. +export ZK_SERVERS="<%= node['bigdata'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>" + + +# All of these have defaults. Override as necessary. +#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export JETTY_PORT=8080 +#export JETTY_XML=var/jetty/jetty.xml +#export JETTY_RESOURCE_BASE=var/jetty/html +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py 2014-06-02 05:11:05 UTC (rev 8432) @@ -37,10 +37,13 @@ def createJiniLocatorsSubstitution(): locators = "" + vbHostAddresses = [ "33.33.33.10", "33.33.33.11", "33.33.33.12" ] + index = 0 for host in hostMap: - locators = locators + "jini://" + hostMap[host] + "/," + locators = locators + "sudo sed -i 's|" + vbHostAddresses[index] + "|" + hostMap[host] + "|' /etc/default/bigdataHA ;" + index = index + 1 locators = locators[:-1] - return "sudo sed -i 's|%JINI_LOCATORS%|" + locators + "|' /etc/default/bigdataHA" + return locators if __name__ == '__main__': @@ -60,7 +63,7 @@ group = ec2conn.get_all_security_groups( private_security_group_name )[0] jini_locators = createJiniLocatorsSubstitution() - # print "JINI_LOCATORS = " + jini_locators + print "JINI_LOCATORS = " + jini_locators i = 1 for host in bigdataHosts: @@ -87,9 +90,9 @@ # startHAServices does not exit as expected, so remote restart commands will hang. 
# As a work around, we restart the host: # - print "Running: sudo /etc/init.d/zookeeper-server restart on host ", host + # print "Running: sudo /etc/init.d/zookeeper-server restart on host ", host status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/zookeeper-server restart" ) - print "Running: sudo /etc/init.d/bigdata restart on host ", host + # print "Running: sudo /etc/init.d/bigdata restart on host ", host status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/bigdataHA restart" ) # status, stdin, stderr = ssh_client.run( "sudo service bigdataHA restart" ) # host.reboot() Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432) @@ -4,7 +4,7 @@ license 'GNU GPLv2' description 'Installs/Configures Systap Bigdata High Availability' long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) -version '0.1.4' +version '0.1.5' depends 'apt' depends 'java', '>= 1.22.0' Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 2014-06-02 05:11:05 UTC (rev 8432) @@ -0,0 +1,146 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Vagraant.ha3 - Install the Bigdata High Availability Server with 3 Nodes with an VirtualBox Provider +# +# The launch synopsis for this Vagrantfile: +# +# % vagrant up +# % vagrant halt +# % vagrant up +# +# The "halt" and following "up" forces a restart of the services post-installation. +# This is a temporary requirement until recipes are upated. 
+ +Vagrant.require_plugin "vagrant-berkshelf" + +Vagrant.configure("2") do |config| + +config.vm.provider :virtualbox do |vb| + vb.customize ["modifyvm", :id, "--memory", "2048"] +end + +script = <<SCRIPT + apt-get update + apt-get install -y curl + curl -L https://www.opscode.com/chef/install.sh | bash + mkdir -p /var/lib/zookeeper + echo "33.33.33.10 bigdataA" >> /etc/hosts + echo "33.33.33.11 bigdataB" >> /etc/hosts + echo "33.33.33.12 bigdataC" >> /etc/hosts +SCRIPT + +$scriptA = "#{script}\n\techo 1 > /var/lib/zookeeper/myid\n" +config.vm.define :bigdataA do |bigdataA| + + bigdataA.vm.hostname = "bigdataA" + bigdataA.vm.box = "precise64" + + bigdataA.berkshelf.enabled = true + + bigdataA.vm.box_url = "http://files.vagrantup.com/precise64.box" + + bigdataA.vm.network :private_network, ip: "33.33.33.10" + + bigdataA.vm.provision :chef_solo do |chef| + chef.json = { + :bigdata => { + :install_flavor => "ha" + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataA.vm.provision :shell, inline: $scriptA + + chef.run_list = [ + "recipe[bigdata::high_availability]" + ] + + end + +end # bigdataA + + +$scriptB = "#{script}\n\techo 2 > /var/lib/zookeeper/myid\n" +config.vm.define :bigdataB do |bigdataB| + + bigdataB.vm.hostname = "bigdataB" + bigdataB.vm.box = "precise64" + + bigdataB.berkshelf.enabled = true + + bigdataB.vm.box_url = "http://files.vagrantup.com/precise64.box" + + bigdataB.vm.network :private_network, ip: "33.33.33.11" + + bigdataB.vm.provision :chef_solo do |chef| + chef.json = { + :bigdata => { + :install_flavor => "ha" + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataB.vm.provision :shell, inline: $scriptB + + chef.run_list = [ + "recipe[bigdata::high_availability]" + ] + + end + +end # bigdataB + + +$scriptC = "#{script}\n\techo 3 > /var/lib/zookeeper/myid\n" +config.vm.define :bigdataC do |bigdataC| + + bigdataC.vm.hostname = "bigdataC" + bigdataC.vm.box = "precise64" + + bigdataC.berkshelf.enabled = true + + bigdataC.vm.box_url = "http://files.vagrantup.com/precise64.box" + + bigdataC.vm.network :private_network, ip: "33.33.33.12" + + bigdataC.vm.provision :chef_solo do |chef| + chef.json = { + :bigdata => { + :install_flavor => "ha" + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataC.vm.provision :shell, inline: $scriptC + + chef.run_list = [ + "recipe[bigdata::high_availability]" + ] + + end + +end # bigdataC + +end Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432) @@ -4,7 +4,7 @@ license 'GNU GPLv2' description 'Installs/Configures Systap Bigdata High Availability' long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) -version '0.1.3' +version '0.1.5' depends 'apt' depends 'java', '>= 1.22.0' This was sent 
by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
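Taken together, r8432 pins the Apache River locators to the fixed private addresses of the three VirtualBox nodes (33.33.33.10, 33.33.33.11 and 33.33.33.12), adds a `node['bigdata'][:java_options]` attribute that the relocated `etc/default/bigdataHA.erb` template renders as `JVM_OPTS` for startHAServices, and rewrites setHosts.py to substitute those placeholder addresses with the real hosts on AWS. The sketch below shows how a hypothetical wrapper cookbook might override the new attributes; the attribute keys mirror the diff above, but the override values are examples only.

```ruby
# Attributes-file sketch for a hypothetical wrapper cookbook overriding the
# settings introduced in r8432. Keys mirror chef/attributes/default.rb from
# the diff; the values shown here are illustrative, not project defaults.

# Apache River locators: the VirtualBox sample now uses fixed private IPs.
override['bigdata'][:river_locator1] = '33.33.33.10'
override['bigdata'][:river_locator2] = '33.33.33.11'
override['bigdata'][:river_locator3] = '33.33.33.12'

# Rendered into /etc/default/bigdataHA as JVM_OPTS by the new
# etc/default/bigdataHA.erb template; a smaller heap than the 4G default,
# purely as an example.
override['bigdata'][:java_options] = '-server -Xmx2G -XX:MaxDirectMemorySize=1500m'
```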
From: <mrp...@us...> - 2014-06-01 21:52:20
Revision: 8431 http://sourceforge.net/p/bigdata/code/8431 Author: mrpersonick Date: 2014-06-01 21:52:14 +0000 (Sun, 01 Jun 2014) Log Message: ----------- got BigdataStoreTest and BigdataSparqlTest running. TCK results: 28 errors, 40 failures Modified Paths: -------------- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java Modified: branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-01 21:23:37 UTC (rev 8430) +++ branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java 2014-06-01 21:52:14 UTC (rev 8431) @@ -36,13 +36,16 @@ import java.util.Arrays; import java.util.Collection; import java.util.Enumeration; +import java.util.HashSet; import java.util.Properties; import java.util.Set; import junit.framework.Test; import junit.framework.TestSuite; +import org.openrdf.model.Resource; import org.openrdf.model.Statement; +import org.openrdf.model.URI; import org.openrdf.query.BooleanQuery; import org.openrdf.query.Dataset; import org.openrdf.query.GraphQuery; @@ -768,4 +771,99 @@ return queryString; } + @Override + protected void runTest() + throws Exception + { + RepositoryConnection con = getQueryConnection(dataRep); + // Some SPARQL Tests have non-XSD datatypes that must pass for the test + // suite to complete successfully + con.getParserConfig().set(BasicParserSettings.VERIFY_DATATYPE_VALUES, Boolean.FALSE); + con.getParserConfig().set(BasicParserSettings.FAIL_ON_UNKNOWN_DATATYPES, Boolean.FALSE); + try { + String queryString = readQueryString(); + Query query = con.prepareQuery(QueryLanguage.SPARQL, queryString, queryFileURL); + if (dataset != null) { + query.setDataset(dataset); + } + + String name = this.getName(); + + if (name.contains("pp34")) { + System.out.println(name); + } + + if (query instanceof TupleQuery) { + TupleQueryResult queryResult = ((TupleQuery)query).evaluate(); + + TupleQueryResult expectedResult = readExpectedTupleQueryResult(); + + compareTupleQueryResults(queryResult, expectedResult); + + // Graph queryGraph = RepositoryUtil.asGraph(queryResult); + // Graph expectedGraph = readExpectedTupleQueryResult(); + // compareGraphs(queryGraph, expectedGraph); + } + else if (query instanceof GraphQuery) { + GraphQueryResult gqr = ((GraphQuery)query).evaluate(); + Set<Statement> queryResult = Iterations.asSet(gqr); + + Set<Statement> expectedResult = readExpectedGraphQueryResult(); + + compareGraphs(queryResult, expectedResult); + } + else if (query instanceof BooleanQuery) { + boolean queryResult = ((BooleanQuery)query).evaluate(); + boolean expectedResult = readExpectedBooleanQueryResult(); + assertEquals(expectedResult, queryResult); + } + else { + throw new RuntimeException("Unexpected query type: " + query.getClass()); + } + } + finally { + con.close(); + } + } + + /** + * Overridden to use {@link BigdataSail#getReadOnlyConnection()} as a + * workaround to the test harness which invokes + * {@link BigdataSail#getConnection()} multiple times from within the same + * thread. When full transactions are not enabled, that will delegate to + * {@link BigdataSail#getUnisolatedConnection()}. 
Only one unisolated + * connection is permitted at a time. While different threads will block to + * await the unisolated connection, that method will throw an exception if + * there is an attempt by a single thread to obtain more than one instance + * of the unisolated connection (since that operation would otherwise + * deadlock). + */ + protected BigdataSailRepositoryConnection getQueryConnection( + Repository dataRep) throws Exception { + + return ((BigdataSailRepository) ((DatasetRepository) dataRep) + .getDelegate()).getReadOnlyConnection(); + + } + + protected void uploadDataset(Dataset dataset) + throws Exception + { +// RepositoryConnection con = dataRep.getConnection(); +// try { + // Merge default and named graphs to filter duplicates + Set<URI> graphURIs = new HashSet<URI>(); + graphURIs.addAll(dataset.getDefaultGraphs()); + graphURIs.addAll(dataset.getNamedGraphs()); + + for (Resource graphURI : graphURIs) { + upload(((URI)graphURI), graphURI); + } +// } +// finally { +// con.close(); +// } + } + + } Modified: branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLQueryTest.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLQueryTest.java 2014-06-01 21:23:37 UTC (rev 8430) +++ branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLQueryTest.java 2014-06-01 21:52:14 UTC (rev 8431) @@ -458,7 +458,10 @@ } } - protected final void uploadDataset(Dataset dataset) + /* + * MRP: Made !final. + */ + protected void uploadDataset(Dataset dataset) throws Exception { RepositoryConnection con = dataRep.getConnection(); @@ -477,7 +480,10 @@ } } - private void upload(URI graphURI, Resource context) + /* + * MRP: Made protected. + */ + protected void upload(URI graphURI, Resource context) throws Exception { RepositoryConnection con = dataRep.getConnection(); Modified: branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java =================================================================== --- branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java 2014-06-01 21:23:37 UTC (rev 8430) +++ branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java 2014-06-01 21:52:14 UTC (rev 8431) @@ -730,7 +730,7 @@ } @Test - public void testMultiThreadedAccess() { + private void testMultiThreadedAccess() { Runnable runnable = new Runnable() { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <mrp...@us...> - 2014-06-01 21:23:44
Revision: 8430 http://sourceforge.net/p/bigdata/code/8430 Author: mrpersonick Date: 2014-06-01 21:23:37 +0000 (Sun, 01 Jun 2014) Log Message: ----------- initial commit of Sesame 2.7.12 upgrade - first clean compile Modified Paths: -------------- branches/SESAME_2_7/.classpath branches/SESAME_2_7/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java branches/SESAME_2_7/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/ntriples/BigdataNTriplesParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/turtle/BigdataTurtleWriter.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestAll_RIO.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/nquads/TestNQuadsParser.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/nquads/TestNQuadsParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql10QueryBuilder.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparql11QueryBuilder.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/eval/service/TestRemoteSparqlBuilderFactory.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/sparql/ast/optimizers/TestASTEmptyGroupOptimizer.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/store/TestStatementIdentifiers.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/store/TestTripleStore.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/BigdataSail.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/remote/BigdataSailRemoteRepositoryConnection.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/ConnegUtil.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/BackgroundTupleResult.java branches/SESAME_2_7/bigdata-sails/src/java/com/bigdata/rdf/sail/webapp/client/RemoteRepository.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/TestProvenanceQuery.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/Bigdata2ASTSPARQL11SyntaxTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/Bigdata2ASTSPARQLSyntaxTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestBigdataExprBuilder.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestSubqueryPatterns.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/sparql/TestVirtualGraphs.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataComplexSparqlQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataConnectionTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataEmbeddedFederationSparqlTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataFederationSparqlTest.java 
branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateConformanceTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSPARQLUpdateTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlFullRWTxTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataSparqlTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/tck/BigdataStoreTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/AbstractSimpleInsertTest.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/ProxySuiteHelper.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestBigdataSailRemoteRepository.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestFederatedQuery.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestHelper.java branches/SESAME_2_7/bigdata-sails/src/test/com/bigdata/rdf/sail/webapp/TestSparqlUpdate.java Added Paths: ----------- branches/SESAME_2_7/.settings/org.eclipse.core.resources.prefs branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar branches/SESAME_2_7/bigdata/lib/junit-4.11.jar branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONParserBase.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/SPARQLJSONWriterBase.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/jsonrdf/ branches/SESAME_2_7/bigdata-sails/lib/sesame-sparql-testsuite-2.7.11.jar branches/SESAME_2_7/bigdata-sails/lib/sesame-store-testsuite-2.7.11.jar branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/ComplexSPARQLQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLUpdateTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/ branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/manifest/SPARQLQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/repository/RepositoryConnectionTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java Removed Paths: ------------- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForConstruct.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriterFactoryForSelect.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParser.java 
branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/nquads/NQuadsParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/Att.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/Atts.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParser.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLWriter.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/BigdataRDFXMLWriterFactory.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/SAXFilter.java branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/rdfxml/package.html branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/TestRDFXMLInterchangeWithStatementIdentifiers.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/RDFWriterTest.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/RDFXMLParserTest.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/RDFXMLParserTestCase.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/RDFXMLWriterTest.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/RDFXMLWriterTestCase.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/TestAll.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/TestRDFXMLParserFactory.java branches/SESAME_2_7/bigdata-rdf/src/test/com/bigdata/rdf/rio/rdfxml/TestRDFXMLWriterFactory.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/ComplexSPARQLQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/EarlReport.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQL11SyntaxTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLASTQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLQueryTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/query/parser/sparql/SPARQLUpdateTest.java branches/SESAME_2_7/bigdata-sails/src/test/org/openrdf/sail/RDFStoreTest.java Modified: branches/SESAME_2_7/.classpath =================================================================== --- branches/SESAME_2_7/.classpath 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/.classpath 2014-06-01 21:23:37 UTC (rev 8430) @@ -40,7 +40,6 @@ <classpathentry exported="true" kind="lib" path="bigdata/lib/unimi/colt-1.2.0.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-4.8.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/icu/icu4j-charset-4.8.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata/lib/junit-3.8.1.jar" sourcepath="/root/.m2/repository/junit/junit/3.8.1/junit-3.8.1-sources.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/browser.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/classserver.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-jini/lib/jini/lib/fiddler.jar"/> @@ -76,10 +75,6 @@ <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-fileupload-1.2.2.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/httpcomponents/commons-io-2.1.jar"/> <classpathentry exported="true" kind="lib" 
path="bigdata/lib/apache/log4j-1.2.17.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.6.10-onejar.jar" sourcepath="/Users/bryan/Documents/workspace/org.openrdf.sesame-2.6.10"/> - <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.6.10.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.6.10.jar"/> - <classpathentry exported="true" kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.6.10.jar"/> <classpathentry exported="true" kind="lib" path="bigdata-rdf/lib/nxparser-1.2.3.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-client-9.1.4.v20140401.jar"/> <classpathentry exported="true" kind="lib" path="bigdata/lib/jetty/jetty-continuation-9.1.4.v20140401.jar"/> @@ -99,5 +94,11 @@ <classpathentry kind="lib" path="bigdata-blueprints/lib/blueprints-test-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/rexster-core-2.5.0.jar"/> <classpathentry kind="lib" path="bigdata-blueprints/lib/commons-configuration-1.10.jar"/> + <classpathentry kind="lib" path="bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar"/> + <classpathentry kind="lib" path="bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-sparql-testsuite-2.7.11.jar" sourcepath="/Users/mikepersonick/.m2/repository/org/openrdf/sesame/sesame-sparql-testsuite/2.7.11/sesame-sparql-testsuite-2.7.11-sources.jar"/> + <classpathentry kind="lib" path="bigdata-sails/lib/sesame-store-testsuite-2.7.11.jar" sourcepath="/Users/mikepersonick/.m2/repository/org/openrdf/sesame/sesame-store-testsuite/2.7.11/sesame-store-testsuite-2.7.11-sources.jar"/> + <classpathentry kind="lib" path="bigdata/lib/junit-4.11.jar" sourcepath="/Users/mikepersonick/.m2/repository/junit/junit/4.11/junit-4.11-sources.jar"/> + <classpathentry kind="lib" path="bigdata/lib/hamcrest-core-1.3.jar"/> <classpathentry kind="output" path="bin"/> </classpath> Added: branches/SESAME_2_7/.settings/org.eclipse.core.resources.prefs =================================================================== --- branches/SESAME_2_7/.settings/org.eclipse.core.resources.prefs (rev 0) +++ branches/SESAME_2_7/.settings/org.eclipse.core.resources.prefs 2014-06-01 21:23:37 UTC (rev 8430) @@ -0,0 +1,2 @@ +eclipse.preferences.version=1 +encoding//bigdata-sails/src/test/org/openrdf/repository/RepositoryConnectionTest.java=UTF-8 Property changes on: branches/SESAME_2_7/.settings/org.eclipse.core.resources.prefs ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar =================================================================== (Binary files differ) Index: branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar =================================================================== --- branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar 2014-06-01 21:23:37 UTC (rev 8430) Property changes on: branches/SESAME_2_7/bigdata/lib/hamcrest-core-1.3.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/SESAME_2_7/bigdata/lib/junit-4.11.jar =================================================================== (Binary files 
differ) Index: branches/SESAME_2_7/bigdata/lib/junit-4.11.jar =================================================================== --- branches/SESAME_2_7/bigdata/lib/junit-4.11.jar 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata/lib/junit-4.11.jar 2014-06-01 21:23:37 UTC (rev 8430) Property changes on: branches/SESAME_2_7/bigdata/lib/junit-4.11.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar =================================================================== (Binary files differ) Index: branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar =================================================================== --- branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar 2014-06-01 21:23:37 UTC (rev 8430) Property changes on: branches/SESAME_2_7/bigdata/lib/mockito-core-1.9.5.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/SESAME_2_7/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java =================================================================== --- branches/SESAME_2_7/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata/src/test/com/bigdata/btree/AbstractBTreeTestCase.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -1804,16 +1804,16 @@ System.err.println("]"); throw ex; } - catch( AssertionFailedError ex ) { - System.err.println("m="+m); - System.err.print("keys=["); - for(int i=0; i<keys.length; i++ ) { - if( i>0 ) System.err.print(", "); - System.err.print(keys[order[i]]); - } - System.err.println("]"); - throw ex; - } +// catch( AssertionFailedError ex ) { +// System.err.println("m="+m); +// System.err.print("keys=["); +// for(int i=0; i<keys.length; i++ ) { +// if( i>0 ) System.err.print(", "); +// System.err.print(keys[order[i]]); +// } +// System.err.println("]"); +// throw ex; +// } if(log.isInfoEnabled()) log.info(btree.getBtreeCounters().toString()); Modified: branches/SESAME_2_7/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java =================================================================== --- branches/SESAME_2_7/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-gom/src/test/com/bigdata/gom/TestNumericBNodes.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -6,7 +6,6 @@ import org.openrdf.rio.RDFParserRegistry; import com.bigdata.rdf.axioms.NoAxioms; -import com.bigdata.rdf.rio.nquads.NQuadsParser; import com.bigdata.rdf.rio.turtle.BigdataTurtleParser; import com.bigdata.rdf.store.AbstractTripleStore; @@ -32,7 +31,7 @@ * Mike, * * If you load the attached file into the NSS and then execute - * bigdata-gom/samples/\xC9/Example1 (or Example2) it will throw an exception + * bigdata-gom/samples//Example1 (or Example2) it will throw an exception * having to do with bnode Ids. This is the issue that David Booth posted * here [1]. 
* Added: branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar =================================================================== (Binary files differ) Index: branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar =================================================================== --- branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar 2014-06-01 21:23:37 UTC (rev 8430) Property changes on: branches/SESAME_2_7/bigdata-rdf/lib/openrdf-sesame-2.7.11-onejar.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Added: branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar =================================================================== (Binary files differ) Index: branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar =================================================================== --- branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar 2014-06-01 21:23:37 UTC (rev 8430) Property changes on: branches/SESAME_2_7/bigdata-rdf/lib/sesame-rio-testsuite-2.7.11.jar ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +application/octet-stream \ No newline at end of property Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/ServiceProviderHook.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -32,6 +32,7 @@ import java.util.ServiceLoader; import org.openrdf.query.QueryLanguage; +import org.openrdf.query.resultio.TupleQueryResultParserRegistry; import org.openrdf.query.resultio.TupleQueryResultWriterRegistry; import org.openrdf.rio.RDFFormat; import org.openrdf.rio.RDFParserRegistry; @@ -39,10 +40,8 @@ import com.bigdata.rdf.model.StatementEnum; import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParserFactory; -import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactoryForConstruct; -import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactoryForSelect; +import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONWriterFactory; import com.bigdata.rdf.rio.ntriples.BigdataNTriplesParserFactory; -import com.bigdata.rdf.rio.rdfxml.BigdataRDFXMLWriterFactory; import com.bigdata.rdf.rio.turtle.BigdataTurtleParserFactory; import com.bigdata.rdf.rio.turtle.BigdataTurtleWriterFactory; @@ -126,7 +125,7 @@ * Allows parsing of JSON SPARQL Results with an {s,p,o,[c]} header. * RDR-enabled. 
*/ - r.add(new BigdataSPARQLResultsJSONParserFactory()); +// r.add(new BigdataSPARQLResultsJSONParserFactory()); } @@ -134,12 +133,20 @@ final TupleQueryResultWriterRegistry r = TupleQueryResultWriterRegistry.getInstance(); - // add our custom RDR-enabled JSON writer (RDR-enabled) - r.add(new BigdataSPARQLResultsJSONWriterFactoryForSelect()); + // add our custom RDR-enabled JSON writer + r.add(new BigdataSPARQLResultsJSONWriterFactory()); } + { + + final TupleQueryResultParserRegistry r = TupleQueryResultParserRegistry.getInstance(); + // add our custom RDR-enabled JSON parser + r.add(new BigdataSPARQLResultsJSONParserFactory()); + + } + // Ditto, but for the writer. { final RDFWriterRegistry r = RDFWriterRegistry.getInstance(); @@ -150,7 +157,7 @@ r.add(new BigdataTurtleWriterFactory()); // RDR-enabled - r.add(new BigdataSPARQLResultsJSONWriterFactoryForConstruct()); +// r.add(new BigdataSPARQLResultsJSONWriterFactory()); } Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BNodeContextFactory.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -1,5 +1,6 @@ package com.bigdata.rdf.model; +import java.util.Date; import java.util.UUID; import javax.xml.datatype.XMLGregorianCalendar; @@ -153,6 +154,10 @@ return valueFactory.createLiteral(arg0); } + public BigdataLiteral createLiteral(Date arg0) { + return valueFactory.createLiteral(arg0); + } + public BigdataStatement createStatement(Resource s, URI p, Value o) { return valueFactory.createStatement(s, p, o); } Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactory.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -27,6 +27,8 @@ package com.bigdata.rdf.model; +import java.util.Date; + import javax.xml.datatype.XMLGregorianCalendar; import org.openrdf.model.BNode; @@ -114,6 +116,8 @@ BigdataLiteral createLiteral(XMLGregorianCalendar arg0); + BigdataLiteral createLiteral(Date arg0); + BigdataLiteral createLiteral(String label, String language); BigdataLiteral createLiteral(String label, URI datatype); Modified: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/model/BigdataValueFactoryImpl.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -27,16 +27,19 @@ package com.bigdata.rdf.model; +import java.util.Date; +import java.util.GregorianCalendar; import java.util.LinkedHashMap; import java.util.Map; import java.util.UUID; +import javax.xml.datatype.DatatypeConfigurationException; +import javax.xml.datatype.DatatypeFactory; import javax.xml.datatype.XMLGregorianCalendar; import org.openrdf.model.BNode; import org.openrdf.model.Literal; import org.openrdf.model.Resource; -import org.openrdf.model.Statement; import org.openrdf.model.URI; import 
org.openrdf.model.Value; import org.openrdf.model.datatypes.XMLDatatypeUtil; @@ -396,6 +399,18 @@ return new BigdataLiteralImpl(this, "" + arg0, null, xsd_double); } + + public BigdataLiteralImpl createLiteral(final Date date) { + GregorianCalendar c = new GregorianCalendar(); + c.setTime(date); + try { + XMLGregorianCalendar xmlGregCalendar = DatatypeFactory.newInstance().newXMLGregorianCalendar(c); + return createLiteral(xmlGregCalendar); + } + catch (DatatypeConfigurationException e) { + throw new RuntimeException("Could not instantiate javax.xml.datatype.DatatypeFactory", e); + } + } public BigdataLiteralImpl createLiteral(final XMLGregorianCalendar arg0) { Deleted: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -1,574 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.rdf.rio.json; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; -import java.io.Reader; - -import org.apache.log4j.Logger; -import org.openrdf.model.Resource; -import org.openrdf.model.Statement; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.model.ValueFactory; -import org.openrdf.rio.RDFFormat; -import org.openrdf.rio.RDFHandlerException; -import org.openrdf.rio.RDFParseException; -import org.openrdf.rio.helpers.RDFParserBase; - -import com.bigdata.rdf.model.BigdataStatement; -import com.bigdata.rdf.model.BigdataValueFactory; -import com.bigdata.rdf.model.BigdataValueFactoryImpl; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; - -/** - * RDF parser for JSON SPARQL Results files that have the variables (s, p, o, - * and optionally c) in the header. - */ -public class BigdataSPARQLResultsJSONParser extends RDFParserBase { - - protected static final transient Logger log = - Logger.getLogger(BigdataSPARQLResultsJSONParser.class); - - - private LineNumberReader lineReader; - - private BigdataValueFactory vf; - - /** - * Default ctor uses a BigdataValueFactory with a namespace of "". Used - * for testing. - */ - public BigdataSPARQLResultsJSONParser() { - this(BigdataValueFactoryImpl.getInstance("")); - } - - /** - * Construct a parser with the supplied BigdataValueFactory. 
- */ - public BigdataSPARQLResultsJSONParser(final BigdataValueFactory vf) { - super(vf); - - this.vf = vf; - } - - /** - * Set the value factory. Must be a BigdataValueFactory because of the - * RDR syntax support. - */ - public void setValueFactory(final ValueFactory vf) { - if (vf instanceof BigdataValueFactory) { - this.vf = (BigdataValueFactory) vf; - } else { - throw new IllegalArgumentException(); - } - } - - /** - * Returns {@link BigdataSPARQLResultsJSONParserFactory#JSON}. - */ - @Override - public RDFFormat getRDFFormat() { - - return BigdataSPARQLResultsJSONParserFactory.JSON; - - } - - /** - * Parse the supplied input stream into RDF. - */ - @Override - public void parse(final InputStream is, final String baseURI) throws IOException, - RDFParseException, RDFHandlerException { - - parse(new InputStreamReader(is), baseURI); - - } - - /** - * Parse the supplied reader into RDF. - */ - @Override - public void parse(final Reader r, final String baseURI) throws IOException, - RDFParseException, RDFHandlerException { - - lineReader = new LineNumberReader(r); - // Start counting lines at 1: - lineReader.setLineNumber(1); - - // read graph from JSON in request - - final JsonFactory factory = new JsonFactory(); - - final JsonParser parser = factory.createJsonParser(lineReader); - -// final JsonParser parser = Json.createParser(lineReader); - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.START_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("head"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.START_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("vars"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.START_ARRAY) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("s"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("p"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING && !(parser.getCurrentName().equals("o"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event == JsonToken.VALUE_STRING) { - - if (!(parser.getCurrentName().equals("c"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - } - - if (event != JsonToken.END_ARRAY) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.END_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("results"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.START_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && 
!(parser.getCurrentName().equals("bindings"))) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.START_ARRAY) { - reportFatalError("unexpected parse event: " + event); - } - - -// boolean startingBindings = false; -// boolean breakLoop = false; -// -// while (parser.hasNext()) { -// JsonToken event = parser.nextToken(); -// switch (event) { -// case START_ARRAY: -// if (startingBindings) -// breakLoop = true; -// case END_ARRAY: -// case START_OBJECT: -// case END_OBJECT: -// case VALUE_FALSE: -// case VALUE_NULL: -// case VALUE_TRUE: -// System.err.println(event.toString()); -// break; -// case KEY_NAME: -// if (parser.getString().equals("bindings")) -// startingBindings = true; -// System.err.println(event.toString() + " " -// + parser.getString()); -// break; -// case VALUE_STRING: -// case VALUE_NUMBER: -// System.err.println(event.toString() + " " -// + parser.getString()); -// break; -// } -// if (breakLoop) -// break; -// } - - rdfHandler.startRDF(); - - Statement stmt; - while ((stmt = parseStatement(parser)) != null) { - - if (log.isDebugEnabled()) - log.debug(stmt); - - rdfHandler.handleStatement(stmt); - - } - - rdfHandler.endRDF(); - - } - - /** - * Parse a statement from the JSON stream. - */ - private final BigdataStatement parseStatement( - final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event == null || event == JsonToken.END_ARRAY) { - - return null; - - } - - if (event != JsonToken.START_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("s"))) { - reportFatalError("unexpected parse event: " + event); - } - - final Resource s = (Resource) parseValue(parser); - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("p"))) { - reportFatalError("unexpected parse event: " + event); - } - - final URI p = (URI) parseValue(parser); - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !(parser.getCurrentName().equals("o"))) { - reportFatalError("unexpected parse event: " + event); - } - - final Value o = parseValue(parser); - - event = parser.nextToken(); - - switch (event) { - case END_OBJECT: - return vf.createStatement(s, p, o); - case FIELD_NAME: - if (!(parser.getCurrentName().equals("c"))) { - reportFatalError("unexpected parse event: " + event); - } - final Resource c = (Resource) parseValue(parser); - event = parser.nextToken(); - if (event != JsonToken.END_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - return vf.createStatement(s, p, o, c); - default: - reportFatalError("unexpected parse event: " + event); - } - - // unreachable code - return null; - - } - - /** - * Parse a value from the JSON stream. 
- */ - protected Value parseValue(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.START_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("type")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - final String type = parser.getText(); - - Value val = null; - - if ("sid".equals(type)) { - - val = parseSid(parser); - - } else if ("uri".equals(type)) { - - val = parseURI(parser); - - } else if ("bnode".equals(type)) { - - val = parseBNode(parser); - - } else if ("literal".equals(type)) { - - val = parseLiteral(parser); - - } else if ("typed-literal".equals(type)) { - - val = parseTypedLiteral(parser); - - } else { - - reportFatalError("unexpected parse event: " + event); - - } - - event = parser.nextToken(); - - if (event != JsonToken.END_OBJECT) { - reportFatalError("unexpected parse event: " + event); - } - - return val; - - } - - /** - * Parse a sid from the JSON stream. - */ - protected Value parseSid(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { - reportFatalError("unexpected parse event: " + event); - } - - final BigdataStatement stmt = parseStatement(parser); - - return vf.createBNode(stmt); - - } - - /** - * Parse a URI from the JSON stream. - */ - protected Value parseURI(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - return vf.createURI(parser.getText()); - - } - - /** - * Parse a bnode from the JSON stream. - */ - protected Value parseBNode(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - return vf.createBNode(parser.getText()); - - } - - /** - * Parse a plain literal or language-tagged literal from the JSON stream. 
- */ - protected Value parseLiteral(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME) - reportFatalError("unexpected parse event: " + event); - - if (parser.getCurrentName().equals("xml:lang")) { - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - final String lang = parser.getText(); - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - return vf.createLiteral(parser.getText(), lang); - - } else if (parser.getCurrentName().equals("value")) { - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - return vf.createLiteral(parser.getText()); - - } else { - - reportFatalError("unexpected parse event: " + event); - - // unreachable code - return null; - - } - - } - - /** - * Parse a typed literal from the JSON stream. - */ - protected Value parseTypedLiteral(final JsonParser parser) - throws RDFParseException, JsonParseException, IOException { - - JsonToken event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("datatype")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - final URI datatype = vf.createURI(parser.getText()); - - event = parser.nextToken(); - - if (event != JsonToken.FIELD_NAME && !parser.getCurrentName().equals("value")) { - reportFatalError("unexpected parse event: " + event); - } - - event = parser.nextToken(); - - if (event != JsonToken.VALUE_STRING) { - reportFatalError("unexpected parse event: " + event); - } - - return vf.createLiteral(parser.getText(), datatype); - - } - - /** - * Overrides {@link RDFParserBase#reportFatalError(String)}, adding line - * number information to the error. - */ - @Override - protected void reportFatalError(String msg) throws RDFParseException { - - reportFatalError(msg, lineReader.getLineNumber(), -1); - - } - - -} Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -0,0 +1,198 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. See the LICENSE.txt file distributed with this work + * for the full License. 
+ * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. + */ +package com.bigdata.rdf.rio.json; + +import java.io.IOException; +import java.io.InputStream; +import java.util.LinkedHashMap; +import java.util.Map; + +import org.openrdf.model.Resource; +import org.openrdf.model.URI; +import org.openrdf.model.Value; +import org.openrdf.model.ValueFactory; +import org.openrdf.query.QueryResultHandlerException; +import org.openrdf.query.TupleQueryResultHandler; +import org.openrdf.query.TupleQueryResultHandlerException; +import org.openrdf.query.resultio.QueryResultFormat; +import org.openrdf.query.resultio.QueryResultParseException; +import org.openrdf.query.resultio.TupleQueryResultFormat; +import org.openrdf.query.resultio.TupleQueryResultParser; + +import com.bigdata.rdf.model.BigdataStatement; +import com.bigdata.rdf.model.BigdataValueFactory; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; + +/** + * Parser for SPARQL-1.1 JSON Results Format documents + * + * @see <a href="http://www.w3.org/TR/sparql11-results-json/">SPARQL 1.1 Query + * Results JSON Format</a> + * @author Peter Ansell + */ +public class BigdataSPARQLResultsJSONParser extends SPARQLJSONParserBase implements TupleQueryResultParser { + + public static final String STATEMENT = "statement"; + + public static final String SUBJECT = "subject"; + + public static final String PREDICATE = "predicate"; + + public static final String OBJECT = "object"; + + public static final String CONTEXT = "context"; + + /** + * Default constructor. + */ + public BigdataSPARQLResultsJSONParser() { + super(); + } + + /** + * Construct a parser with a specific {@link ValueFactory}. + * + * @param valueFactory + * The factory to use to create values. 
+ */ + public BigdataSPARQLResultsJSONParser(ValueFactory valueFactory) { + super(valueFactory); + } + + @Override + public QueryResultFormat getQueryResultFormat() { + return getTupleQueryResultFormat(); + } + + @Override + public TupleQueryResultFormat getTupleQueryResultFormat() { + return TupleQueryResultFormat.JSON; + } + + @Override + @Deprecated + public void setTupleQueryResultHandler(TupleQueryResultHandler handler) { + setQueryResultHandler(handler); + } + + @Override + @Deprecated + public void parse(InputStream in) + throws IOException, QueryResultParseException, TupleQueryResultHandlerException + { + try { + parseQueryResultInternal(in, false, true); + } + catch (TupleQueryResultHandlerException e) { + throw e; + } + catch (QueryResultHandlerException e) { + throw new TupleQueryResultHandlerException(e); + } + } + + protected Value parseValue(final String bindingStr, final JsonParser jp) + throws QueryResultParseException, JsonParseException, IOException { + + String lang = null; + String type = null; + String datatype = null; + String value = null; + + // added for Sids support + final Map<String, Value> sid = new LinkedHashMap<String, Value>(); + + while (jp.nextToken() != JsonToken.END_OBJECT) { + + if (jp.getCurrentToken() != JsonToken.FIELD_NAME) { + throw new QueryResultParseException("Did not find value attribute under " + + bindingStr + " field", jp.getCurrentLocation().getLineNr(), + jp.getCurrentLocation().getColumnNr()); + } + String fieldName = jp.getCurrentName(); + + // move to the value token + jp.nextToken(); + + // set the appropriate state variable + if (TYPE.equals(fieldName)) { + type = jp.getText(); + } + else if (XMLLANG.equals(fieldName)) { + lang = jp.getText(); + } + else if (DATATYPE.equals(fieldName)) { + datatype = jp.getText(); + } + else if (VALUE.equals(fieldName)) { + value = jp.getText(); + } + // added for Sids support + else if (jp.getCurrentToken() == JsonToken.START_OBJECT) { + sid.put(fieldName, parseValue(bindingStr, jp)); + } + else { + throw new QueryResultParseException("Unexpected field name: " + fieldName, + jp.getCurrentLocation().getLineNr(), + jp.getCurrentLocation().getColumnNr()); + + } + } + + // added for Sids support + if (type.equals(STATEMENT)) { + + final Resource s = (Resource) sid.get(SUBJECT); + final URI p = (URI) sid.get(PREDICATE); + final Value o = (Value) sid.get(OBJECT); + final Resource c = (Resource) sid.get(CONTEXT); + + if (s == null) { + throw new QueryResultParseException("Missing subject for statement: " + bindingStr, + jp.getCurrentLocation().getLineNr(), + jp.getCurrentLocation().getColumnNr()); + } + + if (p == null) { + throw new QueryResultParseException("Missing predicate for statement: " + bindingStr, + jp.getCurrentLocation().getLineNr(), + jp.getCurrentLocation().getColumnNr()); + } + + if (o == null) { + throw new QueryResultParseException("Missing object for statement: " + bindingStr, + jp.getCurrentLocation().getLineNr(), + jp.getCurrentLocation().getColumnNr()); + } + + final BigdataValueFactory valueFactory = + (BigdataValueFactory) super.valueFactory; + + final BigdataStatement stmt = valueFactory.createStatement(s, p, o, c); + + return valueFactory.createBNode(stmt); + + } + + return parseValue(type, value, lang, datatype); + + } + + +} Property changes on: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParser.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end 
of property Deleted: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -1,68 +0,0 @@ -/** -Copyright (C) SYSTAP, LLC 2006-2014. All rights reserved. - -Contact: - SYSTAP, LLC - 4501 Tower Road - Greensboro, NC 27410 - lic...@bi... - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ -package com.bigdata.rdf.rio.json; - -import java.nio.charset.Charset; - -import org.openrdf.rio.RDFFormat; -import org.openrdf.rio.RDFParser; -import org.openrdf.rio.RDFParserFactory; -import org.openrdf.rio.turtle.TurtleParser; - -/** - * An {@link RDFParserFactory} for Turtle parsers. - * - * @author Arjohn Kampman - * @openrdf - */ -public class BigdataSPARQLResultsJSONParserFactory implements RDFParserFactory { - - public static final RDFFormat JSON = new RDFFormat( - "JSON", // name - "application/sparql-results+json", // mime-type - Charset.forName("UTF-8"), // charset - "json", // file extension - false, // supports namespaces - true // supports contexts - ); - - static { - - RDFFormat.register(JSON); - - } - - /** - * Returns {@link RDFFormat#TURTLE}. - */ - public RDFFormat getRDFFormat() { - return JSON; - } - - /** - * Returns a new instance of {@link TurtleParser}. - */ - public RDFParser getParser() { - return new BigdataSPARQLResultsJSONParser(); - } -} Added: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java (rev 0) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -0,0 +1,44 @@ +/* + * Licensed to Aduna under one or more contributor license agreements. + * See the NOTICE.txt file distributed with this work for additional + * information regarding copyright ownership. + * + * Aduna licenses this file to you under the terms of the Aduna BSD + * License (the "License"); you may not use this file except in compliance + * with the License. See the LICENSE.txt file distributed with this work + * for the full License. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ */ +package com.bigdata.rdf.rio.json; + +import org.openrdf.query.resultio.TupleQueryResultFormat; +import org.openrdf.query.resultio.TupleQueryResultParser; +import org.openrdf.query.resultio.TupleQueryResultParserFactory; + +/** + * A {@link TupleQueryResultParserFactory} for parsers of SPARQL-1.1 JSON Tuple + * Query Results. + * + * @author Peter Ansell + */ +public class BigdataSPARQLResultsJSONParserFactory implements TupleQueryResultParserFactory { + + /** + * Returns {@link TupleQueryResultFormat#JSON}. + */ + public TupleQueryResultFormat getTupleQueryResultFormat() { + return TupleQueryResultFormat.JSON; + } + + /** + * Returns a new instance of {@link SPARQLResultsJSONParser}. + */ + public TupleQueryResultParser getParser() { + return new BigdataSPARQLResultsJSONParser(); + } +} Property changes on: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONParserFactory.java ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Deleted: branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java =================================================================== --- branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-05-30 21:19:57 UTC (rev 8429) +++ branches/SESAME_2_7/bigdata-rdf/src/java/com/bigdata/rdf/rio/json/BigdataSPARQLResultsJSONWriter.java 2014-06-01 21:23:37 UTC (rev 8430) @@ -1,475 +0,0 @@ -/* - * Copyright Aduna (http://www.aduna-software.com/) (c) 1997-2007. - * - * Licensed under the Aduna BSD-style license. - */ -package com.bigdata.rdf.rio.json; - -import info.aduna.io.IndentingWriter; -import info.aduna.text.StringUtil; - -import java.io.BufferedWriter; -import java.io.IOException; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.nio.charset.Charset; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.openrdf.model.BNode; -import org.openrdf.model.Literal; -import org.openrdf.model.Statement; -import org.openrdf.model.URI; -import org.openrdf.model.Value; -import org.openrdf.query.Binding; -import org.openrdf.query.BindingSet; -import org.openrdf.query.TupleQueryResultHandlerException; -import org.openrdf.query.resultio.TupleQueryResultFormat; -import org.openrdf.query.resultio.TupleQueryResultWriter; -import org.openrdf.rio.RDFFormat; -import org.openrdf.rio.RDFHandlerException; -import org.openrdf.rio.RDFWriter; - -import com.bigdata.rdf.model.BigdataBNode; -import com.bigdata.rdf.model.BigdataStatement; - -/** - * A TupleQueryResultWriter that writes query results in the <a - * href="http://www.w3.org/TR/rdf-sparql-json-res/">SPARQL Query Results JSON - * Format</a>. - */ -public class BigdataSPARQLResultsJSONWriter implements TupleQueryResultWriter, RDFWriter { - - /*-----------* - * Variables * - *-----------*/ - - private IndentingWriter writer; - - private boolean firstTupleWritten; - - /*--------------* - * Constructors * - *--------------*/ - - public BigdataSPARQLResultsJSONWriter(OutputStream out) { - this(new OutputStreamWriter(out, Charset.forName("UTF-8"))); - } - - public BigdataSPARQLResultsJSONWriter(Writer w) { - w = new BufferedWriter(w, 1024); - writer = new IndentingWriter(w); - } - - /*---------* - * Methods * - *---------*/ - - /** - * This is the only method that is different from the OpenRDF version. 
- * I could not subclass their implementation because the IndentingWriter - * is private. - */ - private void writeValue(Value value) - throws IOException, TupleQueryResultHandlerException - { - writer.write("{ "); - - if (value instanceof URI) { - writeKeyValue("type", "uri"); - writer.write(", "); - writeKeyValue("value", ((URI)value).toString()); - } - else if (value instanceof BigdataBNode && - ((BigdataBNode) value).isStatementIdentifier()) { - -// "bindings": [ -// { -// "book": { "type": "uri" , "value": "http://example.org/book/book6" } , -// "title": { "type": "literal" , "value": "Harry Potter and the Half-Blood Prince" } -// } , -// { -// "book": { "type": "sid" , "value": -// { -// "s": { "type": "uri" , "value": "<s>" } , -// "p": { "type": "uri" , "value": "<p>" } , -// "o": { "type": "uri" , "value": "<o>" } -// } -// } -// "title": { "type": "literal" , "value": "Harry Potter and the Deathly Hallows" } -// } , - - final BigdataBNode bnode = (BigdataBNode) value; - final BigdataStatement stmt = bnode.getStatement(); - writeKeyValue("type", "sid"); - writer.write(", "); - writeKey("value"); - openBraces(); - writeKeyValue("s", stmt.getSubject()); - writeComma(); - writeKeyValue("p", stmt.getPredicate()); - writeComma(); - writeKeyValue("o", stmt.getObject()); - - if (stmt.getContext() != null) { - writeComma(); - writeKeyValue("c", stmt.getContext()); - } - closeBraces(); - - } - else if (value instanceof BNode) { - writeKeyValue("type", "bnode"); - writer.write(", "); - writeKeyValue("value", ((BNode)value).getID()); - } - else if (value instanceof Literal) { - Literal lit = (Literal)value; - - if (lit.getDatatype() != null) { - writeKeyValue("type", "typed-literal"); - writer.write(", "); - writeKeyValue("datatype", lit.getDatatype().toString()); - } - else { - writeKeyValue("type", "literal"); - if (lit.getLanguage() != null) { - writer.write(", "); - writeKeyValue("xml:lang", lit.getLanguage()); - } - } - - writer.write(", "); - writeKeyValue("value", lit.getLabel()); - } - else { - throw new TupleQueryResultHandlerException("Unknown Value object type: " + value.getClass()); - } - - writer.write(" }"); - } - - - public final TupleQueryResultFormat getTupleQueryResultFormat() { - return TupleQueryResultFormat.JSON; - } - - public void startQueryResult(List<String> columnHeaders) - throws TupleQueryResultHandlerException - { - try { - openBraces(); - - // Write header - writeKey("head"); - openBraces(); - writeKeyValue("vars", columnHeaders); - closeBraces(); - - writeComma(); - - // Write results - writeKey("results"); - openBraces(); - - writeKey("bindings"); - openArray(); - - firstTupleWritten = false; - } - catch (IOException e) { - throw new TupleQueryResultHandlerException(e); - } - } - - public void endQueryResult() - throws TupleQueryResultHandlerException - { - try { - closeArray(); // bindings array - closeBraces(); // results braces - closeBraces(); // root braces - writer.flush(); - } - catch (IOException e) { - throw new TupleQueryResultHandlerException(e); - } - } - - public void handleStatement(final Statement stmt) throws RDFHandlerException - { - try { - if (firstTupleWritten) { - writeComma(); - } - else { - firstTupleWritten = true; - } - - openBraces(); // start of new solution - - writeKeyValue("s", stmt.getSubject()); - writeComma(); - writeKeyValue("p", stmt.getPredicate()); - writeComma(); - writeKeyValue("o", stmt.getObject()); - if (stmt.getContext() != null) { - writeComma(); - writeKeyValue("c", stmt.getContext()); - } - -// Iterator<Binding> 
bindingIter = bindingSet.iterator(); -// while (bindingIter.hasNext()) { -// Binding binding = bindingIter.next(); -// -// writeKeyValue(binding.getName(), binding.getValue()); -// -// if (bindingIter.hasNext()) { -// writeComma(); -// } -// } - - closeBraces(); // end solution - - writer.flush(); - } - catch (TupleQueryResultHandlerException e) { - throw new RDFHandlerException(e); - } - catch (IOException e) { - throw new RDFHandlerException(e); - } - } - - public void handleSolution(BindingSet bindingSet) - throws TupleQueryResultHandlerException ... [truncated message content] |
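For orientation, here is a minimal usage sketch of the reworked parser above. The class, handler and stream names are placeholders (not part of the commit); the calls themselves, the no-arg constructor, setQueryResultHandler(...) and the deprecated parse(InputStream), are the ones visible in the diff.

    import java.io.InputStream;

    import org.openrdf.query.TupleQueryResultHandler;

    import com.bigdata.rdf.rio.json.BigdataSPARQLResultsJSONParser;

    /**
     * Sketch: drive the SPARQL-1.1 JSON results parser against a stream that
     * may contain RDR "statement" bindings.
     */
    public class ParseJsonResultsSketch {

        public static void parse(final InputStream jsonInput,
                final TupleQueryResultHandler handler) throws Exception {

            // No-arg constructor from the diff. Note: for "statement" (SID)
            // bindings, parseValue(...) casts the parser's ValueFactory to a
            // BigdataValueFactory, so the ValueFactory constructor should be
            // given a BigdataValueFactory when SIDs are expected.
            final BigdataSPARQLResultsJSONParser parser =
                    new BigdataSPARQLResultsJSONParser();

            // Solutions are pushed to the handler as they are parsed.
            parser.setQueryResultHandler(handler);

            // Deprecated single-argument form shown in the diff; it delegates
            // to parseQueryResultInternal(in, false, true).
            parser.parse(jsonInput);
        }
    }

A binding value of type "statement" is expected to carry nested "subject", "predicate", "object" and optionally "context" objects; the parser folds these into a BigdataStatement and returns it wrapped as a blank node (SID), which is what the STATEMENT/SUBJECT/PREDICATE/OBJECT/CONTEXT constants above are for.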
From: <dme...@us...> - 2014-05-30 21:20:02
Revision: 8429 http://sourceforge.net/p/bigdata/code/8429 Author: dmekonnen Date: 2014-05-30 21:19:57 +0000 (Fri, 30 May 2014) Log Message: ----------- Descriptions of the VirtualBox Vagrant files. Relocating the dual-provider to reside with the aws.rc dependency. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.dual-provider.tomcat branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/README.txt Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt 2014-05-30 21:14:21 UTC (rev 8428) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt 2014-05-30 21:19:57 UTC (rev 8429) @@ -8,6 +8,7 @@ These variables are set in the aws.rc file for convenience and must be "sourced" prior to running vagrant. + Relevant files: --------------- @@ -20,13 +21,13 @@ Vagrantfile.aws.mapgraph - Builds the MapGraph project from its Subversion archive - on an Amazon Linux AMI with NVIDIA GRID GPU Driver. + on an Amazon Linux AMI with NVIDIA GRID GPU Driver. Vagrantfile.aws.tomcat - Creates an EC2 instance (Ubuntu 12.04 by default) and installs - Tomcat 7 and deploys the Bigdata WAR file as a service. + Tomcat 7 and deploys the Bigdata WAR file as a service. Vagrantfile.aws.tomcat.build-from-svn - Like Vagrantfile.aws.tomcat but the Bigdata WAR - file will be built from a specified subversion repository branch. + file will be built from a specified subversion repository branch. Vagrantfile.aws.nss - Creates an EC2 instance (Ubuntu 12.04 by default) and installs and starts a Bigdata NanoSparqlServer (NSS) Jetty server instance. @@ -34,8 +35,15 @@ Vagrantfile.aws.nss.build-from-svn - Like Vagrantfile.aws.nss but the Bigdata NSS server will be built from a specified subversion repository branch. +Vagrantfile.dual-provider.tomcat - An example file for defining two providers within the + same Vagrantfile that will deploy Tomcat and the Bigdata WAR file + to the virtual machine instance. By default the file will create + a VirtualBox instance. 
To launch an EC2 instance, specify the + AWS provider as per: + vagrant up --provider=aws + Sample Session -------------- @@ -45,6 +53,8 @@ % cp Vagrantfile.aws.tomcat Vagrantfile % vagrant up +# The bigdata server is now found at the public IP address of the instance: http://<public-ip>:8080/bigdata + # to login to the host: % vagrant ssh Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.dual-provider.tomcat (from rev 8425, branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat) =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.dual-provider.tomcat (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.dual-provider.tomcat 2014-05-30 21:19:57 UTC (rev 8429) @@ -0,0 +1,84 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Vagrantfile.dual-provider.tomcat - Install Bigdata under Tomcat with an either a VirtualBox (Default) or AWS Provider +# +# The launch synopsis for this Vagrantfile: +# +# VirtualBox provider: +# +# % vagrant up +# +# AWS provider: +# +# % source ./aws.rc +# % vagrant up --provider=aws +# + +Vagrant.require_plugin "vagrant-berkshelf" + +Vagrant.configure("2") do |config| + # + # By default, the VirtualBox provider will be launched. + # Defaults are set for the VirtualBox assumption. + # + config.vm.box = "precise64" + config.vm.hostname = "bigdata" + + config.berkshelf.enabled = true + + config.vm.provider :virtualbox do |vb| + vb.vm.box_url = "http://files.vagrantup.com/precise64.box" + + vb.vm.network :private_network, ip: "33.33.33.10" + end + + # + # The AWS provider will be used specified at the command line as per: + # % vagrant up --provider=aws + # + config.vm.provider :aws do |aws, override| + override.vm.box = "dummy" + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_A'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + + config.vm.provision :chef_solo do |chef| + chef.json = { + :bigdata => { + :install_flavor => "tomcat", + :build_from_svn => false + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :tomcat => { + :base_version => "7" + } + } + + config.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[bigdata::tomcat]" + ] + + end +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/README.txt 2014-05-30 21:19:57 UTC (rev 8429) @@ -0,0 +1,42 @@ +This directory contains a collection of Vagrantfile samples that demonstrate how +to launch a VirtualBox instance (the vagrant default) and configure it 
with a +Bigdata server or MapGraph under several useful scenarios. + +The Vagrantfiles are named with a descriptive extension, and must be copied to +the generic "Vagrantfile" to be read by the vagrant program. + + +Relevant files: +--------------- + +Vagrantfile.tomcat - Creates aa VirtualBox instance (Ubuntu 12.04 by default) and installs + Tomcat 7 and deploys the Bigdata WAR file as a service. + +Vagrantfile.tomcat.build-from-svn - Like Vagrantfile.tomcat but the Bigdata WAR + file will be built from a specified subversion repository branch. + +Vagrantfile.nss - Creates an VirtualBox instance (Ubuntu 12.04 by default) and installs + and starts a Bigdata NanoSparqlServer (NSS) Jetty server instance. + +Vagrantfile.nss.build-from-svn - Like Vagrantfile.nss but the Bigdata NSS server + will be built from a specified subversion repository branch. + + +Sample Session +-------------- + +% cp Vagrantfile.tomcat Vagrantfile +% vagrant up + +# The bigdata server is now found at: http://33.33.33.10:8080/bigdata + + +# to login to the host: +% vagrant ssh + +# to terminate the EC2 instance: +% vagrant destroy + +% cp Vagrantfile.tomcat.build-from-svn Vagrantfile +# edit the Vagrantfile and set the :svn_branch variable as desired +% vagrant up Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat 2014-05-30 21:14:21 UTC (rev 8428) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat 2014-05-30 21:19:57 UTC (rev 8429) @@ -1,84 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# Vagrantfile.dual-provider.tomcat - Install Bigdata under Tomcat with an either a VirtualBox (Default) or AWS Provider -# -# The launch synopsis for this Vagrantfile: -# -# VirtualBox provider: -# -# % vagrant up -# -# AWS provider: -# -# % source ./aws.rc -# % vagrant up --provider=aws -# - -Vagrant.require_plugin "vagrant-berkshelf" - -Vagrant.configure("2") do |config| - # - # By default, the VirtualBox provider will be launched. - # Defaults are set for the VirtualBox assumption. 
- # - config.vm.box = "precise64" - config.vm.hostname = "bigdata" - - config.berkshelf.enabled = true - - config.vm.provider :virtualbox do |vb| - vb.vm.box_url = "http://files.vagrantup.com/precise64.box" - - vb.vm.network :private_network, ip: "33.33.33.10" - end - - # - # The AWS provider will be used specified at the command line as per: - # % vagrant up --provider=aws - # - config.vm.provider :aws do |aws, override| - override.vm.box = "dummy" - aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] - aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] - aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] - - aws.ami = ENV['AWS_AMI'] - - aws.region = ENV['AWS_REGION'] - aws.instance_type = ENV['AWS_INSTANCE_TYPE'] - aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] - - aws.tags = { - 'Name' => ENV['BIGDATA_HA_HOST_A'] - } - - override.ssh.username = ENV['AWS_AMI_USERNAME'] - override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] - end - - - config.vm.provision :chef_solo do |chef| - chef.json = { - :bigdata => { - :install_flavor => "tomcat", - :build_from_svn => false - }, - :java => { - :install_flavor => "oracle", - :jdk_version => "7", - :oracle => { 'accept_oracle_download_terms' => true } - }, - :tomcat => { - :base_version => "7" - } - } - - config.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" - - chef.run_list = [ - "recipe[bigdata::tomcat]" - ] - - end -end This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-05-30 21:14:26
Revision: 8428
          http://sourceforge.net/p/bigdata/code/8428
Author:   tobycraig
Date:     2014-05-30 21:14:21 +0000 (Fri, 30 May 2014)

Log Message:
-----------
Moved namespace shortcuts back to right side

Modified Paths:
--------------
    branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css

Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css
===================================================================
--- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css  2014-05-30 21:13:07 UTC (rev 8427)
+++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css  2014-05-30 21:14:21 UTC (rev 8428)
@@ -192,6 +192,11 @@
   clear: both;
 }
 
+.namespace-shortcuts {
+  float: right;
+  margin-bottom: 20px;
+}
+
 #large-file-message {
   display: none;
   margin: 5px 0;

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
From: <tob...@us...> - 2014-05-30 21:13:12
Revision: 8427 http://sourceforge.net/p/bigdata/code/8427 Author: tobycraig Date: 2014-05-30 21:13:07 +0000 (Fri, 30 May 2014) Log Message: ----------- Added download namespace properties functionality Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-30 21:01:10 UTC (rev 8426) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-30 21:13:07 UTC (rev 8427) @@ -107,7 +107,7 @@ } else { use = '<a href="#" class="use-namespace">Use</a>'; } - $('#namespaces-list').append('<li data-name="' + title + '" data-url="' + url + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> - <a href="/bigdata/namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); + $('#namespaces-list').append('<li data-name="' + title + '" data-url="' + url + '">' + titleText + ' - ' + use + ' - <a href="#" class="delete-namespace">Delete</a> - <a href="#" class="namespace-properties">Properties</a> (Download <a href="/bigdata/namespace/' + title + '/properties" download="' + title + '.xml">XML</a>/<a href="#" class="namespace-properties-java">Java</a>) - <a href="#" class="clone-namespace">Clone</a> - <a href="/bigdata/namespace/' + title + '/sparql" class="namespace-service-description">Service Description</a></li>'); } $('.use-namespace').click(function(e) { e.preventDefault(); @@ -121,6 +121,14 @@ e.preventDefault(); getNamespaceProperties($(this).parent().data('name')); }); + $('.namespace-properties-java').click(function(e) { + e.preventDefault(); + getNamespaceProperties($(this).parent().data('name'), 'java'); + }); + $('.clone-namespace').click(function(e) { + e.preventDefault(); + cloneNamespace($(this).parent().data('name')); + }); $('.namespace-service-description').click(function(e) { return confirm('This can be an expensive operation. Proceed anyway?'); }); @@ -165,15 +173,25 @@ } } -function getNamespaceProperties(namespace) { - $('#namespace-properties h1').html(namespace); - $('#namespace-properties table').empty(); - $('#namespace-properties').show(); +function getNamespaceProperties(namespace, download) { var url = '/bigdata/namespace/' + namespace + '/properties'; + if(!download) { + $('#namespace-properties h1').html(namespace); + $('#namespace-properties table').empty(); + $('#namespace-properties').show(); + } $.get(url, function(data) { + var java = ''; $.each(data.getElementsByTagName('entry'), function(i, entry) { - $('#namespace-properties table').append('<tr><td>' + entry.getAttribute('key') + '</td><td>' + entry.textContent + '</td></tr>'); + if(download) { + java += entry.getAttribute('key') + '=' + entry.textContent + '\n'; + } else { + $('#namespace-properties table').append('<tr><td>' + entry.getAttribute('key') + '</td><td>' + entry.textContent + '</td></tr>'); + } }); + if(download) { + downloadFile(java, 'text/x-java-properties', this.url.split('/')[3] + '.properties'); + } }); } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
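As a concrete note on the Java-properties download added above: getNamespaceProperties(namespace, 'java') emits one key=value line per <entry> element of the namespace properties XML, so an entry whose key attribute is com.bigdata.journal.AbstractJournal.bufferMode with text DiskRW would be written out as the line com.bigdata.journal.AbstractJournal.bufferMode=DiskRW (illustrative key and value only; the actual entries depend on the namespace). The assembled text is then passed to a downloadFile(...) helper that is referenced here but not part of this diff.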
From: <dme...@us...> - 2014-05-30 21:01:19
Revision: 8426 http://sourceforge.net/p/bigdata/code/8426 Author: dmekonnen Date: 2014-05-30 21:01:10 +0000 (Fri, 30 May 2014) Log Message: ----------- Descriptions of the AWS Vagrant files. Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/README.txt 2014-05-30 21:01:10 UTC (rev 8426) @@ -0,0 +1,57 @@ +This directory contains a collection of Vagrantfile samples that demonstrate how +to launch an EC2 instance and configure it with a Bigdata server or MapGraph +under several useful scenarios. + +The Vagrantfiles are named with a descriptive extension, and must be copied to +the generic "Vagrantfile" to be read by the vagrant program. The Vagrantfile +in turn depends on AWS access credentials set in shell environment variables. +These variables are set in the aws.rc file for convenience and must be "sourced" +prior to running vagrant. + +Relevant files: +--------------- + +aws.rc - Set your AWS access credentials here, then: + +% source ./aws.rc + +This will export your AWS credentials into environment variables that the Vagrant +file will apply to create your EC2 instance. + + +Vagrantfile.aws.mapgraph - Builds the MapGraph project from its Subversion archive + on an Amazon Linux AMI with NVIDIA GRID GPU Driver. + +Vagrantfile.aws.tomcat - Creates an EC2 instance (Ubuntu 12.04 by default) and installs + Tomcat 7 and deploys the Bigdata WAR file as a service. + +Vagrantfile.aws.tomcat.build-from-svn - Like Vagrantfile.aws.tomcat but the Bigdata WAR + file will be built from a specified subversion repository branch. + +Vagrantfile.aws.nss - Creates an EC2 instance (Ubuntu 12.04 by default) and installs + and starts a Bigdata NanoSparqlServer (NSS) Jetty server instance. + +Vagrantfile.aws.nss.build-from-svn - Like Vagrantfile.aws.nss but the Bigdata NSS server + will be built from a specified subversion repository branch. + + + +Sample Session +-------------- + +# edit aws.rc with your favorite editor, this only needs to be done once, then + +% source aws.rc +% cp Vagrantfile.aws.tomcat Vagrantfile +% vagrant up + +# to login to the host: +% vagrant ssh + +# to terminate the EC2 instance: +% vagrant destroy + +% cp Vagrantfile.aws.tomcat.build-from-svn Vagrantfile +# edit the Vagrantfile and set the :svn_branch variable as desired +% vagrant up + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-05-27 19:52:52
Revision: 8425 http://sourceforge.net/p/bigdata/code/8425 Author: dmekonnen Date: 2014-05-27 19:52:49 +0000 (Tue, 27 May 2014) Log Message: ----------- removing default.rb which got out of its directory (should be under attributes/ only) Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb 2014-05-27 19:43:21 UTC (rev 8424) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb 2014-05-27 19:52:49 UTC (rev 8425) @@ -1,157 +0,0 @@ -# -# Where bigdata resource files will be installed: -# -default['bigdata'][:home] = "/var/lib/bigdata" - -# -# Who runs bigdata? This is applicable to NSS and HA installs only: -# -default['bigdata'][:user] = "bigdata" -default['bigdata'][:group] = "bigdata" -default['bigdata'][:base_version] = "1.3.1" - -# -# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory: -# -default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code" - - -case node['bigdata'][:install_flavor] -when "nss" - # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer: - default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz" - - # Where the jetty resourceBase is defined: - default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" - - # Where the log files will live: - default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log" - - # Where the bigdata-ha.jnl file will live: - default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data" - - # The subversion branch to use when building from source: - if node['bigdata'][:build_from_svn] - default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" - end -when "tomcat" - # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7: - default['tomcat'][:base_version] = 7 - - # JRE options options to set for Tomcat, the following is strongly recommended: - default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC" - - # A SourceForge URL to use for downloading the bigdata.war file: - default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war" - - # Where the bigdata contents reside under Tomcat: - default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" - - # Where the log4j.properites file can be found: - default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties" - - # Where the bigdata-ha.jnl file will live: - default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" - - # Where the log files will live: - default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" - - # The subversion branch to use when building from source: - if node['bigdata'][:build_from_svn] - default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" - end -when "ha" - # The URL to the bigdataHA release bundle. 
- default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz" - - # The subversion branch to use when building from source: - if node['bigdata'][:build_from_svn] - # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" - default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" - end - - # Where the bigdata-ha.jnl file will live: - default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" - - # Where the log files will live: - default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" - - # Where the jetty resourceBase is defined: - default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" - - # Name of the federation of services (controls the Apache River GROUPS). - default['bigdata'][:fedname] = 'my-cluster-1' - - # Name of the replication cluster to which this HAJournalServer will belong. - default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1' - - # Where to find the Apache River service registrars (can also use multicast). - default['bigdata'][:river_locator1] = 'bigdataA' - default['bigdata'][:river_locator2] = 'bigdataB' - default['bigdata'][:river_locator3] = 'bigdataC' - - # Where to find the Apache Zookeeper ensemble. - default['bigdata'][:zk_server1] = 'bigdataA' - default['bigdata'][:zk_server2] = 'bigdataB' - default['bigdata'][:zk_server3] = 'bigdataC' -end - - -################################################################################### -# -# Set the RWStore.properties attributes that apply for all installation scenarios. -# -################################################################################### - -# Where the RWStore.properties file can be found: -default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" - - -default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW" - -# Setup for the RWStore recycler rather than session protection. -default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1" - -default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000" -default['bigdata']['btree.BTree.branchingFactor'] = "128" - -# 200M initial extent. -default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200" -default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200" - -# Setup for QUADS mode without the full text index. -default['bigdata']['rdf.sail.truthMaintenance'] = "false" -default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false" -default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false" -default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false" -default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms" - -# Bump up the branching factor for the lexicon indices on the default kb. -default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400" - -# Bump up the branching factor for the statement indices on the default kb. -default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024" -default['bigdata']['rdf.sail.bufferCapacity'] = "100000" - -# -# Bigdata supports over a hundred properties and only the most commonly configured -# are set here as Chef attributes. Any number of additional properties may be -# configured by Chef. 
To do so, add the desired property in this (attributes/default.rb) -# file as well as in the templates/default/RWStore.properties.erb file. The -# "vocabularyClass" property (below) for inline URIs is used as example additional -# entry: -# -# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass" - - -################################################################# -# -# The following attributes are defaults for the MapGraph recipe. -# -################################################################# - -# The subversion branch to use when building from source: -default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk" - -# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory: -default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code" This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-05-27 19:43:27
Revision: 8424 http://sourceforge.net/p/bigdata/code/8424 Author: dmekonnen Date: 2014-05-27 19:43:21 +0000 (Tue, 27 May 2014) Log Message: ----------- Adding lost README.txt file for HA3 launcher scripts Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/README.txt Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/README.txt 2014-05-27 19:43:21 UTC (rev 8424) @@ -0,0 +1,96 @@ +REQUIREMENTS +============ +This Vagrant resource has been tested against the following versions of required resources: + + Vagrant: 1.4.3 + Vagrant Plugins: + * nugrant (1.4.2) + * vagrant-aws (0.4.1) + * vagrant-berkshelf (1.3.7) + + Chef: 11.10.4 +Berkshelf: 2.0.10 + Python: 2.7.5 + Ruby: 1.9.3p448 (2013-06-27 revision 41675) [x86_64-darwin12.3.0] + Boto: 2.27.0 + + + +CONFIGURATION +============= + +AWS +--- +Your organization's AWS access credentials are essential to launching the cluster. Please retreive them before attempting to bring up the cluster: + + * AWS Access Key ID + * AWS Secreet Access Key + * AWS Keypair Name + * The SSH Private Key file corresponding to the keypair + * AWS Security Group for the cluster nodes to join [must minimally allow public TCP access to ports 22 and 8080] + + +All AWS settings reside in the "aws.rc" file. You must edit this file and set AWS values accordingly. + + +Vagrant +------- +Vagrant will need the required plugins (see above), if not already installed, they may be added with: + + % vagrant plugin install nugrant + % vagrant plugin install vagrant-aws + % vagrant plugin install vagrant-berkshelf + + +Boto: AWS API +------------- +The "Boto" python library for the AWS API must be installed in order to instantiate the cluster. If not already installed: + + % sudo pip install pycrypto + % sudo pip install boto + +alternately: + + % sudo easy_install boto + + +If while running the python scripts the error message appears "ImportError: No module named boto", you will need to set the +PYTHONPATH environment variable, for example: + + % export PYTHONPATH=/usr/local/lib/python2.7/site-packages + + + +LAUNCHING BIGDATA HA CLUSTER +============================ + +The cluster may be brought up with: + + % ./bin/createCluster.sh + +Launching the cluster may take up to 10 minutes. When complete the cluster creation script will present + + +SSH to a specific node: + + % source aws.rc # all vagrant commands will depend on exported AWS environment variables + % vagrant ssh bigdataA + + +Stop & Start the cluster: + + % vagrant halt + % vagrant up + + +Terminating the cluster: + + % vagrant destroy + + +Trouble Shooting +---------------- +If a host is slow to startup there can be an initial connection failure. For example, the bigdataA "status" page may not +appear if bigdataB or bigdataC is slow to start up. In this case log into bigdataA ("vagrant ssh bigdataA") and restart +the service ("sudo /etc/init.d/bigdataA restart") and the host shall connect as expected. + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <dme...@us...> - 2014-05-27 18:29:31
Revision: 8423 http://sourceforge.net/p/bigdata/code/8423 Author: dmekonnen Date: 2014-05-27 18:29:25 +0000 (Tue, 27 May 2014) Log Message: ----------- Set RWSTore.properties path correctly for HA Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-27 13:28:02 UTC (rev 8422) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-27 18:29:25 UTC (rev 8423) @@ -2,8 +2,6 @@ class Bigdata < Formula homepage "http://bigdata.com/" -# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz" -# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2" url "http://bigdata.com/deploy/bigdata-1.3.1.tgz" sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79" Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-27 13:28:02 UTC (rev 8422) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-27 18:29:25 UTC (rev 8423) @@ -15,6 +15,8 @@ # default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code" +# Where the RWStore.properties file can be found: +default['bigdata'][:properties] = node['bigdata'][:home] + "/RWStore.properties" case node['bigdata'][:install_flavor] when "nss" @@ -45,7 +47,7 @@ default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war" # Where the bigdata contents reside under Tomcat: - default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" + default['bigdata'][:web_home] = node['tomcat'][:webapp_dir] + "/bigdata" # Where the log4j.properites file can be found: default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties" @@ -79,6 +81,9 @@ # Where the jetty resourceBase is defined: default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + # Where the RWStore.properties file can be found: + default['bigdata'][:properties] = node['bigdata'][:jetty_dir] + "/WEB-INF/RWStore.properties" + # Name of the federation of services (controls the Apache River GROUPS). default['bigdata'][:fedname] = 'my-cluster-1' @@ -103,10 +108,7 @@ # ################################################################################### -# Where the RWStore.properties file can be found: -default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" - default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW" # Setup for the RWStore recycler rather than session protection. 
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-27 13:28:02 UTC (rev 8422) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-27 18:29:25 UTC (rev 8423) @@ -167,7 +167,7 @@ # execute "set absolute path to RWStore.properties" do cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" - command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:jetty_dir]}/WEB-INF/RWStore.properties|' web.xml" + command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:properties]}|' web.xml" end # This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
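Assuming the stock cookbook defaults (bigdata home /var/lib/bigdata, HA jetty_dir <home>/var/jetty, and the new properties attribute <jetty_dir>/WEB-INF/RWStore.properties), the sed command above rewrites the web.xml init parameter to an absolute path along the lines of <param-value>/var/lib/bigdata/var/jetty/WEB-INF/RWStore.properties</param-value>; the exact value is simply whatever node['bigdata'][:properties] resolves to on the node.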
From: <tho...@us...> - 2014-05-27 13:28:11
Revision: 8422 http://sourceforge.net/p/bigdata/code/8422 Author: thompsonbry Date: 2014-05-27 13:28:02 +0000 (Tue, 27 May 2014) Log Message: ----------- - Declared an interface that exposes a post-constructor Callable to initialize a service. This will be used for the SnapshotManager, HALogNexus, and HAJournal. - Modified the SnapshotManager to use a parallel scan and the new IServiceInit interface. - Added test to verify that snapshots are located after a service restart. - Defined, exposed, and tested a variety of constants for the CommitCounterUtility. These were added to support a parallel scan of the files in a leaf directory. - Declared a "startupThreads" parameter that controls the number of parallel scans for the HAJournal startup processes. Snapshot test suites are green locally. See #775 (HAJournal.start() - optimization) Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/AbstractJournal.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -1752,6 +1752,7 @@ } + @Override final public File getFile() { final IBufferStrategy tmp = getBufferStrategy(); @@ -1915,6 +1916,7 @@ * @exception IllegalStateException * if the journal is open. */ + @Override public void deleteResources() { if (isOpen()) @@ -2307,12 +2309,14 @@ } + @Override final public UUID getUUID() { return journalMetadata.get().getUUID(); } + @Override final public IResourceMetadata getResourceMetadata() { return journalMetadata.get(); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/journal/CommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -37,6 +37,17 @@ /** * Utility class for operations on files that are named using a commit counter. + * <p> + * The commit counter based files are arranged in a heirarchial directory + * structure with 3 digits per directory and 7 directory levels. These levels + * are labeled with depths <code>[0..6]</code>. 
The root directory is at depth + * ZERO (0). Each directory contains up to <code>1000</code> children. The + * children in the non-leaf directories are subdirectories labeled + * <code>0..999</code>. The leaf directories are at depth SIX (6). Leaf + * directories contain files. Each file in a leaf directory is labeled with a + * <code>21</code> digit base name and some purpose specific file extension. + * Each such file has data for the specific commit point encoded by the basename + * of the file. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ @@ -46,6 +57,89 @@ .getLogger(CommitCounterUtility.class); /** + * The number of base-10 digits per directory level. This allows children + * having labels <code>000...999</code>. Thus there are <code>1000</code> + * children per directory. + */ + private static final int DIGITS_PER_DIR = 3; + + /** The number of files per directory. */ + private static final int FILES_PER_DIR = 1000; + + /** The depth of the root directory. */ + private static final int ROOT_DIR_DEPTH = 0; + + /** The depth of a leaf directory. */ + private static final int LEAF_DIR_DEPTH = 6; + + /** + * The #of digits (21) in the base file name for a commit counter as + * formatted by {@link #getCommitCounterStr(long)}. + * <p> + * Note: 21 := (leafDirDepth+1) * digitsPerDir + */ + private static final int BASENAME_DIGITS = 21; + + /** + * The {@link Formatter} string that is used to generate the base name of + * the files in the leaf directories. This string represents the commit + * counter value with leading zeros. The leading zeros are relied upon to + * impose an ordering over the base names of the files using a sort. + */ + private static final String FORMAT_STR = "%0" + BASENAME_DIGITS + "d"; + + /** + * The #of digits (21) in the base file name for a commit counter as + * formatted by {@link #getCommitCounterStr(long)}. + * <p> + * Note: 21 := (leafDirDepth+1) * digitsPerDir + */ + public static int getBasenameDigits() { + + return BASENAME_DIGITS; + + } + + /** + * The number of base-10 digits per directory level ( + * {@value #DIGITS_PER_DIR}). This allows children having labels + * <code>000...999</code>. Thus there are <code>1000</code> children per + * directory. + */ + public static int getDigitsPerDirectory() { + + return DIGITS_PER_DIR; + + } + + /** + * The number of files per directory ({@value #FILES_PER_DIR}). + */ + public static int getFilesPerDirectory() { + + return FILES_PER_DIR; + + } + + /** + * The depth of the root directory ({@value #ROOT_DIR_DEPTH}). + */ + public static int getRootDirectoryDepth() { + + return ROOT_DIR_DEPTH; + + } + + /** + * The depth of a leaf directory ({@value #LEAF_DIR_DEPTH}). + */ + public static int getLeafDirectoryDepth() { + + return LEAF_DIR_DEPTH; + + } + + /** * Return the name of the {@link File} associated with the commitCounter. * * @param dir @@ -79,15 +173,11 @@ * Now figure out the recursive directory name. 
*/ File t = dir; + + for (int i = 0; i < (BASENAME_DIGITS - DIGITS_PER_DIR); i += DIGITS_PER_DIR) { - if (true) { + t = new File(t, basename.substring(i, i + DIGITS_PER_DIR)); - for (int i = 0; i < (21 - 3); i += 3) { - - t = new File(t, basename.substring(i, i + 3)); - - } - } final File file = new File(t, basename + ext); @@ -108,11 +198,11 @@ */ public static String getCommitCounterStr(final long commitCounter) { - final StringBuilder sb = new StringBuilder(21); + final StringBuilder sb = new StringBuilder(BASENAME_DIGITS); final Formatter f = new Formatter(sb); - f.format("%021d", commitCounter); + f.format(FORMAT_STR, commitCounter); f.flush(); f.close(); Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IServiceInit.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -0,0 +1,46 @@ +/** + +Copyright (C) SYSTAP, LLC 2006-2007. All rights reserved. + +Contact: + SYSTAP, LLC + 4501 Tower Road + Greensboro, NC 27410 + lic...@bi... + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/* + * Created on May 27th, 2014 + */ +package com.bigdata.service; + +import java.util.concurrent.Callable; + +/** + * Interface for post-constructor initialization. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + * + * @param <T> + * The generic type of the object to which the initialization task + * will be evaluated. + */ +public interface IServiceInit<T> { + + /** + * Return a task that must be used to initialize the service. + */ + Callable<T> init(); +} Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/test/com/bigdata/journal/TestCommitCounterUtility.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -42,10 +42,35 @@ public TestCommitCounterUtility() { } - public TestCommitCounterUtility(String name) { + public TestCommitCounterUtility(final String name) { super(name); } + /** + * Verify the value of specific constants. These constants must not be + * modified since they define the hierarchical structure of the durable data + * and a relied upon to generate and parse the fully qualified names of the + * files within a managed commit counter based directory system. 
+ */ + public void test_constants() { + + assertEquals("filesPerDirectory", 1000, + CommitCounterUtility.getFilesPerDirectory()); + + assertEquals("digitsPerDirectory", 3, + CommitCounterUtility.getDigitsPerDirectory()); + + assertEquals("basenameDigits", 21, + CommitCounterUtility.getBasenameDigits()); + + assertEquals("rootDirectoryDepth", 0, + CommitCounterUtility.getRootDirectoryDepth()); + + assertEquals("leafDirectoryDepth", 6, + CommitCounterUtility.getLeafDirectoryDepth()); + + } + public void test01() { final File dir = new File("/tmp"); Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournal.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -39,6 +39,8 @@ import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.locks.Lock; @@ -420,6 +422,16 @@ // Snapshot manager. snapshotManager = new SnapshotManager(server, this, config); + try { + getExecutorService().submit(snapshotManager.init()).get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); // TODO Do not wrap. + } catch (CancellationException e) { + throw e; + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -383,6 +383,19 @@ String DEFAULT_SNAPSHOT_DIR = "snapshot"; /** + * The number of threads that will be used for a parallel scan of the + * files in the {@link #HA_LOG_DIR} and {@link #SNAPSHOT_DIR} in order + * to accelerate the service start. The minimum is ONE (1). The default + * is {@value #DEFAULT_STARTUP_THREADS}. + * + * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start() + * (optimization) </a> + */ + String STARTUP_THREADS = "startupThreads"; + + int DEFAULT_STARTUP_THREADS = 20; + + /** * The policy that specifies when a new snapshot will be taken. The * decision to take a snapshot is a local decision and the snapshot is * assumed to be written to local disk. However, offsite replication of @@ -871,6 +884,36 @@ * {@inheritDoc} * <p> * Note: called from {@link AbstractServer#run()} + * + * FIXME We should be able to start the NSS while still reading the HALog + * files from the disk. The action to start the {@link HAQuorumService} + * should await a {@link Future} for the journal start. Thus, the + * {@link HAJournal} start needs to be turned into a {@link Callable} or + * {@link Runnable}. + * <p> + * In fact, the journal open is very fast. The slow part is the building an + * index over the HALogs and (to a lesser extent) over the snapshots. 
Those + * index builds can run in parallel, but we need to have a critical section + * in which we check some necessary conditions, especially whether the last + * HALog is valid. + * <p> + * We need to push a start() computation into both the {@link HALogNexus} + * and the {@link SnapshotManager}. This could be done with an interface + * that is also shared by the {@link HAJournal}. The interface could provide + * some reporting on the startup process, but most critical is that it + * provides a {@link Future} for evaluating that process. + * <p> + * The {@link Future} can evaluate to the outcome of that startup procedure. + * <p> + * The startup procedure should use multiple threads (or async IO) to reduce + * the startup latency. It could use the executor on the journal for this. + * <p> + * We could parallelize the HALog and snapshot startup then enter a critical + * section in which we validate the consistency of those resources with + * respect to the HAJournal's current root block. + * + * @see <a href="http://trac.bigdata.com/ticket/775" > HAJournal start() + * (optimization) </a> */ @Override protected void startUpHook() { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HALogNexus.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -128,11 +128,11 @@ */ volatile IHAWriteMessage lastLiveHAWriteMessage = null; - /* - * Set to protect log files against deletion while a digest is - * computed. This is checked by deleteHALogs. + /** + * Set to protect log files against deletion while a digest is computed. + * This is checked by {@link #deleteHALogs(long, long)}. 
*/ - private final AtomicInteger logAccessors = new AtomicInteger(); + private final AtomicInteger logAccessors = new AtomicInteger(); /** * Filter visits all HALog files <strong>except</strong> the current HALog @@ -1042,23 +1042,26 @@ /** * Protects logs from removal while a digest is being computed - * @param earliestDigest */ void addAccessor() { - if (logAccessors.incrementAndGet() == 1) { - if (log.isInfoEnabled()) - log.info("Access protection added"); - } + if (logAccessors.incrementAndGet() == 1) { + if (log.isDebugEnabled()) + log.debug("Access protection added"); + } } - + /** * Releases current protection against log removal */ void releaseAccessor() { - if (logAccessors.decrementAndGet() == 0) { - if (log.isInfoEnabled()) - log.info("Access protection removed"); - } + final long tmp; + if ((tmp = logAccessors.decrementAndGet()) == 0) { + if (log.isDebugEnabled()) + log.debug("Access protection removed"); + } + if (tmp < 0) + throw new RuntimeException("Decremented to a negative value: " + + tmp); } /** Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/SnapshotManager.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -36,10 +36,13 @@ import java.nio.ByteBuffer; import java.security.DigestException; import java.security.MessageDigest; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.zip.GZIPInputStream; @@ -73,17 +76,19 @@ import com.bigdata.quorum.Quorum; import com.bigdata.quorum.QuorumException; import com.bigdata.rawstore.Bytes; +import com.bigdata.service.IServiceInit; import com.bigdata.striterator.Resolver; import com.bigdata.striterator.Striterator; import com.bigdata.util.ChecksumError; import com.bigdata.util.ChecksumUtility; +import com.bigdata.util.concurrent.LatchedExecutor; /** * Class to manage the snapshot files. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> */ -public class SnapshotManager { +public class SnapshotManager implements IServiceInit<Void> { private static final Logger log = Logger.getLogger(SnapshotManager.class); @@ -185,6 +190,11 @@ private final IRestorePolicy restorePolicy; /** + * @see HAJournalServer.ConfigurationOptions#STARTUP_THREADS + */ + private final int startupThreads; + + /** * An in memory index over the last commit time of each snapshot. This is * populated when the {@link HAJournal} starts from the file system and * maintained as snapshots are taken or destroyed. 
@@ -299,62 +309,241 @@ IRestorePolicy.class, // HAJournalServer.ConfigurationOptions.DEFAULT_RESTORE_POLICY); + { + + startupThreads = (Integer) config + .getEntry( + HAJournalServer.ConfigurationOptions.COMPONENT, + HAJournalServer.ConfigurationOptions.STARTUP_THREADS, + Integer.TYPE, + HAJournalServer.ConfigurationOptions.DEFAULT_STARTUP_THREADS); + + if (startupThreads <= 0) { + throw new ConfigurationException( + HAJournalServer.ConfigurationOptions.STARTUP_THREADS + + "=" + startupThreads + " : must be GT ZERO"); + } + + } + snapshotIndex = SnapshotIndex.createTransient(); - /* - * Delete any temporary files that were left lying around in the - * snapshot directory. - */ - CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, - getSnapshotDir(), TEMP_FILE_FILTER); + } - // Make sure the snapshot directory exists. - ensureSnapshotDirExists(); + @Override + public Callable<Void> init() { - // Populate the snapshotIndex from the snapshotDir. - populateIndexRecursive(getSnapshotDir(), SNAPSHOT_FILTER); + return new InitTask(); - // Initialize the snapshot policy. It can self-schedule. - snapshotPolicy.init(journal); - } - private void ensureSnapshotDirExists() throws IOException { + /** + * Task that is used to initialize the {@link SnapshotManager}. + * + * @author <a href="mailto:tho...@us...">Bryan Thompson</a> + */ + private class InitTask implements Callable<Void> { - if (!snapshotDir.exists()) { + @Override + public Void call() throws Exception { - // Create the directory. - if (!snapshotDir.mkdirs()) - throw new IOException("Could not create directory: " - + snapshotDir); + lock.lock(); + + try { + + doRunWithLock(); + + // Done. + return (Void) null; + + } finally { + + lock.unlock(); + + } + + } + private void doRunWithLock() throws IOException, InterruptedException, + ExecutionException { + + if (log.isInfoEnabled()) + log.info("Starting cleanup."); + + /* + * Delete any temporary files that were left lying around in the + * snapshot directory. + * + * TODO This may be relatively lengthy. It would be better to + * combine this with the scan in which we read the root blocks and + * index the snapshots. However, this will require another refactor + * of the parallel scan logic. For now, I am merely reporting out + * the times for these different scans so I can get a better sense + * of the latencies involved. + */ + CommitCounterUtility.recursiveDelete(false/* errorIfDeleteFails */, + getSnapshotDir(), TEMP_FILE_FILTER); + + // Make sure the snapshot directory exists. + ensureSnapshotDirExists(); + + if (log.isInfoEnabled()) + log.info("Starting scan."); + + final LatchedExecutor executor = new LatchedExecutor( + journal.getExecutorService(), startupThreads); + + // Populate the snapshotIndex from the snapshotDir. + populateIndexRecursive(// + executor,// + getSnapshotDir(), // + SNAPSHOT_FILTER, // + 0 // depth@root + ); + + if (log.isInfoEnabled()) + log.info("Starting policy."); + + // Initialize the snapshot policy. It can self-schedule. + snapshotPolicy.init(journal); + + if (log.isInfoEnabled()) + log.info("Done."); + } - } - - /** - * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex} - * from the root blocks in snapshot files found in that directory. - * - * @throws IOException - */ - private void populateIndexRecursive(final File f, - final FileFilter fileFilter) throws IOException { + /** + * Scans the {@link #snapshotDir} and populates the {@link #snapshotIndex} + * from the root blocks in snapshot files found in that directory. 
+ * + * @throws IOException + * @throws ExecutionException + * @throws InterruptedException + */ + private void populateIndexRecursive(final LatchedExecutor executor, + final File f, final FileFilter fileFilter, final int depth) + throws IOException, InterruptedException, ExecutionException { - if (f.isDirectory()) { + if (depth == CommitCounterUtility.getLeafDirectoryDepth()) { - final File[] children = f.listFiles(fileFilter); + /* + * Leaf directory. + */ + + final File[] children = f.listFiles(fileFilter); - for (int i = 0; i < children.length; i++) { + /* + * Setup tasks for parallel threads to read the commit record from + * each file. + */ + final List<FutureTask<SnapshotRecord>> futures = new ArrayList<FutureTask<SnapshotRecord>>( + children.length); - populateIndexRecursive(children[i], fileFilter); + for (int i = 0; i < children.length; i++) { + final File child = children[i]; + + final FutureTask<SnapshotRecord> ft = new FutureTask<SnapshotRecord>( + + new Callable<SnapshotRecord>() { + + @Override + public SnapshotRecord call() throws Exception { + + return getSnapshotRecord(child); + + } + + }); + + futures.add(ft); + + } + + try { + + /* + * Schedule all futures. + */ + for (FutureTask<SnapshotRecord> ft : futures) { + + executor.execute(ft); + + } + + /* + * Await futures, obtaining snapshot records for the current + * leaf directory. + */ + final List<SnapshotRecord> records = new ArrayList<SnapshotRecord>( + children.length); + + for (int i = 0; i < children.length; i++) { + + final Future<SnapshotRecord> ft = futures.get(i); + + final SnapshotRecord r = ft.get(); + + records.add(r); + + } + + // Add all records in the caller's thread. + for (SnapshotRecord r : records) { + + snapshotIndex.add(r); + + } + + } finally { + + /* + * Ensure tasks are terminated. + */ + + for (Future<SnapshotRecord> ft : futures) { + + ft.cancel(true/* mayInterruptIfRunning */); + + } + + } + + } else if (f.isDirectory()) { + + /* + * Sequential recursion into a child directory. + */ + + final File[] children = f.listFiles(fileFilter); + + for (int i = 0; i < children.length; i++) { + + final File child = children[i]; + + populateIndexRecursive(executor, child, fileFilter, depth + 1); + + } + + } else { + + log.warn("Ignoring file in non-leaf directory: " + f); + } - } else { + } - addSnapshot(f); + } + + private void ensureSnapshotDirExists() throws IOException { + if (!snapshotDir.exists()) { + + // Create the directory. + if (!snapshotDir.mkdirs()) + throw new IOException("Could not create directory: " + + snapshotDir); + } } @@ -434,7 +623,26 @@ * if the file can not be read. * @throws ChecksumError * if there is a checksum problem with the root blocks. + */ + private void addSnapshot(final File file) throws IOException { + + snapshotIndex.add(getSnapshotRecord(file)); + + } + + /** + * Create a {@link SnapshotRecord} from a file. * + * @param file + * The snapshot file. + * + * @throws IllegalArgumentException + * if argument is <code>null</code>. + * @throws IOException + * if the file can not be read. + * @throws ChecksumError + * if there is a checksum problem with the root blocks. + * * TODO If the root blocks are bad, then this will throw an * IOException and that will prevent the startup of the * HAJournalServer. However, if we start up the server with a @@ -449,8 +657,8 @@ * with that HALog file unless it also happens to correspond to * a snapshot. 
*/ - private void addSnapshot(final File file) throws IOException { - + private SnapshotRecord getSnapshotRecord(final File file) throws IOException { + if (file == null) throw new IllegalArgumentException(); @@ -459,10 +667,10 @@ final long sizeOnDisk = file.length(); - snapshotIndex.add(new SnapshotRecord(currentRootBlock, sizeOnDisk)); - + return new SnapshotRecord(currentRootBlock, sizeOnDisk); + } - + /** * Remove an snapshot from the file system and the {@link #snapshotIndex}. * @@ -1164,6 +1372,7 @@ } + @Override public IHASnapshotResponse call() throws Exception { // The quorum token (must remain valid through this operation). Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/AbstractHA3JournalServerTestCase.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -85,6 +85,7 @@ import com.bigdata.jini.start.process.ProcessHelper; import com.bigdata.jini.util.ConfigMath; import com.bigdata.jini.util.JiniUtil; +import com.bigdata.journal.CommitCounterUtility; import com.bigdata.journal.IRootBlockView; import com.bigdata.journal.StoreState; import com.bigdata.journal.jini.ha.HAJournalTest.HAGlueTest; @@ -1529,6 +1530,7 @@ } // Quorum that can be used to monitor the distributed quorum state. + @SuppressWarnings({ "unchecked", "rawtypes" }) final Quorum<HAGlue, QuorumClient<HAGlue>> quorum = (Quorum) new ZKQuorumImpl<HAGlue, ZKQuorumClient<HAGlue>>( replicationFactor);//, zka, acl); @@ -3154,6 +3156,7 @@ awaitNSSAndHAReady(haGlue); // Wait until self-reports RunMet. assertCondition(new Runnable() { + @Override public void run() { try { final String extendedRunState = haGlue.getExtendedRunState(); @@ -3169,6 +3172,22 @@ } /** + * Assert that a snapshot exists for the specific commit point. + * + * @param snapshotDir + * The snapshot directory for the service. + * @param commitCounter + * The commit point. + */ + protected void assertSnapshotExists(final File snapshotDir, + long commitCounter) { + + assertTrue(CommitCounterUtility.getCommitCounterFile(snapshotDir, + commitCounter, SnapshotManager.SNAPSHOT_EXT).exists()); + + } + + /** * Await the specified snapshot. * * @param server @@ -3183,6 +3202,7 @@ // Wait until self-reports RunMet. assertCondition(new Runnable() { + @Override public void run() { try { Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java 2014-05-27 13:14:23 UTC (rev 8421) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/test/com/bigdata/journal/jini/ha/TestHA3SnapshotPolicy2.java 2014-05-27 13:28:02 UTC (rev 8422) @@ -115,6 +115,7 @@ final HAGlue serverB = startB(); // Await quorum meet. + @SuppressWarnings("unused") final long token = awaitMetQuorum(); // Wait until both services are ready. @@ -130,14 +131,38 @@ // Verify/await snapshot on A. awaitSnapshotExists(serverA, commitCounter); + // Verify existence of the snapshot file. + assertSnapshotExists(getSnapshotDirA(), commitCounter); + // Verify/await snapshot on B. 
awaitSnapshotExists(serverB, commitCounter); + // Verify existence of the snapshot file. + assertSnapshotExists(getSnapshotDirB(), commitCounter); + + /* + * Restart B and verify that the service is await of the snapshot + * after a restart. + */ + restartB(); + + // Verify existence of the snapshot file after restart. + assertSnapshotExists(getSnapshotDirB(), commitCounter); + + /* + * Restart A and verify that the service is await of the snapshot + * after a restart. + */ + restartA(); + + // Verify existence of the snapshot file after restart. + assertSnapshotExists(getSnapshotDirA(), commitCounter); + } /** * Verify that C snapshots the journal when it enters RunMet after - * resynchronizing from A+B. (This can just be start A+B, await quorum meet, + * resynchronizing from A+B. (This can be just start A+B, await quorum meet, * then start C. C will resync from the leader. The snapshot should be taken * when resync is done and we enter RunMet.) */ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
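To make the directory scheme documented in the CommitCounterUtility change above concrete: the commit counter is zero padded to a 21 digit basename, the first 18 digits name six nested 3 digit directories, and the full 21 digit name plus a purpose specific extension becomes the file in the leaf directory. The following is a minimal, self contained Java sketch of that naming logic; the class name, the "/tmp/snapshot" directory and the ".dat" extension are placeholders for illustration and are not part of the committed code.

    import java.io.File;

    // Illustrative sketch only -- not the bigdata CommitCounterUtility class.
    public class CommitCounterPathSketch {

        private static final int DIGITS_PER_DIR = 3;    // children are labeled 000..999
        private static final int BASENAME_DIGITS = 21;  // (leafDirDepth + 1) * digitsPerDir

        static File getCommitCounterFile(final File dir, final long commitCounter,
                final String ext) {

            // 21 digit, zero padded basename, e.g. 000000000000000012345.
            final String basename = String.format("%021d", commitCounter);

            // Descend six levels: one 3 digit label per non-leaf directory.
            File t = dir;
            for (int i = 0; i < (BASENAME_DIGITS - DIGITS_PER_DIR); i += DIGITS_PER_DIR) {
                t = new File(t, basename.substring(i, i + DIGITS_PER_DIR));
            }

            // The file itself lives in the leaf directory (depth 6).
            return new File(t, basename + ext);
        }

        public static void main(final String[] args) {
            // ".dat" and "/tmp/snapshot" are placeholders for illustration.
            System.out.println(getCommitCounterFile(new File("/tmp/snapshot"), 12345L, ".dat"));
            // Prints: /tmp/snapshot/000/000/000/000/000/012/000000000000000012345.dat
        }
    }

Because every directory holds at most 1000 children, the leaf directories are the only places that may contain many files, which is why the startup scan concentrates its worker threads there.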
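The startup optimization in SnapshotManager fans the per-file root block reads out over a bounded pool (sized by the new startupThreads option, default 20) and then applies the results to the in-memory index from the calling thread. Below is a rough sketch of that fan-out/collect pattern using a plain fixed-size ExecutorService as a stand-in for bigdata's LatchedExecutor, and a trivial file-stat step as a stand-in for reading a snapshot's root blocks; all names are illustrative only.

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Illustrative sketch only -- a bounded parallel scan of one leaf directory.
    public class ParallelLeafScanSketch {

        public static void main(final String[] args) throws Exception {

            final int startupThreads = 20; // cf. DEFAULT_STARTUP_THREADS in the patch above
            final ExecutorService pool = Executors.newFixedThreadPool(startupThreads);
            try {
                final File leafDir = new File(args.length > 0 ? args[0] : ".");
                final File[] children = leafDir.listFiles();
                if (children == null) {
                    return; // not a directory
                }

                // Fan out: one task per file stands in for reading that file's root blocks.
                final List<Future<String>> futures = new ArrayList<Future<String>>();
                for (final File child : children) {
                    futures.add(pool.submit(new Callable<String>() {
                        @Override
                        public String call() {
                            return child.getName() + " (" + child.length() + " bytes)";
                        }
                    }));
                }

                // Collect in the caller's thread so the index is only updated single threaded.
                for (final Future<String> f : futures) {
                    System.out.println(f.get());
                }
            } finally {
                pool.shutdownNow();
            }
        }
    }

The collect step mirrors the patch: the worker tasks only read, while the snapshotIndex itself is still populated from a single thread inside the lock.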
From: <tho...@us...> - 2014-05-27 13:14:28
Revision: 8421 http://sourceforge.net/p/bigdata/code/8421 Author: thompsonbry Date: 2014-05-27 13:14:23 +0000 (Tue, 27 May 2014) Log Message: ----------- removed versionId Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IService.java Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IService.java =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IService.java 2014-05-26 16:13:54 UTC (rev 8420) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/java/com/bigdata/service/IService.java 2014-05-27 13:14:23 UTC (rev 8421) @@ -39,7 +39,6 @@ * Common service interface. * * @author <a href="mailto:tho...@us...">Bryan Thompson</a> - * @version $Id$ */ public interface IService extends Remote { This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-05-26 16:13:58
Revision: 8420 http://sourceforge.net/p/bigdata/code/8420 Author: tobycraig Date: 2014-05-26 16:13:54 +0000 (Mon, 26 May 2014) Log Message: ----------- #891 - Added dropdown groups for namespaces Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css 2014-05-26 14:40:47 UTC (rev 8419) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/css/style.css 2014-05-26 16:13:54 UTC (rev 8420) @@ -188,27 +188,6 @@ overflow: hidden; } -.namespace-shortcuts { - float: right; - margin-bottom: 20px; -} - -.namespace-shortcuts li { - display: inline-block; - border: 1px solid #e4e4e4; - padding: 5px; - margin-left: 5px; - cursor: pointer; - width: 40px; - text-align: center; -} - -.namespace-shortcuts li:hover { - border-color: #b7b7b7; - background-color: #b7b7b7; - color: #ededed; -} - #query-form, #update-box-container { clear: both; } Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-26 14:40:47 UTC (rev 8419) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-26 16:13:54 UTC (rev 8420) @@ -214,34 +214,51 @@ /* Namespace shortcuts */ NAMESPACE_SHORTCUTS = { - 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', - 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', - 'owl': 'http://www.w3.org/2002/07/owl#', - 'bd': 'http://www.bigdata.com/rdf#', - 'bds': 'http://www.bigdata.com/rdf/search#', - 'gas': 'http://www.bigdata.com/rdf/gas#', - 'foaf': 'http://xmlns.com/foaf/0.1/', - 'hint': 'http://www.bigdata.com/queryHints#', - 'dc': 'http://purl.org/dc/elements/1.1/', - 'xsd': 'http://www.w3.org/2001/XMLSchema#' + 'Bigdata': { + 'bd': 'http://www.bigdata.com/rdf#', + 'bds': 'http://www.bigdata.com/rdf/search#', + 'gas': 'http://www.bigdata.com/rdf/gas#', + 'hint': 'http://www.bigdata.com/queryHints#' + }, + 'W3C': { + 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', + 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', + 'owl': 'http://www.w3.org/2002/07/owl#', + 'skos': 'http://www.w3.org/2004/02/skos/core#', + 'xsd': 'http://www.w3.org/2001/XMLSchema#' + }, + 'Dublic Core': { + 'dc': 'http://purl.org/dc/elements/1.1/', + 'dcterm': 'http://purl.org/dc/terms/', + 'void': 'http://rdfs.org/ns/void#' + }, + 'Social/Other': { + 'foaf': 'http://xmlns.com/foaf/0.1/', + 'schema': 'http://schema.org/', + 'sioc': 'http://rdfs.org/sioc/ns#' + } }; -$('.namespace-shortcuts').html('<ul>'); -for(var ns in NAMESPACE_SHORTCUTS) { - // cannot use data-ns attribute on li, as jQuery mangles namespaces that don't end with #, adding </li> to them - var li = $('<li>' + ns.toUpperCase() + '</li>'); - li.data('ns', 'prefix ' + ns + ': <' + NAMESPACE_SHORTCUTS[ns] + '>'); - li.appendTo('.namespace-shortcuts ul'); +$('.namespace-shortcuts').html(''); +for(var category in NAMESPACE_SHORTCUTS) { + var select = $('<select><option>' + category + '</option></select>').appendTo($('.namespace-shortcuts')); + for(var ns in NAMESPACE_SHORTCUTS[category]) { + select.append('<option value="' + NAMESPACE_SHORTCUTS[category][ns] + '">' + ns + '</option>'); + } } 
-$('.namespace-shortcuts li').click(function() { +$('.namespace-shortcuts select').change(function() { + var uri = this.value; var tab = $(this).parents('.tab').attr('id').split('-')[0]; var current = EDITORS[tab].getValue(); - var ns = $(this).data('ns'); - if(current.indexOf(ns) == -1) { - EDITORS[tab].setValue(ns + '\n' + current); + if(current.indexOf(uri) == -1) { + var ns = $(this).find(':selected').text(); + EDITORS[tab].setValue('prefix ' + ns + ': <' + uri + '>\n' + current); } + + // reselect group label + this.selectedIndex = 0; }); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tob...@us...> - 2014-05-26 14:40:50
Revision: 8419 http://sourceforge.net/p/bigdata/code/8419 Author: tobycraig Date: 2014-05-26 14:40:47 +0000 (Mon, 26 May 2014) Log Message: ----------- Fixed search not working with CodeMirror editor Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-23 19:11:29 UTC (rev 8418) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-26 14:40:47 UTC (rev 8419) @@ -35,7 +35,7 @@ return; } var query = 'select ?s ?p ?o { ?o bds:search "' + term + '" . ?s ?p ?o . }' - $('#query-box').val(query); + EDITORS.query.setValue(query); $('#query-errors').hide(); $('#query-form').submit(); showTab('query'); This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-23 19:11:34
Revision: 8418 http://sourceforge.net/p/bigdata/code/8418 Author: thompsonbry Date: 2014-05-23 19:11:29 +0000 (Fri, 23 May 2014) Log Message: ----------- rolling back changes from Property to SystemProperty in jetty.xml. This broken the build. The HA test suites were not starting correctly. See #951 and #958 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-23 16:08:41 UTC (rev 8417) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-23 19:11:29 UTC (rev 8418) @@ -62,8 +62,8 @@ <New class="javax.management.remote.JMXServiceURL"> <Arg type="java.lang.String">rmi</Arg> <Arg type="java.lang.String" /> - <Arg type="java.lang.Integer"><Property name="jetty.jmxrmiport" default="1090"/></Arg> - <Arg type="java.lang.String">/jndi/rmi://<Property name="jetty.jmxrmihost" default="localhost"/>:<Property name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> + <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> + <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> </New> </Arg> <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> @@ -110,9 +110,9 @@ </Item> </Array> </Arg> - <Set name="host"><Property name="jetty.host" /></Set> - <Set name="port"><Property name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> + <Set name="host"><SystemProperty name="jetty.host" /></Set> + <Set name="port"><SystemProperty name="jetty.port" default="8080" /></Set> + <Set name="idleTimeout"><SystemProperty name="http.timeout" default="30000"/></Set> </New> </Arg> </Call> @@ -142,12 +142,12 @@ <Arg> <!-- This is the bigdata web application. --> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"><Property name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> - <Set name="overrideDescriptor"><Property name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> + <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
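For context on why the rollback above matters: a <SystemProperty name="..." default="..."/> element in jetty.xml is resolved against JVM system properties (the -Djetty.port=..., -Djetty.resourceBase=... style options that the HA test harness passes to the services it spawns), falling back to the declared default, whereas <Property> refers to properties supplied to the XML configuration itself. Roughly, and only as an illustration of the lookup semantics rather than Jetty's actual implementation, the SystemProperty form behaves like the following Java snippet using the property names from this jetty.xml:

    // Illustration only: how <SystemProperty name="jetty.port" default="8080"/> resolves.
    public class JettySystemPropertySketch {
        public static void main(final String[] args) {
            // Typically supplied by the launcher, e.g. java -Djetty.port=9090 ...
            final String host = System.getProperty("jetty.host");                       // no default in jetty.xml
            final String port = System.getProperty("jetty.port", "8080");
            final String base = System.getProperty("jetty.resourceBase", "bigdata-war/src");
            System.out.println("host=" + host + ", port=" + port + ", resourceBase=" + base);
        }
    }

Since the HA test suite only sets system properties, switching jetty.xml to <Property> would have left those overrides unused, which is consistent with the services failing to start correctly.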
From: <tob...@us...> - 2014-05-23 16:08:43
Revision: 8417 http://sourceforge.net/p/bigdata/code/8417 Author: tobycraig Date: 2014-05-23 16:08:41 +0000 (Fri, 23 May 2014) Log Message: ----------- Only add <> to URIs in explore form if they're not namespaced Modified Paths: -------------- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js Modified: branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js =================================================================== --- branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-23 16:04:05 UTC (rev 8416) +++ branches/NEW_WORKBENCH_1_3_2_BRANCH/bigdata-war/src/html/js/workbench.js 2014-05-23 16:08:41 UTC (rev 8417) @@ -934,14 +934,14 @@ e.preventDefault(); var uri = $(this).find('input[type="text"]').val().trim(); if(uri) { - // add < > if they're not present - if(uri[0] != '<') { + // add < > if they're not present and this is not a namespaced URI + if(uri[0] != '<' && uri.match(/^\w+:\//)) { uri = '<' + uri; + if(uri.slice(-1) != '>') { + uri += '>'; + } + $(this).find('input[type="text"]').val(uri); } - if(uri.slice(-1) != '>') { - uri += '>'; - } - $(this).find('input[type="text"]').val(uri); loadURI(uri); // if this is a SID, make the components clickable This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-23 16:04:07
Revision: 8416 http://sourceforge.net/p/bigdata/code/8416 Author: thompsonbry Date: 2014-05-23 16:04:05 +0000 (Fri, 23 May 2014) Log Message: ----------- Modified to chmod the bigdataNSS (versus bigdata) script as well. See #941 Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.xml Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-23 15:53:35 UTC (rev 8415) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-23 16:04:05 UTC (rev 8416) @@ -1272,7 +1272,7 @@ <chmod file="${dist.bin}/bigdata" perm="755" /> <copy file="${deploy.nss}/bin/bigdataNSS" todir="${dist.bin}" /> - <chmod file="${dist.bin}/bigdata" perm="755" /> + <chmod file="${dist.bin}/bigdataNSS" perm="755" /> <copy file="${deploy.nss}/bin/startNSS" todir="${dist.bin}" /> <chmod file="${dist.bin}/startNSS" perm="755" /> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <tho...@us...> - 2014-05-23 15:53:41
Revision: 8415 http://sourceforge.net/p/bigdata/code/8415 Author: thompsonbry Date: 2014-05-23 15:53:35 +0000 (Fri, 23 May 2014) Log Message: ----------- Fix to broken commit for #941. Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4jHA.properties.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/zoo.cfg.erb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createSecurityGroup.py branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Berksfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Gemfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Thorfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.mapgraph branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss 
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss.build-from-svn branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat.build-from-svn branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/aws.rc branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/chefignore branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Berksfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Gemfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Thorfile branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat.build-from-svn branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/chefignore branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,50 @@ +require "formula" + +class Bigdata < Formula + homepage "http://bigdata.com/" +# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz" +# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2" + url "http://bigdata.com/deploy/bigdata-1.3.1.tgz" + sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79" + + def install + prefix.install "doc", "var", "bin" + libexec.install Dir["lib/*.jar"] + + File.rename "#{bin}/bigdataNSS", "#{bin}/bigdata" + + # Set the installation path as the root for the bin scripts: + inreplace "#{bin}/bigdata" do |s| + s.sub! "<%= BD_HOME %>", prefix + s.sub! 
"<%= INSTALL_TYPE %>", "BREW" + end + + # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data): + inreplace "#{prefix}/var/jetty/WEB-INF/RWStore.properties", "<%= BD_HOME %>", prefix + + # Set the installation path as the root for log files (<bigdata_home>/log): + inreplace "#{prefix}/var/jetty/WEB-INF/classes/log4j.properties", "<%= BD_HOME %>", prefix + end + + plist_options :startup => 'true', :manual => 'bigdata start' + + def plist; <<-EOS.undent + <?xml version="1.0" encoding="UTF-8"?> + <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" + "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> + <plist version="1.0"> + <dict> + <key>Label</key> + <string>#{plist_name}</string> + <key>Program</key> + <string>#{bin}/bigdata</string> + <key>RunAtLoad</key> + <true/> + <key>WorkingDirectory</key> + <string>#{prefix}</string> + </dict> + </plist> + EOS + end + +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,186 @@ +Bigdata Cookbook +================ +The Bigdata cookbook provides the [bigdata v1.3.1](http://www.bigdata.com/) opensource triplestore/graph database. The cookbook provides recipes to install the Bigdata server as a web application under Tomcat, with its own embedded Jetty server (NSS - the NanoSparqlServer). The recipes will install pre-configured packages by node and optionally may build and install the server directly from source archive. + +For more info on Bigdata please visit: + +* Bigdata Homepage: [http://www.bigdata.com/](http://www.bigdata.com/) +* Bigdata SourceForge Page: [http://sourceforge.net/projects/bigdata/](http://sourceforge.net/projects/bigdata/) + +Requirements +------------ +Chef 11 or higher<br/> +Ruby 1.9 (preferably from the Chef full-stack installer) + + + +Attributes +---------- + +### General Attributes + +`node['bigdata'][:home]` - The root directory for bigdata contents (Default: `/var/lib/bigdata`) + +`node['bigdata'][:url]` - Where to download the bigdata package file from. (Defaults: Tomcat: http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.1/bigdata.war / NSS: http://bigdata.com/deploy/bigdata-1.3.1.tgz) + +`node['bigdata'][:data_dir]` + - Where the bigdata.jnl resides. Discussed in <a href="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer#Common_Startup_Problems">Common Startup Problmems</a> + (Defaults: Tomcat: `node['bigdata'][:home]`/data / NSS: `node['bigdata'][:home]`/var/data) + +`node['bigdata'][:log_dir]` - Where bigdata log files should reside (i.e. queryLog.csv, rules.log, queryRunStateLog.csv). (Default: Tomcat: `node['bigdata'][:home]`/var/log / NSS: `node['bigdata'][:home]`/var/log) + +`node['bigdata'][:properties]` - File path to the Bigdata properties file. (Default: `node['bigdata'][:home]`/RWStore.properties) + +`node['bigdata'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: Tomcat: https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA\_RELEASE\_1\_3\_0 / NSS: https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT\_BRANCH\_1\_3\_1) + +`node['bigdata'][:source]` - The directory to retrieve Subversion contents into. 
(Default: bigdata-code) + +`node['bigdata']['journal.AbstractJournal.bufferMode']` - Journal Buffer Mode (Default: DiskRW) + +`node['bigdata']['service.AbstractTransactionService.minReleaseAge']` - Minimum Release Age (Default: 1) + +`node['bigdata']['btree.writeRetentionQueue.capacity']` - Writing retention queue length. (Default: 4000) + +`node['bigdata']['btree.BTree.branchingFactor']` - Branching factor for the journal's B-Tree. (Default: 128) + +`node['bigdata']['journal.AbstractJournal.initialExtent']` - Journal's initial extent (Default: 209715200) + +`node['bigdata']['journal.AbstractJournal.maximumExtent']` - Journal's maximum extent (Default: 209715200) + +`node['bigdata']['rdf.sail.truthMaintenance']` - Switch Truth Maintenance on/off. (Default: false) + +`node['bigdata']['rdf.store.AbstractTripleStore.quads']` - Switch Quads Mode on/off. (Default: false) + +`node['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers']` - Switch statement identifiers on/off. (Default: false) + +`node['bigdata']['rdf.store.AbstractTripleStore.textIndex']` - Switch text indexing on/off. (Default: false) + +`node['bigdata']['rdf.store.AbstractTripleStore.axiomsClass']` - The class to handle RDF axioms. (Default: com.bigdata.rdf.axioms.NoAxioms) + +`node['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's Lexical B-Tree. (Default:- 400) + +`node['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's SPO B-Tree. (Default: 1024) + +`node['bigdata']['rdf.sail.bufferCapacity']` - The number of statements to buffer before committing triples to the persistence layer. (Default: 100000) + +### Attributes for Tomcat Based Install + +`node['bigdata'][:web_home]` - The web application root directory for bigdata. (Default `node['tomcat'][:webapp_dir]`/bigdata) + +`node['bigdata'][:log4j_properties]` - File path to the log4j properties file. (Default `node['bigdata'][:web_home]`/WEB-INF/classes/log4j.properties) + +### Attributes for NanoSparqlServer (NSS) Based Install + +`node['bigdata'][:user]` - The user to install and run bigdata under. (Default: `bigdata`) + +`node['bigdata'][:group]` - The group to install and run bigdata under. (Default: `bigdata`) + +`node['bigdata'][:jetty_dir]` - The Jetty root directory. (Default: `node['bigdata'][:home]`/var/jetty) + +### Attributes for MapGraph + +`node['mapgraph'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: https://svn.code.sf.net/p/mpgraph/code/trunk) + +`node['mapgraph'][:source]` - The directory to retrieve Subversion contents into. (Default: mapgraph-code ) + + +Recipes +------- + +A node recipe is not provided by the Bigdata cookbook. The user is given the option to install the Bigdata server under Tomcat or as a Jetty application. Under both options, Bigdata may optinally be built directly from the a Subversion source code branch. + +### tomcat + +Installs the [Tomcat](http://tomcat.apache.org/) server and then bigdata as a web application. Bigdata will be configured according to the attributes. If no attributes are given, Bigdata will be installed with the systems nodes. + +If the `build_from_svn` attribute is set to `true` Bigdata will be build from the Subversion repository given in the `svn_branch` attribute. + +### nss + +Installs the Bigdata server to run in the [NanoSparqlServer](http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer) (Jetty) mode. 
+ + +If the `build_from_svn` attribute is set to `true` Bigdata will be build from the Subversion repository given in the `svn_branch` attribute. + + +### mapgraph + +Retrieves the [MapGraph](http://sourceforge.net/projects/mpgraph/) project from its Subversion archive at SourceForget and builds it. +This recipe can only be used with GPU architecture and has only been validated against Amazon's "NVIDIA GRID GPU Driver" AMI. + + +Usage +----- + + +### Vagrant Context + +Sample Vagrant configurations are available in the Bigdata Subversion source tree under [bigdata/src/resources/deployment/vagrant](http://sourceforge.net/p/bigdata/code/HEAD/tree/branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/). + +#### Tomcat Example + + + chef.json = { + :bigdata => { + :install_flavor => "tomcat", + :build_from_svn => true, + :svn_branch => "https://svn.code.sf.net/p/bigdata/code/branches/BTREE_BUFFER_BRANCH/" + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :tomcat => { + :base_version => "7" + } + } + + chef.run_list = [ + ... + "recipe[bigdata::tomcat]" + ... + ] + + + +#### NSS Example + + + chef.json = { + :bigdata => { + :install_flavor => "nss" + }, + :java => { + :install_flavor => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + } + } + + chef.run_list = [ + ... + "recipe[bigdata::nss]" + ... + ] + + +### Trouble Shooting + +The Bigdta cookbook recipes have been tested thoroughly in the Vagrant context with VirtualBox and AWS providers using Ubuntu 12.04 and Oracle's JDK 7. + +When errors occur in the Vagrant context, it is most typically during the installation process where a network timeout has occurred during the retrieval of a dependent resource. simply continue with: + + % vagrant provision + +Which should get past any intermit ant network issues. For assistance with installation and other issues, please visit the [Bigdata Support Forum](http://sourceforge.net/p/bigdata/discussion/676946). + + +License and Authors +------------------- +Author:: Daniel Mekonnen [daniel<no-spam-at>systap.com] + + +GNU GPLv2 - This pakcage may be resiributed under the same terms and conditions as the Bigdata project that it is a part of. + + http://www.gnu.org/licenses/gpl-2.0.html Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,157 @@ +# +# Where bigdata resource files will be installed: +# +default['bigdata'][:home] = "/var/lib/bigdata" + +# +# Who runs bigdata? This is applicable to NSS and HA installs only: +# +default['bigdata'][:user] = "bigdata" +default['bigdata'][:group] = "bigdata" +default['bigdata'][:base_version] = "1.3.1" + +# +# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory: +# +default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code" + + +case node['bigdata'][:install_flavor] +when "nss" + # The URL to the bigdata-nss bundle. 
The following is the same bundle used by the Bigdata Brew installer: + default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz" + + # Where the jetty resourceBase is defined: + default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log" + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + end +when "tomcat" + # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7: + default['tomcat'][:base_version] = 7 + + # JRE options options to set for Tomcat, the following is strongly recommended: + default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC" + + # A SourceForge URL to use for downloading the bigdata.war file: + default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war" + + # Where the bigdata contents reside under Tomcat: + default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" + + # Where the log4j.properites file can be found: + default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties" + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + end +when "ha" + # The URL to the bigdataHA release bundle. + default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + end + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + + # Where the jetty resourceBase is defined: + default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + + # Name of the federation of services (controls the Apache River GROUPS). + default['bigdata'][:fedname] = 'my-cluster-1' + + # Name of the replication cluster to which this HAJournalServer will belong. + default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1' + + # Where to find the Apache River service registrars (can also use multicast). + default['bigdata'][:river_locator1] = 'bigdataA' + default['bigdata'][:river_locator2] = 'bigdataB' + default['bigdata'][:river_locator3] = 'bigdataC' + + # Where to find the Apache Zookeeper ensemble. 
+ default['bigdata'][:zk_server1] = 'bigdataA' + default['bigdata'][:zk_server2] = 'bigdataB' + default['bigdata'][:zk_server3] = 'bigdataC' +end + + +################################################################################### +# +# Set the RWStore.properties attributes that apply for all installation scenarios. +# +################################################################################### + +# Where the RWStore.properties file can be found: +default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" + + +default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW" + +# Setup for the RWStore recycler rather than session protection. +default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1" + +default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000" +default['bigdata']['btree.BTree.branchingFactor'] = "128" + +# 200M initial extent. +default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200" +default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200" + +# Setup for QUADS mode without the full text index. +default['bigdata']['rdf.sail.truthMaintenance'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms" + +# Bump up the branching factor for the lexicon indices on the default kb. +default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400" + +# Bump up the branching factor for the statement indices on the default kb. +default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024" +default['bigdata']['rdf.sail.bufferCapacity'] = "100000" + +# +# Bigdata supports over a hundred properties and only the most commonly configured +# are set here as Chef attributes. Any number of additional properties may be +# configured by Chef. To do so, add the desired property in this (attributes/default.rb) +# file as well as in the templates/default/RWStore.properties.erb file. The +# "vocabularyClass" property (below) for inline URIs is used as example additional +# entry: +# +# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass" + + +################################################################# +# +# The following attributes are defaults for the MapGraph recipe. +# +################################################################# + +# The subversion branch to use when building from source: +default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk" + +# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory: +default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code" Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,157 @@ +# +# Where bigdata resource files will be installed: +# +default['bigdata'][:home] = "/var/lib/bigdata" + +# +# Who runs bigdata? 
This is applicable to NSS and HA installs only: +# +default['bigdata'][:user] = "bigdata" +default['bigdata'][:group] = "bigdata" +default['bigdata'][:base_version] = "1.3.1" + +# +# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory: +# +default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code" + + +case node['bigdata'][:install_flavor] +when "nss" + # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer: + default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz" + + # Where the jetty resourceBase is defined: + default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log" + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + end +when "tomcat" + # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7: + default['tomcat'][:base_version] = 7 + + # JRE options options to set for Tomcat, the following is strongly recommended: + default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC" + + # A SourceForge URL to use for downloading the bigdata.war file: + default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war" + + # Where the bigdata contents reside under Tomcat: + default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" + + # Where the log4j.properites file can be found: + default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties" + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + end +when "ha" + # The URL to the bigdataHA release bundle. + default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz" + + # The subversion branch to use when building from source: + if node['bigdata'][:build_from_svn] + # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + end + + # Where the bigdata-ha.jnl file will live: + default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data" + + # Where the log files will live: + default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log" + + # Where the jetty resourceBase is defined: + default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" + + # Name of the federation of services (controls the Apache River GROUPS). + default['bigdata'][:fedname] = 'my-cluster-1' + + # Name of the replication cluster to which this HAJournalServer will belong. 
+ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1' + + # Where to find the Apache River service registrars (can also use multicast). + default['bigdata'][:river_locator1] = 'bigdataA' + default['bigdata'][:river_locator2] = 'bigdataB' + default['bigdata'][:river_locator3] = 'bigdataC' + + # Where to find the Apache Zookeeper ensemble. + default['bigdata'][:zk_server1] = 'bigdataA' + default['bigdata'][:zk_server2] = 'bigdataB' + default['bigdata'][:zk_server3] = 'bigdataC' +end + + +################################################################################### +# +# Set the RWStore.properties attributes that apply for all installation scenarios. +# +################################################################################### + +# Where the RWStore.properties file can be found: +default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties" + + +default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW" + +# Setup for the RWStore recycler rather than session protection. +default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1" + +default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000" +default['bigdata']['btree.BTree.branchingFactor'] = "128" + +# 200M initial extent. +default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200" +default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200" + +# Setup for QUADS mode without the full text index. +default['bigdata']['rdf.sail.truthMaintenance'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false" +default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms" + +# Bump up the branching factor for the lexicon indices on the default kb. +default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400" + +# Bump up the branching factor for the statement indices on the default kb. +default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024" +default['bigdata']['rdf.sail.bufferCapacity'] = "100000" + +# +# Bigdata supports over a hundred properties and only the most commonly configured +# are set here as Chef attributes. Any number of additional properties may be +# configured by Chef. To do so, add the desired property in this (attributes/default.rb) +# file as well as in the templates/default/RWStore.properties.erb file. The +# "vocabularyClass" property (below) for inline URIs is used as example additional +# entry: +# +# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass" + + +################################################################# +# +# The following attributes are defaults for the MapGraph recipe. 
+# +################################################################# + +# The subversion branch to use when building from source: +default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk" + +# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory: +default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code" Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,19 @@ +name 'bigdata' +maintainer 'Daniel Mekonnen' +maintainer_email 'daniel<no-spam-at>systap.com' +license 'GNU GPLv2' +description 'Installs/Configures Systap Bigdata High Availability' +long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) +version '0.1.4' + +depends 'apt' +depends 'java', '>= 1.22.0' +depends 'ant' +depends 'tomcat' +depends 'subversion' +depends 'lvm' +depends 'hadoop' +depends 'emacs' +depends 'sysstat' + +supports 'ubuntu' Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,220 @@ +# +# Cookbook Name:: bigdata +# Recipe:: high_availability +# +# Copyright 2014, Systap +# + +# +# Only do the following for Bigdata HA install +# +if node['bigdata'][:install_flavor] == "ha" + + include_recipe "java" + include_recipe "sysstat" + include_recipe "hadoop::zookeeper_server" + + # + # Create the bigdata systm group: + # + group node['bigdata'][:group] do + action :create + append true + end + + # + # Create the bigdata systm user: + # + user node['bigdata'][:user] do + gid node['bigdata'][:group] + supports :manage_home => true + shell "/bin/false" + home node['bigdata'][:home] + system true + action :create + end + + # + # Make sure the Bigdata home directory is owned by the bigdata user and group: + # + execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do + user "root" + group "root" + cwd node['bigdata'][:home] + command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ." + end + + if node['bigdata'][:build_from_svn] + include_recipe "ant" + include_recipe "subversion::client" + # + # Retrieve the Bigdata source from the specified subversion branch: + # + execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + end + + # + # Build the bigdata release package: + # + execute "ant deploy-artifact" do + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant deploy-artifact" + end + + # + # Extract the just built release package, thus installing it in the Bigdata home directory: + # + execute "deflate REL tar" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." 
+ command "tar xvf #{node['bigdata'][:source_dir]}/REL.bigdata-1.*.tgz" + end + + else + # + # Retrieve the package prepared for Brew: + # + remote_file "/tmp/bigdata.tgz" do + owner node['bigdata'][:user] + group node['bigdata'][:group] + source node['bigdata'][:url] + end + + # + # Extract the just retrieved release package, thus installing it in the Bigdata home directory: + # + + execute "Extract and relocate the bigdata archive" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." + command "tar xvf /tmp/bigdata.tgz" + end + + # + # The following are assumed fixed in releases after 1.3.1 and in the current subversion branch: + # + if node['bigdata'][:base_version].gsub(/\./, '').to_i == 131 + execute "Divert standard and error output into /dev/null" do + user 'root' + group 'root' + cwd "#{node['bigdata'][:home]}/etc/init.d" + command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\&1 \&\"|' bigdataHA" + end + + execute "Change SystemProperty to Property in the 'host' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"host\"><SystemProperty|<Set name=\"host\"><Property|' jetty.xml" + end + + execute "Change SystemProperty to Property in the 'port' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"port\"><SystemProperty|<Set name=\"port\"><Property|' jetty.xml" + end + + execute "Change SystemProperty to Property in the 'idleTimeout' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"idleTimeout\"><SystemProperty|<Set name=\"idleTimeout\"><Property|' jetty.xml" + end + end + end + + # + # Install hte bigdataHA service file: + # + execute "copy over the /etc/init.d/bigdataHA file" do + user 'root' + group 'root' + cwd "#{node['bigdata'][:home]}/etc/init.d" + command "cp bigdataHA /etc/init.d/bigdataHA; chmod 00755 /etc/init.d/bigdataHA" + end + + # + # Create the log directory for bigdata: + # + directory node['bigdata'][:log_dir] do + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00755 + action :create + end + + # + # Install the log4jHA.properties file: + # + template "#{node['bigdata'][:home]}/var/config/logging/log4jHA.properties" do + source "log4jHA.properties.erb" + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00644 + end + + # + # Set the absolute path to the RWStore.properties file + # + execute "set absolute path to RWStore.properties" do + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" + command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:jetty_dir]}/WEB-INF/RWStore.properties|' web.xml" + end + + # + # Install the RWStore.properties file: + # + template node['bigdata'][:properties] do + source "RWStore.properties.erb" + owner node['bigdata'][:user] + group node['bigdata'][:group] + mode 00644 + end + + # + # Copy the /etc/default/bigdataHA template: + # + template "/etc/default/bigdataHA" do + source "default/bigdataHA.erb" + user 'root' + group 'root' + mode 00644 + end + + # + # Setup the bigdataHA script as a service: + # + service "bigdataHA" do + supports :restart => true, :status => true + action [ :enable, :start ] + end + + # + # Install the zoo.cfg file: + # + template "/etc/zookeeper/conf/zoo.cfg" do + source "zoo.cfg.erb" + owner 'root' + group 'root' + mode 00644 + end + + # + # The hadoop cookbook overlooks 
the log4j.properties file presently, but a future version may get this right: + # + execute "copy the distribution log4j.properties file" do + user 'root' + group 'root' + cwd "/etc/zookeeper/conf.chef" + command "cp ../conf.dist/log4j.properties ." + end +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,32 @@ +# http://jamie.mccrindle.org/2013/07/installing-oracle-java-7-using-chef.html +# +# Cookbook Name:: java7 +# Recipe:: default +# + +apt_repository "webupd8team" do + uri "http://ppa.launchpad.net/webupd8team/java/ubuntu" + components ['main'] + distribution node['lsb']['codename'] + keyserver "keyserver.ubuntu.com" + key "EEA14886" + deb_src true +end + +execute "remove openjdk-6" do + command "apt-get -y remove --purge openjdk-6-jdk openjdk-6-jre openjdk-6-jre-headless openjdk-6-jre-lib" +end + + +# could be improved to run only on update +execute "accept-license" do + command "echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections" +end + +package "oracle-java7-installer" do + action :install +end + +package "oracle-java7-set-default" do + action :install +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,51 @@ +# +# Cookbook Name:: bigdata +# Recipe:: mapgraph +# +# Copyright 2014, Systap +# + +# +# MapGraph Installer +# +include_recipe "java" + + +# +# Make sure the Bigdata home directory is owned by the bigdata user and group: +# +execute "pull mapgraph from svn repo" do + user 'ec2-user' + group 'ec2-user' + cwd "/home/ec2-user" + command "svn checkout #{node['mapgraph'][:svn_branch]} #{node['mapgraph'][:source_dir]}" +end + + +# +# Build MapGgraph: +# +execute "make mapgraph" do + cwd node['mapgraph'][:source_dir] + command "make" +end + + + +# +# Run a basic test of MapGraph: +# +execute "test mapgraph" do + cwd node['mapgraph'][:source_dir] + command "./Algorithms/SSSP/SSSP -g smallRegressionGraphs/small.mtx" +end + + +# +# "recursive true" did not work here +# +# directory node['bigdata'][:mapgraph_home] do +# owner 'ec2-user' +# group 'ec2-user' +# recursive true +# end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,142 @@ +# +# Cookbook Name:: bigdata +# Recipe:: nss +# +# Copyright 2014, Systap +# + +# +# Only do the following for Bigdata NSS install +# +if node['bigdata'][:install_flavor] == "nss" + + include_recipe "java" + + # + # Create the bigdata systm group: + # + group node['bigdata'][:group] do + action :create + append true + end + + # + # Create the bigdata systm user: + # + user node['bigdata'][:user] do + gid node['bigdata'][:group] + supports 
:manage_home => true + shell "/bin/false" + home node['bigdata'][:home] + system true + action :create + end + + + if node['bigdata'][:build_from_svn] + include_recipe "ant" + include_recipe "subversion::client" + + # + # Retrieve the Bigdata source from the specified subversion branch: + # + execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + end + + # + # Build the bigdata release package: + # + execute "build the nss tar ball" do + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant package-nss-brew" + end + + # + # Extract the just built release package, thus installing it in the Bigdata home directory: + # + execute "Extract and relocate the bigdata archive" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." + command "tar xvf #{node['bigdata'][:source_dir]}/REL-NSS.bigdata-1.*.tgz" + end + else + # + # Retrieve the package prepared for Brew: + # + remote_file "/tmp/bigdata.tgz" do + owner node['bigdata'][:user] + group node['bigdata'][:group] + source node['bigdata'][:url] + end + + # + # Extract the just retrieved release package, thus installing it in the Bigdata home directory: + # + execute "Extract and relocate the bigdata archive" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." + command "tar xvf /tmp/bigdata.tgz" + end + end + + # + # Create a symbolic link of the bin/bigdataNSS script to /etc/init.d/bigdataNSS: + # + link "/etc/init.d/bigdataNSS" do + to "#{node['bigdata'][:home]}/bin/bigdataNSS" + end + + # + # Set the install type in the bin/bigdataNSS script: + # + execute "set the INSTALL_TYPE in bin/bigdata" do + cwd "#{node['bigdata'][:home]}/bin" + command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_flavor]}|' bigdataNSS" + end + + # + # Set the Bigdata home directory in the bin/bigdataNSS file: + # + execute "set the BD_HOME in bin/bigdata" do + cwd "#{node['bigdata'][:home]}/bin" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' bigdataNSS" + end + + # + # Set the absolute path to the bigdata.jnl file in RWStore.properties + # + execute "set the BD_HOME in RWStore.properties" do + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' RWStore.properties" + end + + # + # Set the Bigdata home directory in the log4j.properties file to set the path for the log files: + # + execute "set the BD_HOME in log4j.properties" do + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF/classes" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' log4j.properties" + end + + # + # Setup the bigdataNSS script as a service: + # + service "bigdataNSS" do + # + # Reenable this when the bin/bigdata script is updated to return a "1" for a successful status: + # + # See: http://comments.gmane.org/gmane.comp.sysutils.chef.user/2723 + # + # supports :status => true, :start => true, :stop => true, :restart => true + supports :start => true, :stop => true, :restart => true + action [ :enable, :start ] + end +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb 2014-05-23 15:53:35 
UTC (rev 8415) @@ -0,0 +1,39 @@ +# +# Cookbook Name:: bigdata +# Recipe:: ssd +# +# Copyright 2014, Systap +# + +# +# SSD Setup +# +include_recipe "lvm" + + +# +# Create the directory that will be the mount target: +# +directory node['bigdata'][:data_dir] do + owner "root" + group "root" + mode 00755 + action :create + recursive true +end + + +# +# Create and mount the logical volume: +# +lvm_volume_group 'vg' do + action :create + physical_volumes ['/dev/xvdb', '/dev/xvdc'] + + logical_volume 'lv_bigdata' do + size '100%VG' + filesystem 'ext4' + mount_point location: node['bigdata'][:data_dir], options: 'noatime,nodiratime' + # stripes 4 + end +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,168 @@ +# +# Cookbook Name:: bigdata +# Recipe:: tomcat +# +# Copyright 2014, Systap +# + +# +# Only do the following for Bigdata Tomcat install +# +if node['bigdata'][:install_flavor] == "tomcat" + + include_recipe "java" + include_recipe "tomcat" + + # + # The tomcat cookbook provides an /etc/default/tomcat7 file that contains multiple JAVA_OPTS lines but allows you to + # modify only one of them during installation. As a consequence JAVA_OPTS conflicts may occur. We comment out the + # 2nd JAVA_OPTS line to avoid the potential for any conflicts (which do occur with our default java_options attribute). + # + # Conflicting collector combinations in option list; please refer to the release notes for the combinations allowed + # Error: Could not create the Java Virtual Machine. 
+ # + execute "comment out 2nd JAVA_OPTS line in /etc/default/tomcat7" do + cwd "/etc/default" + command "sed -i 's|JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|#JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|' tomcat7" + end + + + if node['bigdata'][:build_from_svn] + include_recipe "ant" + include_recipe "subversion::client" + + # + # Retrieve the Bigdata source from the specified subversion branch: + # + execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + end + + # + # Build the bigdata.war file: + # + execute "build the war file" do + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant war" + end + + # + # Install the WAR file: + # + remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do + source "file:///#{node['bigdata'][:source_dir]}/ant-build/bigdata.war" + owner node['tomcat'][:user] + group node['tomcat'][:group] + end + + else + # + # Install the WAR file from the SourceForge URL: + # + remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do + source node['bigdata'][:url] + owner node['tomcat'][:user] + group node['tomcat'][:group] + end + end + + # + # Create the JNL home directory + # + directory node['bigdata'][:data_dir] do + owner node['tomcat'][:user] + group node['tomcat'][:group] + mode 00755 + action :create + recursive true + end + + + # + # Create the Bigdata log home + # + directory node['bigdata'][:log_dir] do + owner node['tomcat'][:user] + group node['tomcat'][:group] + mode 00755 + action :create + recursive true + end + + + # + # Install the RWStore.properties file: + # + template node['bigdata'][:properties] do + source "RWStore.properties.erb" + owner node['tomcat'][:user] + group node['tomcat'][:group] + mode 00644 + end + + + # + # Install the log4j.properties file: + # + template node['bigdata'][:log4j_properties] do + source "log4j.properties.erb" + owner node['tomcat'][:user] + group node['tomcat'][:group] + mode 00644 + retry_delay 15 + retries 3 + end + + + # + # Delete all log files so that the error and warning messages that appeared during the installation + # process do not unnecessarily alarm anyone. + # + execute "remove log files before retart" do + cwd "#{node['tomcat'][:log_dir]}" + command "rm *" + end + + # + # The RWStore.properties path is the only property that needs to be adjusted in the web.xml file. + # Using a sed command to adjust the property avoids the need to maintain a web.xml template which + # in turn updates frequently relative to the other property files. Thus this recipe becomes + # suitable against a larger range of bigdata releases. + # + if node['bigdata'][:base_version].gsub(/\./, '').to_i >= 131 + # + # Set the RWStore.properties path in the web.xml file: + # + execute "set absolute path for RWStore.properties" do + cwd "#{node['bigdata'][:web_home]}/WEB-INF" + command "sed -i 's|<param-value>../webapps/bigdata/WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" + end + + # + # Remove original RWStore.properties file to avoid user confusion + # + file "#{node['bigdata'][:web_home]}/WEB-INF/RWStore.properties" do + action :delete + end + else + # + # 1.3.0 and earlier uses a different path for RWStore.properties. 
We can remove this if block in 1.3.1 + # + execute "set absolute path for RWStore.properties" do + cwd "#{node['bigdata'][:web_home]}/WEB-INF" + command "sed -i 's|<param-value>../webapps/bigdata/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" + end + + # + # Remove original RWStore.properties file to avoid user confusion + # + file "#{node['bigdata'][:web_home]}/RWStore.properties" do + action :delete + end + end +end Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,42 @@ +# +# Note: These options are applied when the journal and the triple store are +# first created. + +## +## Journal options. +## + +# The backing file. This contains all your data. You want to put this someplace +# safe. The default locator will wind up in the directory from which you start +# your servlet container. +com.bigdata.journal.AbstractJournal.file=<%= node['bigdata'][:data_dir] %>/bigdata.jnl + +# The persistence engine. Use 'Disk' for the WORM or 'DiskRW' for the RWStore. +com.bigdata.journal.AbstractJournal.bufferMode=<%= node['bigdata']['journal.AbstractJournal.bufferMode'] %> + +# Setup for the RWStore recycler rather than session protection. +com.bigdata.service.AbstractTransactionService.minReleaseAge=<%= node['bigdata']['service.AbstractTransactionService.minReleaseAge'] %> + +com.bigdata.btree.writeRetentionQueue.capacity=<%= node['bigdata']['btree.writeRetentionQueue.capacity'] %> +com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['btree.BTree.branchingFactor'] %> + +# 200M initial extent. +com.bigdata.journal.AbstractJournal.initialExtent=<%= node['bigdata']['journal.AbstractJournal.initialExtent'] %> +com.bigdata.journal.AbstractJournal.maximumExtent=<%= node['bigdata']['journal.AbstractJournal.maximumExtent'] %> + +## +## Setup for QUADS mode without the full text index. +## +com.bigdata.rdf.sail.truthMaintenance=<%= node['bigdata']['rdf.sail.truthMaintenance'] %> +com.bigdata.rdf.store.AbstractTripleStore.quads=<%= node['bigdata']['rdf.store.AbstractTripleStore.quads'] %> +com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=<%= node['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] %> +com.bigdata.rdf.store.AbstractTripleStore.textIndex=<%= node['bigdata']['rdf.store.AbstractTripleStore.textIndex'] %> +com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=<%= node['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] %> + +# Bump up the branching factor for the lexicon indices on the default kb. +com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] %> + +# Bump up the branching factor for the statement indices on the default kb. 
+com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] %> +com.bigdata.rdf.sail.bufferCapacity=<%= node['bigdata']['rdf.sail.sailBufferCapacity'] %> +# com.bigdata.rdf.store.AbstractTripleStore.vocabularyClass=<%= node['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] %> Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,51 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +BD_USER="<%= node['bigdata'][:user] %>" +BD_GROUP="<%= node['bigdata'][:group] %>" + +binDir=<%= node['bigdata'][:home] %>/bin +pidFile=<%= node['bigdata'][:home] %>/var/lock/pid + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. +## + +# Name of the bigdata gederation of services. Override for real install. +export FEDNAME=<%= node['bigdata'][:fedname] %> + +# This is different for each HA replication cluster in the same federation +# of services. If you have multiple such replication cluster, then just +# given each such cluster its own name. +export LOGICAL_SERVICE_ID=<%= node['bigdata'][:logical_service_id] %> + +# Local directory where the service will store its state. +export FED_DIR=<%= node['bigdata'][:home] %> +export DATA_DIR=<%= node['bigdata'][:data_dir] %> + +# Apache River - NO default for "LOCATORS". +export GROUPS="${FEDNAME}" +export LOCATORS="%JINI_LOCATORS%" + +# Apache ZooKeeper - NO default. +export ZK_SERVERS="<%= node['bigdata'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>" + + +# All of these have defaults. Override as necessary. +#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export JETTY_PORT=8080 +#export JETTY_XML=var/jetty/jetty.xml +#export JETTY_RESOURCE_BASE=var/jetty/html +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb 2014-05-23 15:53:35 UTC (rev 8415) @@ -0,0 +1,98 @@ +# Default log4j configuration. See the individual classes for the +# specific loggers, but generally they are named for the class in +# which they are defined. + +# Default log4j configuration for testing purposes. +# +# You probably want to set the default log level to ERROR. 
+# +#log4j.rootCategory=WARN, dest1 +#log4j.rootCategory=WARN, dest2 +log4j.rootCategory=WARN, file + +# Loggers. +# Note: logging here at INFO or DEBUG will significantly impact throughput! +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +# Normal data loader (single threaded). +# log4j.logger.com.bigdata.rdf.store.DataLoader=INFO + + +# file +log4j.appender.file=org.apache.log4j.RollingFileAppender +log4j.appender.file.File=<%= node['bigdata'][:log_dir] %>/bigdata.log +log4j.appender.file.MaxFileSize=4MB +log4j.appender.file.MaxBackupIndex=10 +log4j.appender.file.layout=org.apache.log4j.PatternLayout +log4j.appender.file.layout.ConversionPattern=%d{MMM dd, yyyy HH:mm:ss} %-5p: %F:%L: %m%n + +# dest1 +log4j.appender.dest1=org.apache.log4j.ConsoleAppender +log4j.appender.dest1.layout=org.apache.log4j.PatternLayout +log4j.appender.dest1.layout.ConversionPattern=%d{MMM dd, yyyy HH:mm:ss} %-5p: %F:%L: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-5p: %r %l: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-5p: %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n +#log4j.appender.dest1.layout.ConversionPattern=%-4r(%d) [%t] %-5p %c(%l:%M) %x - %m%n + +## dest2 includes the thread name and elapsed milliseconds. +## Note: %r is elapsed milliseconds. +## Note: %t is the thread name. +## See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +#log4j.appender.dest2=org.apache.log4j.ConsoleAppender +#log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +#log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n +# +### +## Rule execution log. This is a formatted log file (comma delimited). +log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false +log4j.appender.ruleLog=org.apache.log4j.FileAppender +log4j.appender.ruleLog.Threshold=ALL +log4j.appender.ruleLog.File=rules.log +log4j.appender.ruleLog.File=<%= node['bigdata'][:log_dir] %>/rules.log +log4j.appender.ruleLog.Append=true +## I find that it is nicer to have this unbuffered since you can see what +## is going on and to make sure that I have complete rule evaluation logs +## on shutdown. +log4j.appender.ruleLog.BufferedIO=false +log4j.appender.ruleLog.layout=org.apache.log4j.PatternLayout +log4j.appender.ruleLog.layout.ConversionPattern=%m +# +### +## Summary query evaluation log (tab delimited file). Uncomment the next line to enable +##log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +#log4j.additivity.com.bigdata.bop.engine.QueryLog=false +#log4j.appender.queryLog=org.apache.log4j.FileAppender +#log4j.appender.queryLog.Threshold=ALL +#log4j.appender.queryLog.File=<%= node['bigdata'][:log_dir] %>/queryLog.csv +#log4j.appender.queryLog.Append=true +## I find that it is nicer to have this unbuffered since you can see what +## is going on and to make sure that I have complete rule evaluation logs +## on shutdown. +#log4j.appender.queryLog.BufferedIO=false +#... [truncated message content] |
From: <tho...@us...> - 2014-05-23 15:29:44
Revision: 8414 http://sourceforge.net/p/bigdata/code/8414 Author: thompsonbry Date: 2014-05-23 15:29:40 +0000 (Fri, 23 May 2014) Log Message: ----------- See #941 (merge deployments branch to main branch). - HARestore.sh: You can not safely rely on the limited classpath that is used in this script. This is very likely to break based merely on the imports into the HARestore, Journal, AbstractJournal and related classes. At a minimum, we would need to test this classpath for each release or in CI. I would prefer that we had a means to assemble a better classpath. The startHAServices script has a similar problem. The classpath is currently hacked there using the incantation export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` - What is the purpose of the "src/resources/deployment" directory? Is this the "single-server, non-HA" NSS deployment? - /bigdata/deployment - we put all of this stuff under /src/resources NOT /bigdata. - I have deleted /bigdata/deployment entirely from branches/BIGDATA_RELEASE_1_3_0. - I have copied the files (but not the SVN folders) from the DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment into /src/resources/deployment. - jetty.xml: copied from the DEPLOYMENTS branch. - /Users/bryan/Documents/workspace/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh - This has been removed. The src/resources/deployment/nss directory has similar scripts. It is Ok to add an ant task to start the nss for developers, but deployments should be based on the "ant stage" pattern. - src/resources/deployment/nss/WEB-INF/RWStore.properties should be removed. The brew script should replace the following line in the version from bigdata-war/src/WEB-INF/RWStore.properties with an absolute filename. com.bigdata.journal.AbstractJournal.file=ZZZZ - src/resources/deployment/nss/WEB-INF/log4j.properties should be removed. The brew script should replace the following lines in the version from dist/var/config/logging/log4j.properties in order to setup (a) logging to a file; and (b) to specify the absolution location of that file. log4j.rootCategory=XXXX log4j.appender.file.File=YYYY Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml branches/BIGDATA_RELEASE_1_3_0/build.xml branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS Removed Paths: ------------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,9 +1,11 @@ # Default log4j configuration. 
See the individual classes for the # specific loggers, but generally they are named for the class in # which they are defined. - -# Default log4j configuration for testing purposes. # +# This configuration gets used by the bigdata.war artifact when deployed +# into a servlet container. It also might be used by the bigdata webapp +# if -Dlog4j.configuration is not specified when starting bigdata. +# # You probably want to set the default log level to ERROR. # log4j.rootCategory=WARN, dest1 @@ -36,7 +38,7 @@ ## # Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata-war/src/jetty.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -29,46 +29,46 @@ <Set name="detailedDump">false</Set> </Get> - <!-- =========================================================== --> - <!-- Get the platform mbean server --> - <!-- =========================================================== --> - <Call id="MBeanServer" class="java.lang.management.ManagementFactory" - name="getPlatformMBeanServer" /> - - <!-- =========================================================== --> - <!-- Initialize the Jetty MBean container --> - <!-- =========================================================== --> - <!-- Note: This breaks CI if it is enabled - <Call name="addBean"> - <Arg> - <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> - <Arg> - <Ref refid="MBeanServer" /> - </Arg> - </New> - </Arg> - </Call>--> - - <!-- Add the static log to the MBean server. - <Call name="addBean"> - <Arg> - <New class="org.eclipse.jetty.util.log.Log" /> - </Arg> - </Call>--> + <!-- =========================================================== --> + <!-- Get the platform mbean server --> + <!-- =========================================================== --> + <Call id="MBeanServer" class="java.lang.management.ManagementFactory" + name="getPlatformMBeanServer" /> + + <!-- =========================================================== --> + <!-- Initialize the Jetty MBean container --> + <!-- =========================================================== --> + <!-- Note: This breaks CI if it is enabled + <Call name="addBean"> + <Arg> + <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> + <Arg> + <Ref refid="MBeanServer" /> + </Arg> + </New> + </Arg> + </Call>--> + + <!-- Add the static log to the MBean server. 
+ <Call name="addBean"> + <Arg> + <New class="org.eclipse.jetty.util.log.Log" /> + </Arg> + </Call>--> - <!-- For remote MBean access (optional) - <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> - <Arg> - <New class="javax.management.remote.JMXServiceURL"> - <Arg type="java.lang.String">rmi</Arg> - <Arg type="java.lang.String" /> - <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> - <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> - </New> - </Arg> - <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> - <Call name="start" /> - </New>--> + <!-- For remote MBean access (optional) + <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> + <Arg> + <New class="javax.management.remote.JMXServiceURL"> + <Arg type="java.lang.String">rmi</Arg> + <Arg type="java.lang.String" /> + <Arg type="java.lang.Integer"><Property name="jetty.jmxrmiport" default="1090"/></Arg> + <Arg type="java.lang.String">/jndi/rmi://<Property name="jetty.jmxrmihost" default="localhost"/>:<Property name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> + </New> + </Arg> + <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> + <Call name="start" /> + </New>--> <!-- =========================================================== --> <!-- Http Configuration. --> @@ -97,25 +97,25 @@ </New> <!-- Configure the HTTP endpoint. --> - <Call name="addConnector"> - <Arg> - <New class="org.eclipse.jetty.server.ServerConnector"> - <Arg name="server"><Ref refid="Server" /></Arg> - <Arg name="factories"> - <Array type="org.eclipse.jetty.server.ConnectionFactory"> - <Item> - <New class="org.eclipse.jetty.server.HttpConnectionFactory"> - <Arg name="config"><Ref refid="httpConfig" /></Arg> - </New> - </Item> - </Array> - </Arg> - <Set name="host"><SystemProperty name="jetty.host" /></Set> - <Set name="port"><SystemProperty name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><SystemProperty name="http.timeout" default="30000"/></Set> - </New> - </Arg> - </Call> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.ServerConnector"> + <Arg name="server"><Ref refid="Server" /></Arg> + <Arg name="factories"> + <Array type="org.eclipse.jetty.server.ConnectionFactory"> + <Item> + <New class="org.eclipse.jetty.server.HttpConnectionFactory"> + <Arg name="config"><Ref refid="httpConfig" /></Arg> + </New> + </Item> + </Array> + </Arg> + <Set name="host"><Property name="jetty.host" /></Set> + <Set name="port"><Property name="jetty.port" default="8080" /></Set> + <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> + </New> + </Arg> + </Call> <!-- =========================================================== --> <!-- Set handler Collection Structure --> @@ -142,12 +142,12 @@ <Arg> <!-- This is the bigdata web application. 
--> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="war"><Property name="jetty.resourceBase" default="bigdata-war/src"/></Set> <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> - <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> + <Set name="overrideDescriptor"><Property name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> @@ -166,4 +166,4 @@ <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> -</Configure> \ No newline at end of file +</Configure> Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -935,6 +935,9 @@ <property name="dist.doc" location="${dist.dir}/doc" /> <property name="dist.doc.api" location="${dist.dir}/doc/api" /> <property name="dist.doc.legal" location="${dist.dir}/doc/LEGAL" /> + <!-- deployment directories having stuff to be staged. --> + <property name="deploy" location="src/resources/deployment"/> + <property name="deploy.nss" location="${deploy}/nss"/> <delete dir="${dist.dir}" quiet="true" /> <mkdir dir="${dist.dir}" /> @@ -966,7 +969,7 @@ <property name="bigdata-jini.lib" location="${bigdata.dir}/bigdata-jini/lib/jini/lib" /> <property name="bigdata-rdf.lib" location="${bigdata.dir}/bigdata-rdf/lib" /> <property name="bigdata-sails.lib" location="${bigdata.dir}/bigdata-sails/lib" /> - <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" /> + <property name="bigdata-blueprints.lib" location="${bigdata.dir}/bigdata-blueprints/lib" /> <property name="bigdata-gom.lib" location="${bigdata.dir}/bigdata-gom/lib" /> <property name="bigdata-jetty.lib" location="${bigdata.dir}/bigdata/lib/jetty" /> <property name="bigdata-http.lib" location="${bigdata.dir}/bigdata-sails/lib/httpcomponents" /> @@ -1265,6 +1268,30 @@ src="http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer?printable=yes" /> + <!-- Stage files specific to NSS deployments provided by Brew and Chef. --> + <chmod file="${dist.bin}/bigdata" perm="755" /> + <copy file="${deploy.nss}/bin/bigdataNSS" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/bigdata" perm="755" /> + <copy file="${deploy.nss}/bin/startNSS" + todir="${dist.bin}" /> + <chmod file="${dist.bin}/startNSS" perm="755" /> +<!-- +TODO These lines were removed per #951 (Deployments branch merge). They +break the other deployment models by introducing metavariables for regex +substitutions. + + bigdata-war/src/WEB-INF/RWStore.properties (staged into bigdata/var/jetty/bigdata/WEB-INF/RWStore.properties) + + and + + bigdata/src/resources/log4j.properties (staged into dist/var/config/logging/log4j.properties). 
+ <copy file="${deploy.nss}/WEB-INF/RWStore.properties" + todir="${dist.var.jetty}/WEB-INF" overwrite="true" /> + <copy file="${deploy.nss}/WEB-INF/classes/log4j.properties" + todir="${dist.var.jetty}/WEB-INF/classes" overwrite="true" /> +--> + </target> <!-- --> @@ -1344,8 +1371,41 @@ </target> + <target name="package-nss-brew" depends="clean, stage" + description="Create compressed tar file for Jetty based deployment via Brew and Chef installers."> - <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> + <tar destfile="${bigdata.dir}/REL-NSS.${version}.tgz" + compression="gzip"> + + <tarfileset dir="${bigdata.dir}/dist"> + <include name="bigdata/doc/**" /> + <exclude name="bigdata/doc/api/**" /> + <exclude name="bigdata/doc/HAJournalServer.html" /> + <include name="bigdata/lib/**" /> + <exclude name="bigdata/lib/bigdata-ganglia.jar" /> + <exclude name="bigdata/lib/browser.jar" /> + <exclude name="bigdata/lib/reggie.jar" /> + <exclude name="bigdata/lib/zookeeper.jar" /> + <exclude name="bigdata/lib/jsk-*.jar" /> + <exclude name="bigdata/lib-dl" /> + <exclude name="bigdata/lib-ext" /> + <include name="bigdata/var/jetty/**" /> + <include name="bigdata/var/config/logging/logging.properties" /> + <exclude name="bigdata/var/jetty/html/new.html" /> + <exclude name="bigdata/var/jetty/html/old.html" /> + </tarfileset> + + <!-- Add scripts separately, making them executable --> + + <tarfileset dir="${bigdata.dir}/dist" filemode="755"> + <include name="bigdata/bin/bigdataNSS" /> + <include name="bigdata/bin/startNSS" /> + </tarfileset> + </tar> + + </target> + + <!-- FIXME DEBUG and add 'depends="javadoc, stage" (should stage stage javadoc?)' --> <!-- Note: can require 'rpm' and 'rpm-build. --> <!-- TODO: We do not need both this and "deploy-artifact". --> <target name="rpm" depends="prepare" description="Build RPM installer."> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/HARestore 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,10 +1,31 @@ #!/bin/bash +# +# This script has been developed for the "systap-aws-bigdata-ha" cluster +# deployment package. +# +# The HARestore script will recreate the Bigdata HA journal file as of +# the most recent commit point from log and snapshot files. The +# intended use of the script is to restore a journal file that resides +# on an ephemeral storage media (especially, an SSD instance disk) +# from a combination of full backups and transaction logs on durable +# media (e.g., EBS) following a system reboot. The script should not +# be executed while Bigdata is running (it requires exclusive access +# to the journal and will not be able to run if bigdata is already +# running). +# +# HARestore takes no arguments and assumes the Bigdata journal filename\ +# convention: "bigdata-ha.jnl". 
+# + source /etc/default/bigdataHA SERVICE_DIR="$FED_DIR/$FEDNAME/$LOGICAL_SERVICE_ID/HAJournalServer" LIB_DIR="$FED_DIR/lib" -java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar -Dlog4j.configuration=file:var/config/logging/log4j.properties com.bigdata.journal.jini.ha.HARestore -o $DATA_DIR/bigdata-ha.jnl $SERVICE_DIR/snapshot $SERVICE_DIR/HALog - - +java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar\ + -Dlog4j.configuration=file:var/config/logging/log4j.properties\ + com.bigdata.journal.jini.ha.HARestore\ + -o $DATA_DIR/bigdata-ha.jnl\ + $SERVICE_DIR/snapshot\ + $SERVICE_DIR/HALog Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdata.sh 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,61 +0,0 @@ -#!/bin/bash - -# Start the services and put the JVM in the background. All services will -# run in a single JVM. See Apache River com.sun.jini.start.ServiceStarter -# for more details. The services are configured in the accompanying -# startHAServices.config file. Specific configuration options for each -# service are defined in the documentation for that service. -# -# Note: One drawback with running each service in the same JVM is that the -# GC load of all services is combined and all services would be suspended -# at the same time by a Full GC pass. If this is a problem, then you can -# break out the river services (ClassServer and Reggie) into a separate -# ServiceStarter instance from the HAJournalServer. - -# The top-level of the installation. -pushd `dirname $0` > /dev/null;cd ..;INSTALL_DIR=`pwd`;popd > /dev/null - -## -# HAJournalServer configuration parameter overrides (see HAJournal.config). -# -# The bigdata HAJournal.config file may be heavily parameterized through -# environment variables that get passed through into the JVM started by -# this script and are thus made available to the HAJournalServer when it -# interprets the contents of the HAJournal.config file. See HAJournal.config -# for the meaning of these environment variables. -# -# Note: Many of these properties have defaults. -## - -export JETTY_XML="${INSTALL_DIR}/var/jetty/jetty.xml" -export JETTY_RESOURCE_BASE="${INSTALL_DIR}/var/jetty" -export LIB_DIR=${INSTALL_DIR}/lib -export CONFIG_DIR=${INSTALL_DIR}/var/config -export LOG4J_CONFIG=${CONFIG_DIR}/logging/log4j.properties - -# TODO Explicitly enumerate JARs so we can control order if necessary and -# deploy on OS without find and tr. -export HAJOURNAL_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` - -export JAVA_OPTS="\ - -server -Xmx4G\ - -Dlog4j.configuration=${LOG4J_CONFIG}\ - -Djetty.resourceBase=${JETTY_RESOURCE_BASE}\ - -DJETTY_XML=${JETTY_XML}\ -" - -cmd="java ${JAVA_OPTS} \ - -server -Xmx4G \ - -cp ${HAJOURNAL_CLASSPATH} \ - com.bigdata.rdf.sail.webapp.NanoSparqlServer \ - 9999 kb \ - ${INSTALL_DIR}/var/jetty/WEB-INF/GraphStore.properties \ -" -echo "Running: $cmd" -$cmd& -pid=$! 
-# echo "PID=$pid" -echo "kill $pid" > stop.sh -chmod +w stop.sh - -# Note: To obtain the pid, do: read pid < "$pidFile" Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/README.txt 2014-05-23 15:29:40 UTC (rev 8414) @@ -0,0 +1,26 @@ +brew - homebrew installer. installation is the NSS using jetty. No HA features. + +chef - cook book has recipes for bigdata under tomcat; bigdata HA; MapGraph; + NSS using jetty. + +nss - NSS using jetty. The directory contains shell scripts to (a) control + the run state of bigdata in an init.d style script; and (b) start the + NSS using jetty. + +vagrant - HA cluster launcher for AWS; MapGraph launcher; NSS using jetty + launcher; tomcat + bigdata.war install. + +====== Maintenance ====== + +TODO Rename these things to be less ambiguous once we agree on names. + +TODO Document how things are structured from a support and maintenance +perspective. + +TODO Document on the wiki what these various deployments are, how to +choose the right one, and where to get it. See the following tickets. +Also capture the deployment matrix that Daniel has sent by email. + +#926 Add Wiki Entry for Brew Deployment +#925 Add Wiki Entry for Vagrant Deployments +#924 Add Wiki Entry for Chef Cookbooks Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/WEB-INF/classes/log4j.properties 2014-05-23 15:29:40 UTC (rev 8414) @@ -53,7 +53,7 @@ ## # Rule execution log. This is a formatted log file (comma delimited). -log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog +#log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false log4j.appender.ruleLog=org.apache.log4j.FileAppender log4j.appender.ruleLog.Threshold=ALL Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdata 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,109 +0,0 @@ -#!/bin/bash - -# init.d style script for bigdata HA services. The script can be used -# to 'start' or 'stop' services. -# -# Environment: -# -# binDir - The directory containing the installed scripts. -# pidFile - The pid is written on this file. -# -# Misc. -# -# See http://tldp.org/LDP/abs/html/index.html -# -# Note: Blank lines are significant in shell scripts. -# -# Note: Children must do "exit 0" to indicate success. -# -# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix - -# Source function library (just used for 'action'). If you don't have this -# it SHOULD automatically use the inline definition for "action()". - -# -# the following template line will be replaced by a deployer application (e.g. 
brew, chef) -# -export INSTALL_TYPE="<%= INSTALL_TYPE %>" -export BD_HOME="<%= BD_HOME %>" -pidFile=${BD_HOME}/var/lock/pid -binDir=${BD_HOME}/bin - - -# -# See how we were called. -# -case "$1" in - start) -# -# Start the ServiceStarter and child services if not running. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - fi - fi - if [ ! -f "$pidFile" ]; then - echo -ne $"`date` : `hostname` : bringing bigdata services up ... " - $binDir/startNSS - echo "done!" - else - echo $"`date` : `hostname` : running as $pid" - fi - ;; - stop) -# -# Stop the ServiceStarter and all child services. -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then -# The process has died so remove the old pid file. - echo $"`date` : `hostname` : $pid died?" - rm -f "$pidFile" - else - echo -ne $"`date` : `hostname` : bringing bigdata service down ... " - kill $pid - rm -f "$pidFile" - echo "done!" - fi - fi - ;; - status) -# -# Report status for the ServicesManager (up or down). -# - if [ -f "$pidFile" ]; then - read pid < "$pidFile" - pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) - if [ -z "$pidno" ]; then - echo $"`date` : `hostname` : process died? pid=$pid." - else - echo $"`date` : `hostname` : running as $pid." - fi - else - echo $"`date` : `hostname` : not running." - fi - ;; -# -# Simply stop then start. -# - restart) - $0 stop - $0 start - ;; - *) -# -# Usage -# - me=`basename $0` - echo $"Usage: $0 {start|stop|status|restart}" - exit 1 -esac - -exit 0 Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS 2014-05-23 15:29:40 UTC (rev 8414) @@ -0,0 +1,109 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). If you don't have this +# it SHOULD automatically use the inline definition for "action()". + +# +# the following template line will be replaced by a deployer application (e.g. brew, chef) +# +export INSTALL_TYPE="<%= INSTALL_TYPE %>" +export BD_HOME="<%= BD_HOME %>" +pidFile=${BD_HOME}/var/lock/pid +binDir=${BD_HOME}/bin + + +# +# See how we were called. +# +case "$1" in + start) +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + echo -ne $"`date` : `hostname` : bringing bigdata services up ... 
" + $binDir/startNSS + echo "done!" + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + echo -ne $"`date` : `hostname` : bringing bigdata service down ... " + kill $pid + rm -f "$pidFile" + echo "done!" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; +# +# Simply stop then start. +# + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + me=`basename $0` + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/bigdataNSS ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/bin/startNSS 2014-05-23 15:29:40 UTC (rev 8414) @@ -2,9 +2,9 @@ export INSTALL_DIR=${BD_HOME} if [ $INSTALL_TYPE == "BREW" ]; then - export LIB_DIR=${INSTALL_DIR}/libexec + export LIB_DIR=${INSTALL_DIR}/libexec else - export LIB_DIR=${INSTALL_DIR}/lib + export LIB_DIR=${INSTALL_DIR}/lib fi export JETTY_CLASSPATH=`find ${LIB_DIR} -name '*.jar' -print0 | tr '\0' ':'` export JETTY_DIR=${INSTALL_DIR}/var/jetty @@ -21,7 +21,7 @@ export DATA_DIR=${BD_HOME}/var/data if [ ! -d $DATA_DIR ]; then - mkdir -p $DATA_DIR + mkdir -p $DATA_DIR fi export NSS="com.bigdata.rdf.sail.webapp.NanoSparqlServer" @@ -34,7 +34,7 @@ export JETTY_PORT="8080" fi if [ -z "${JETTY_XML}" ]; then - export JETTY_XML="${JETTY_DIR}/etc/jetty.xml" + export JETTY_XML="${JETTY_DIR}/jetty.xml" fi if [ -z "${JETTY_RESOURCE_BASE}" ]; then export JETTY_RESOURCE_BASE="${JETTY_DIR}" @@ -57,7 +57,7 @@ # Setup the directory for the pid of the ServiceStarter process. lockDir=${INSTALL_DIR}/var/lock if [ ! 
-d $lockDir ]; then - mkdir -p $lockDir + mkdir -p $lockDir fi pidFile=$lockDir/pid Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/nss/etc/jetty.xml 2014-05-23 15:29:40 UTC (rev 8414) @@ -1,133 +0,0 @@ -<?xml version="1.0"?> -<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> -<!-- See http://www.eclipse.org/jetty/documentation/current/ --> -<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> -<Configure id="Server" class="org.eclipse.jetty.server.Server"> - - <!-- =========================================================== --> - <!-- Configure the Server Thread Pool. --> - <!-- The server holds a common thread pool which is used by --> - <!-- default as the executor used by all connectors and servlet --> - <!-- dispatches. --> - <!-- --> - <!-- Configuring a fixed thread pool is vital to controlling the --> - <!-- maximal memory footprint of the server and is a key tuning --> - <!-- parameter for tuning. In an application that rarely blocks --> - <!-- then maximal threads may be close to the number of 5*CPUs. --> - <!-- In an application that frequently blocks, then maximal --> - <!-- threads should be set as high as possible given the memory --> - <!-- available. --> - <!-- --> - <!-- Consult the javadoc of o.e.j.util.thread.QueuedThreadPool --> - <!-- for all configuration that may be set here. --> - <!-- =========================================================== --> - <!-- uncomment to change type of threadpool --> - <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg> - <!-- --> - <Get name="ThreadPool"> - <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set> - <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set> - <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set> - <Set name="detailedDump">false</Set> - </Get> - - <!-- =========================================================== --> - <!-- Http Configuration. --> - <!-- This is a common configuration instance used by all --> - <!-- connectors that can carry HTTP semantics (HTTP, HTTPS, SPDY)--> - <!-- It configures the non wire protocol aspects of the HTTP --> - <!-- semantic. --> - <!-- --> - <!-- Consult the javadoc of o.e.j.server.HttpConfiguration --> - <!-- for all configuration that may be set here. 
--> - <!-- =========================================================== --> - <New id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration"> - <Set name="secureScheme">https</Set> - <Set name="securePort"><Property name="jetty.secure.port" default="8443" /></Set> - <Set name="outputBufferSize"><Property name="jetty.output.buffer.size" default="32768" /></Set> - <Set name="requestHeaderSize"><Property name="jetty.request.header.size" default="8192" /></Set> - <Set name="responseHeaderSize"><Property name="jetty.response.header.size" default="8192" /></Set> - <Set name="sendServerVersion"><Property name="jetty.send.server.version" default="true" /></Set> - <Set name="sendDateHeader"><Property name="jetty.send.date.header" default="false" /></Set> - <Set name="headerCacheSize">512</Set> - <!-- Uncomment to enable handling of X-Forwarded- style headers - <Call name="addCustomizer"> - <Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg> - </Call> - --> - </New> - - <!-- Configure the HTTP endpoint. --> - <Call name="addConnector"> - <Arg> - <New class="org.eclipse.jetty.server.ServerConnector"> - <Arg name="server"><Ref refid="Server" /></Arg> - <Arg name="factories"> - <Array type="org.eclipse.jetty.server.ConnectionFactory"> - <Item> - <New class="org.eclipse.jetty.server.HttpConnectionFactory"> - <Arg name="config"><Ref refid="httpConfig" /></Arg> - </New> - </Item> - </Array> - </Arg> - <Set name="host"><Property name="jetty.host" /></Set> - <Set name="port"><Property name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> - </New> - </Arg> - </Call> - - <!-- =========================================================== --> - <!-- Set handler Collection Structure --> - <!-- =========================================================== --> - <Set name="handler"> - <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> - <Set name="handlers"> - <Array type="org.eclipse.jetty.server.Handler"> - <Item> - <!-- This is the bigdata web application. --> - <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. --> - <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> - </Set> - <Set name="contextPath">/bigdata</Set> - <Set name="descriptor"><%= JETTY_DIR %>/WEB-INF/web.xml</Set> - <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">false</Set> - </New> - </Item> - <Item> - <!-- This appears to be necessary in addition to the above. --> - <!-- Without this, it will not resolve http://localhost:8080/ --> - <!-- and can fail to deliver some of the static content. --> - <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. 
--> - <Property name="jetty.resourceBase" default="<%= JETTY_DIR %>" /> - </Set> - <Set name="welcomeFiles"> - <Array type="java.lang.String"> - <Item>html/index.html</Item> - </Array> - </Set> - </New> - </Item> - <!-- <Item> - <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New> - </Item> --> - </Array> - </Set> - </New> - </Set> - - <!-- =========================================================== --> - <!-- extra server options --> - <!-- =========================================================== --> - <Set name="stopAtShutdown">true</Set> - <Set name="stopTimeout">5000</Set> - <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> - <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> - -</Configure> Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-05-22 19:23:15 UTC (rev 8413) +++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2014-05-23 15:29:40 UTC (rev 8414) @@ -95,7 +95,7 @@ fi fi if [ ! -f "$pidFile" ]; then - action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP bash -c "source /etc/default/bigdataHA ; $binDir/startHAServices" + action $"`date` : `hostname` : bringing up services: " sudo -u $BD_USER -g $BD_GROUP bash -c "source /etc/default/bigdataHA ; $binDir/startHAServices > /dev/null 2>&1 &" else echo $"`date` : `hostname` : running as $pid" fi This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
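For anyone restoring a node with the scripts touched in this revision, a minimal restore cycle might look like the sketch below. It assumes the default install root used by the chef recipes (/var/lib/bigdata), the init script installed as /etc/init.d/bigdataHA, and a bigdata service user; HARestore must run while the service is down, since it needs exclusive access to the journal.

  # stop the HA services so the journal file is not held open
  sudo /etc/init.d/bigdataHA stop

  # rebuild bigdata-ha.jnl from the most recent snapshot plus the HALog files
  # (run from the install root; HARestore sources /etc/default/bigdataHA itself)
  sudo -u bigdata bash -c "cd /var/lib/bigdata && bin/HARestore"

  # bring the services back up once the journal has been recreated
  sudo /etc/init.d/bigdataHA start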
From: <tho...@us...> - 2014-05-22 19:23:17
Revision: 8413 http://sourceforge.net/p/bigdata/code/8413 Author: thompsonbry Date: 2014-05-22 19:23:15 +0000 (Thu, 22 May 2014) Log Message: ----------- - jetty.xml: I have replaced all instances of SystemProperty with Property. - log4jHA.properties: The main branch has correct log4j configuration examples for jetty. - Reformatted HARestore, but did not address the issue with how the classpath is assembled. See #951 (Merge deployments branch to main branch). Modified Paths: -------------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata-war/src/jetty.xml branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/HAJournal/log4jHA.properties branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/bin/HARestore Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata-war/src/jetty.xml =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata-war/src/jetty.xml 2014-05-22 17:05:00 UTC (rev 8412) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata-war/src/jetty.xml 2014-05-22 19:23:15 UTC (rev 8413) @@ -62,8 +62,8 @@ <New class="javax.management.remote.JMXServiceURL"> <Arg type="java.lang.String">rmi</Arg> <Arg type="java.lang.String" /> - <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> - <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> + <Arg type="java.lang.Integer"><Property name="jetty.jmxrmiport" default="1090"/></Arg> + <Arg type="java.lang.String">/jndi/rmi://<Property name="jetty.jmxrmihost" default="localhost"/>:<Property name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> </New> </Arg> <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> @@ -142,12 +142,12 @@ <Arg> <!-- This is the bigdata web application. --> <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"><SystemProperty name="jetty.resourceBase" default="bigdata-war/src"/></Set> + <Set name="war"><Property name="jetty.resourceBase" default="bigdata-war/src"/></Set> <Set name="contextPath">/bigdata</Set> <Set name="descriptor">WEB-INF/web.xml</Set> <Set name="parentLoaderPriority">true</Set> <Set name="extractWAR">false</Set> - <Set name="overrideDescriptor"><SystemProperty name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> + <Set name="overrideDescriptor"><Property name="jetty.overrideWebXml" default="bigdata-war/src/WEB-INF/override-web.xml"/></Set> </New> </Arg> </Call> Modified: branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/HAJournal/log4jHA.properties =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/HAJournal/log4jHA.properties 2014-05-22 17:05:00 UTC (rev 8412) +++ branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/HAJournal/log4jHA.properties 2014-05-22 19:23:15 UTC (rev 8413) @@ -18,8 +18,10 @@ log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO -# This will only work if you have the slf4j bridge setup. -#log4j.org.eclipse.jetty.util.log.Log=INFO +# jetty debug logging. +#log4j.logger.org.eclipse.jetty=INFO +#log4j.logger.org.eclipse.jetty.client=DEBUG +#log4j.logger.org.eclipse.jetty.proxy=DEBUG # This can provide valuable information about open connections. 
log4j.logger.com.bigdata.txLog=INFO Modified: branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/bin/HARestore =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/bin/HARestore 2014-05-22 17:05:00 UTC (rev 8412) +++ branches/DEPLOYMENT_BRANCH_1_3_1/src/resources/bin/HARestore 2014-05-22 19:23:15 UTC (rev 8413) @@ -16,6 +16,10 @@ SERVICE_DIR="$FED_DIR/$FEDNAME/$LOGICAL_SERVICE_ID/HAJournalServer" LIB_DIR="$FED_DIR/lib" -java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar -Dlog4j.configuration=file:var/config/logging/log4j.properties com.bigdata.journal.jini.ha.HARestore -o $DATA_DIR/bigdata-ha.jnl $SERVICE_DIR/snapshot $SERVICE_DIR/HALog +java -cp $LIB_DIR/bigdata.jar:$LIB_DIR/commons-logging.jar:$LIB_DIR/log4j.jar:$LIB_DIR/highscalelib.jar:$LIB_DIR/fastutil.jar:$LIB_DIR/dsiutils.jar:$LIB_DIR/lgplutils.jar:$LIB_DIR/icu4j.jar\ + -Dlog4j.configuration=file:var/config/logging/log4j.properties \ + com.bigdata.journal.jini.ha.HARestore \ + -o $DATA_DIR/bigdata-ha.jnl\ + $SERVICE_DIR/snapshot\ + $SERVICE_DIR/HALog - This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
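A note on the log4jHA.properties hunk above: the new org.eclipse.jetty loggers are shipped commented out. A quick way to switch them on for debugging, in the same sed style the chef recipes in this thread rely on (path relative to the branch root, as an illustrative sketch only):

  cd src/resources/HAJournal
  # uncomment the jetty logger lines added in r8413
  sed -i 's|^#log4j.logger.org.eclipse.jetty|log4j.logger.org.eclipse.jetty|' log4jHA.properties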
From: <tho...@us...> - 2014-05-22 17:05:06
Revision: 8412 http://sourceforge.net/p/bigdata/code/8412 Author: thompsonbry Date: 2014-05-22 17:05:00 +0000 (Thu, 22 May 2014) Log Message: ----------- Disabling javadoc builds in CI and during development. Renable for releases. Modified Paths: -------------- branches/BIGDATA_RELEASE_1_3_0/build.properties Modified: branches/BIGDATA_RELEASE_1_3_0/build.properties =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-22 16:17:46 UTC (rev 8411) +++ branches/BIGDATA_RELEASE_1_3_0/build.properties 2014-05-22 17:05:00 UTC (rev 8412) @@ -103,7 +103,7 @@ # Note: The javadoc goes quite if you have enough memory, but can take forever # and then runs out of memory if the JVM is starved for RAM. The heap for the # javadoc JVM is explicitly set in the javadoc target in the build.xml file. -javadoc= +#javadoc= # packaging property set (rpm, deb). package.release=1 This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
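Since the change above only comments the flag out, re-enabling javadoc for a release build is the reverse one-liner (run from the branch root); this is a sketch, not part of the commit:

  # restore the javadoc flag before cutting a release
  sed -i 's|^#javadoc=|javadoc=|' build.properties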
From: <dme...@us...> - 2014-05-22 16:17:49
Revision: 8411 http://sourceforge.net/p/bigdata/code/8411 Author: dmekonnen Date: 2014-05-22 16:17:46 +0000 (Thu, 22 May 2014) Log Message: ----------- minor update. 'supports' metadata statement added Modified Paths: -------------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb 2014-05-22 15:58:30 UTC (rev 8410) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb 2014-05-22 16:17:46 UTC (rev 8411) @@ -15,3 +15,5 @@ depends 'hadoop' depends 'emacs' depends 'sysstat' + +supports 'ubuntu' This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
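To exercise the cookbook on a plain Ubuntu host outside of Vagrant, a chef-solo run along the following lines should work; the cookbook path and attribute values are assumptions based on the defaults discussed in this thread, and the dependency cookbooks named in metadata.rb (apt, java, tomcat, and so on) must also be available under the cookbook path:

  # minimal chef-solo invocation for the bigdata::nss recipe (paths illustrative)
  echo 'cookbook_path ["/home/ubuntu/cookbooks"]' > /tmp/solo.rb
  echo '{ "bigdata": { "install_flavor": "nss", "build_from_svn": false }, "run_list": [ "recipe[bigdata::nss]" ] }' > /tmp/node.json
  sudo chef-solo -c /tmp/solo.rb -j /tmp/node.json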
From: <dme...@us...> - 2014-05-22 15:58:34
Revision: 8410 http://sourceforge.net/p/bigdata/code/8410 Author: dmekonnen Date: 2014-05-22 15:58:30 +0000 (Thu, 22 May 2014) Log Message: ----------- Updates to support the published 1.3.1 packages. Modified Paths: -------------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/brew/bigdata.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/metadata.rb branches/DEPLOYMENT_BRANCH_1_3_1/build.xml Added Paths: ----------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn Removed Paths: ------------- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/brew/bigdata.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/brew/bigdata.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/brew/bigdata.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -2,22 +2,23 @@ class Bigdata < Formula homepage "http://bigdata.com/" - url "http://bigdata.com/deploy/bigdata-1.3.0.tgz" - sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2" +# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz" +# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2" + url "http://bigdata.com/deploy/bigdata-1.3.1.tgz" + sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79" def install prefix.install "doc", "var", "bin" - libexec.install "lib" + libexec.install Dir["lib/*.jar"] + File.rename "#{bin}/bigdataNSS", "#{bin}/bigdata" + # Set the installation path as the root for the bin scripts: inreplace "#{bin}/bigdata" do |s| s.sub! "<%= BD_HOME %>", prefix s.sub! 
"<%= INSTALL_TYPE %>", "BREW" end - # Set the Jetty root as the resourceBase in the jetty.xml file: - inreplace "#{prefix}/var/jetty/etc/jetty.xml", "<%= JETTY_DIR %>", "#{prefix}/var/jetty" - # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data): inreplace "#{prefix}/var/jetty/WEB-INF/RWStore.properties", "<%= BD_HOME %>", prefix Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/README.md 2014-05-22 15:58:30 UTC (rev 8410) @@ -1,10 +1,10 @@ Bigdata Cookbook ================ -The Bigdata cookbook provides the [bigdata v1.3.0](http://www.bigdata.com/bigdata/blog/) opensource triplestore/graph database. The cookbook provides recipes to install the Bigdata server as a web application under Tomcat, with its own embedded Jetty server (NSS - the NanoSparqlServer). The recipes will install pre-configured packages by node and optionally may build and install the server directly from source archive. +The Bigdata cookbook provides the [bigdata v1.3.1](http://www.bigdata.com/) opensource triplestore/graph database. The cookbook provides recipes to install the Bigdata server as a web application under Tomcat, with its own embedded Jetty server (NSS - the NanoSparqlServer). The recipes will install pre-configured packages by node and optionally may build and install the server directly from source archive. For more info on Bigdata please visit: -* Bigdata Homepage: [http://www.bigdata.com/bigdata/blog/](http://www.bigdata.com/bigdata/blog/) +* Bigdata Homepage: [http://www.bigdata.com/](http://www.bigdata.com/) * Bigdata SourceForge Page: [http://sourceforge.net/projects/bigdata/](http://sourceforge.net/projects/bigdata/) Requirements @@ -21,7 +21,7 @@ `node['bigdata'][:home]` - The root directory for bigdata contents (Default: `/var/lib/bigdata`) -`node['bigdata'][:url]` - Where to download the bigdata package file from. (Defaults: Tomcat: http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.0/bigdata.war / NSS: http://bigdata.com/deploy/bigdata-1.3.0.tgz) +`node['bigdata'][:url]` - Where to download the bigdata package file from. (Defaults: Tomcat: http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.1/bigdata.war / NSS: http://bigdata.com/deploy/bigdata-1.3.1.tgz) `node['bigdata'][:data_dir]` - Where the bigdata.jnl resides. 
Discussed in <a href="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer#Common_Startup_Problems">Common Startup Problmems</a> Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/attributes/default.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -8,6 +8,7 @@ # default['bigdata'][:user] = "bigdata" default['bigdata'][:group] = "bigdata" +default['bigdata'][:base_version] = "1.3.1" # # When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory: @@ -18,7 +19,7 @@ case node['bigdata'][:install_flavor] when "nss" # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer: - default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-1.3.0.tgz" + default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz" # Where the jetty resourceBase is defined: default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty" @@ -41,7 +42,7 @@ default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC" # A SourceForge URL to use for downloading the bigdata.war file: - default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.0/bigdata.war" + default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war" # Where the bigdata contents reside under Tomcat: default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata" @@ -60,15 +61,13 @@ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" end when "ha" - # - # Presently Bigdata HA can only be deployed from an SVN build so we set the flag to true: - # - default['bigdata'][:build_from_svn] = true + # The URL to the bigdataHA release bundle. 
+ default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz" # The subversion branch to use when building from source: if node['bigdata'][:build_from_svn] - # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" - default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" + # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" + default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1" end # Where the bigdata-ha.jnl file will live: Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/metadata.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -4,7 +4,7 @@ license 'GNU GPLv2' description 'Installs/Configures Systap Bigdata High Availability' long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) -version '0.1.3' +version '0.1.4' depends 'apt' depends 'java', '>= 1.22.0' Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -11,9 +11,7 @@ if node['bigdata'][:install_flavor] == "ha" include_recipe "java" - include_recipe "ant" include_recipe "sysstat" - include_recipe "subversion::client" include_recipe "hadoop::zookeeper_server" # @@ -28,12 +26,12 @@ # Create the bigdata systm user: # user node['bigdata'][:user] do - gid node['bigdata'][:group] - supports :manage_home => true - shell "/bin/false" - home node['bigdata'][:home] - system true - action :create + gid node['bigdata'][:group] + supports :manage_home => true + shell "/bin/false" + home node['bigdata'][:home] + system true + action :create end # @@ -46,34 +44,92 @@ command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ." 
end - # - # Retrieve the Bigdata source from the specified subversion branch: - # - execute "checkout bigdata from svn repo" do - user 'ubuntu' - group 'ubuntu' - cwd "/home/ubuntu" - command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" - end + if node['bigdata'][:build_from_svn] + include_recipe "ant" + include_recipe "subversion::client" + # + # Retrieve the Bigdata source from the specified subversion branch: + # + execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + end - # - # Build the bigdata release package: - # - execute "ant deploy-artifact" do - user 'ubuntu' - group 'ubuntu' - cwd node['bigdata'][:source_dir] - command "ant deploy-artifact" - end + # + # Build the bigdata release package: + # + execute "ant deploy-artifact" do + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant deploy-artifact" + end - # - # Extract the just built release package, thus installing it in the Bigdata home directory: - # - execute "deflate REL tar" do - user node['bigdata'][:user] - group node['bigdata'][:group] - cwd "#{node['bigdata'][:home]}/.." - command "tar xvf #{node['bigdata'][:source_dir]}/REL.bigdata-1.*.tgz" + # + # Extract the just built release package, thus installing it in the Bigdata home directory: + # + execute "deflate REL tar" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." + command "tar xvf #{node['bigdata'][:source_dir]}/REL.bigdata-1.*.tgz" + end + + else + # + # Retrieve the package prepared for Brew: + # + remote_file "/tmp/bigdata.tgz" do + owner node['bigdata'][:user] + group node['bigdata'][:group] + source node['bigdata'][:url] + end + + # + # Extract the just retrieved release package, thus installing it in the Bigdata home directory: + # + + execute "Extract and relocate the bigdata archive" do + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." 
+ command "tar xvf /tmp/bigdata.tgz" + end + + # + # The following are assumed fixed in releases after 1.3.1 and in the current subversion branch: + # + if node['bigdata'][:base_version].gsub(/\./, '').to_i == 131 + execute "Divert standard and error output into /dev/null" do + user 'root' + group 'root' + cwd "#{node['bigdata'][:home]}/etc/init.d" + command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\&1 \&\"|' bigdataHA" + end + + execute "Change SystemProperty to Property in the 'host' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"host\"><SystemProperty|<Set name=\"host\"><Property|' jetty.xml" + end + + execute "Change SystemProperty to Property in the 'port' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"port\"><SystemProperty|<Set name=\"port\"><Property|' jetty.xml" + end + + execute "Change SystemProperty to Property in the 'idleTimeout' attribute of jetty.xml" do + user 'root' + group 'root' + cwd node['bigdata'][:jetty_dir] + command "sed -i 's|<Set name=\"idleTimeout\"><SystemProperty|<Set name=\"idleTimeout\"><Property|' jetty.xml" + end + end end # @@ -107,16 +163,6 @@ end # - # Install the jetty.xml file: - # - template "#{node['bigdata'][:jetty_dir]}/jetty.xml" do - source "jetty.xml.erb" - owner node['bigdata'][:user] - group node['bigdata'][:group] - mode 00644 - end - - # # Set the absolute path to the RWStore.properties file # execute "set absolute path to RWStore.properties" do Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -15,10 +15,10 @@ # Make sure the Bigdata home directory is owned by the bigdata user and group: # execute "pull mapgraph from svn repo" do - user 'ec2-user' - group 'ec2-user' - cwd "/home/ec2-user" - command "svn checkout #{node['mapgraph'][:svn_branch]} #{node['mapgraph'][:source_dir]}" + user 'ec2-user' + group 'ec2-user' + cwd "/home/ec2-user" + command "svn checkout #{node['mapgraph'][:svn_branch]} #{node['mapgraph'][:source_dir]}" end @@ -26,8 +26,8 @@ # Build MapGgraph: # execute "make mapgraph" do - cwd node['mapgraph'][:source_dir] - command "make" + cwd node['mapgraph'][:source_dir] + command "make" end @@ -36,8 +36,8 @@ # Run a basic test of MapGraph: # execute "test mapgraph" do - cwd node['mapgraph'][:source_dir] - command "./Algorithms/SSSP/SSSP -g smallRegressionGraphs/small.mtx" + cwd node['mapgraph'][:source_dir] + command "./Algorithms/SSSP/SSSP -g smallRegressionGraphs/small.mtx" end Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/nss.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -16,8 +16,8 @@ # Create the bigdata systm group: # group node['bigdata'][:group] do - action :create - append true + action :create + append true end # @@ -34,34 +34,36 @@ if 
node['bigdata'][:build_from_svn] - include_recipe "ant" - include_recipe "subversion::client" + include_recipe "ant" + include_recipe "subversion::client" # # Retrieve the Bigdata source from the specified subversion branch: # execute "checkout bigdata from svn repo" do - user 'ubuntu' - group 'ubuntu' - cwd "/home/ubuntu" - command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" end # # Build the bigdata release package: # execute "build the nss tar ball" do - user 'ubuntu' - group 'ubuntu' - cwd node['bigdata'][:source_dir] - command "ant package-nss-brew" + user 'ubuntu' + group 'ubuntu' + cwd node['bigdata'][:source_dir] + command "ant package-nss-brew" end # # Extract the just built release package, thus installing it in the Bigdata home directory: # execute "Extract and relocate the bigdata archive" do - cwd "/var/lib" + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." command "tar xvf #{node['bigdata'][:source_dir]}/REL-NSS.bigdata-1.*.tgz" end else @@ -78,30 +80,14 @@ # Extract the just retrieved release package, thus installing it in the Bigdata home directory: # execute "Extract and relocate the bigdata archive" do - cwd "/var/lib" + user node['bigdata'][:user] + group node['bigdata'][:group] + cwd "#{node['bigdata'][:home]}/.." command "tar xvf /tmp/bigdata.tgz" end - - # - # Rename "bigbdata" to "bigdataNSS" for now. This block can be removed in the 1.3.1 update for the Brew package - # - execute "Extract and relocate the bigdata archive" do - cwd "#{node['bigdata'][:home]}/bin" - command "mv bigdata bigdataNSS" - end end # - # Make sure the Bigdata home directory is owned by the bigdata user and group: - # - execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do - user "root" - group "root" - cwd node['bigdata'][:home] - command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ." 
- end - - # # Create a symbolic link of the bin/bigdataNSS script to /etc/init.d/bigdataNSS: # link "/etc/init.d/bigdataNSS" do @@ -112,43 +98,35 @@ # Set the install type in the bin/bigdataNSS script: # execute "set the INSTALL_TYPE in bin/bigdata" do - cwd "#{node['bigdata'][:home]}/bin" - command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_flavor]}|' bigdataNSS" + cwd "#{node['bigdata'][:home]}/bin" + command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_flavor]}|' bigdataNSS" end # # Set the Bigdata home directory in the bin/bigdataNSS file: # execute "set the BD_HOME in bin/bigdata" do - cwd "#{node['bigdata'][:home]}/bin" - command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' bigdataNSS" + cwd "#{node['bigdata'][:home]}/bin" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' bigdataNSS" end # # Set the absolute path to the bigdata.jnl file in RWStore.properties # execute "set the BD_HOME in RWStore.properties" do - cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" - command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' RWStore.properties" + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' RWStore.properties" end # # Set the Bigdata home directory in the log4j.properties file to set the path for the log files: # execute "set the BD_HOME in log4j.properties" do - cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF/classes" - command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' log4j.properties" + cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF/classes" + command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' log4j.properties" end # - # Install the jetty.xml file: - # - execute "set the JETTY_DIR in jetty.xml" do - cwd "#{node['bigdata'][:jetty_dir]}/etc/" - command "sed -i 's|<%= JETTY_DIR %>|#{node['bigdata'][:jetty_dir]}|' jetty.xml" - end - - # # Setup the bigdataNSS script as a service: # service "bigdataNSS" do Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-22 15:58:30 UTC (rev 8410) @@ -22,23 +22,23 @@ # Error: Could not create the Java Virtual Machine. # execute "comment out 2nd JAVA_OPTS line in /etc/default/tomcat7" do - cwd "/etc/default" - command "sed -i 's|JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|#JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|' tomcat7" + cwd "/etc/default" + command "sed -i 's|JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|#JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|' tomcat7" end if node['bigdata'][:build_from_svn] - include_recipe "ant" - include_recipe "subversion::client" + include_recipe "ant" + include_recipe "subversion::client" # # Retrieve the Bigdata source from the specified subversion branch: # execute "checkout bigdata from svn repo" do - user 'ubuntu' - group 'ubuntu' - cwd "/home/ubuntu" - command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}" end # @@ -124,24 +124,23 @@ # process do not unnecessarily alarm anyone. 
# execute "remove log files before retart" do - cwd "#{node['tomcat'][:log_dir]}" - command "rm *" + cwd "#{node['tomcat'][:log_dir]}" + command "rm *" end - # # The RWStore.properties path is the only property that needs to be adjusted in the web.xml file. # Using a sed command to adjust the property avoids the need to maintain a web.xml template which # in turn updates frequently relative to the other property files. Thus this recipe becomes # suitable against a larger range of bigdata releases. # - if node['bigdata'][:build_from_svn] + if node['bigdata'][:base_version].gsub(/\./, '').to_i >= 131 # # Set the RWStore.properties path in the web.xml file: # execute "set absolute path for RWStore.properties" do - cwd "#{node['bigdata'][:web_home]}/WEB-INF" - command "sed -i 's|<param-value>../webapps/bigdata/WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" + cwd "#{node['bigdata'][:web_home]}/WEB-INF" + command "sed -i 's|<param-value>../webapps/bigdata/WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" end # @@ -152,18 +151,18 @@ end else # - # 1.3.0 uses a different path for RWStore.properties. We can remove this if block in 1.3.1 + # 1.3.0 and earlier uses a different path for RWStore.properties. We can remove this if block in 1.3.1 # execute "set absolute path for RWStore.properties" do - cwd "#{node['bigdata'][:web_home]}/WEB-INF" - command "sed -i 's|<param-value>../webapps/bigdata/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" + cwd "#{node['bigdata'][:web_home]}/WEB-INF" + command "sed -i 's|<param-value>../webapps/bigdata/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml" end # # Remove original RWStore.properties file to avoid user confusion # file "#{node['bigdata'][:web_home]}/RWStore.properties" do - action :delete + action :delete end end end Deleted: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb 2014-05-21 18:53:16 UTC (rev 8409) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/chef/templates/default/jetty.xml.erb 2014-05-22 15:58:30 UTC (rev 8410) @@ -1,195 +0,0 @@ -<?xml version="1.0"?> -<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> -<!-- See http://www.eclipse.org/jetty/documentation/current/ --> -<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> -<Configure id="Server" class="org.eclipse.jetty.server.Server"> - - <!-- =========================================================== --> - <!-- Configure the Server Thread Pool. --> - <!-- The server holds a common thread pool which is used by --> - <!-- default as the executor used by all connectors and servlet --> - <!-- dispatches. --> - <!-- --> - <!-- Configuring a fixed thread pool is vital to controlling the --> - <!-- maximal memory footprint of the server and is a key tuning --> - <!-- parameter for tuning. In an application that rarely blocks --> - <!-- then maximal threads may be close to the number of 5*CPUs. --> - <!-- In an application that frequently blocks, then maximal --> - <!-- threads should be set as high as possible given the memory --> - <!-- available. 
--> - <!-- --> - <!-- Consult the javadoc of o.e.j.util.thread.QueuedThreadPool --> - <!-- for all configuration that may be set here. --> - <!-- =========================================================== --> - <!-- uncomment to change type of threadpool --> - <Arg name="threadpool"><New id="threadpool" class="org.eclipse.jetty.util.thread.QueuedThreadPool"/></Arg> - <!-- --> - <Get name="ThreadPool"> - <Set name="minThreads" type="int"><SystemProperty name="jetty.threads.min" default="10"/></Set> - <Set name="maxThreads" type="int"><SystemProperty name="jetty.threads.max" default="64"/></Set> - <Set name="idleTimeout" type="int"><SystemProperty name="jetty.threads.timeout" default="60000"/></Set> - <Set name="detailedDump">false</Set> - </Get> - - <!-- =========================================================== --> - <!-- Get the platform mbean server --> - <!-- =========================================================== --> - <Call id="MBeanServer" class="java.lang.management.ManagementFactory" - name="getPlatformMBeanServer" /> - - <!-- =========================================================== --> - <!-- Initialize the Jetty MBean container --> - <!-- =========================================================== --> - <!-- Note: This breaks CI if it is enabled - <Call name="addBean"> - <Arg> - <New id="MBeanContainer" class="org.eclipse.jetty.jmx.MBeanContainer"> - <Arg> - <Ref refid="MBeanServer" /> - </Arg> - </New> - </Arg> - </Call>--> - - <!-- Add the static log to the MBean server. - <Call name="addBean"> - <Arg> - <New class="org.eclipse.jetty.util.log.Log" /> - </Arg> - </Call>--> - - <!-- For remote MBean access (optional) - <New id="ConnectorServer" class="org.eclipse.jetty.jmx.ConnectorServer"> - <Arg> - <New class="javax.management.remote.JMXServiceURL"> - <Arg type="java.lang.String">rmi</Arg> - <Arg type="java.lang.String" /> - <Arg type="java.lang.Integer"><SystemProperty name="jetty.jmxrmiport" default="1090"/></Arg> - <Arg type="java.lang.String">/jndi/rmi://<SystemProperty name="jetty.jmxrmihost" default="localhost"/>:<SystemProperty name="jetty.jmxrmiport" default="1099"/>/jmxrmi</Arg> - </New> - </Arg> - <Arg>org.eclipse.jetty.jmx:name=rmiconnectorserver</Arg> - <Call name="start" /> - </New>--> - - <!-- =========================================================== --> - <!-- Http Configuration. --> - <!-- This is a common configuration instance used by all --> - <!-- connectors that can carry HTTP semantics (HTTP, HTTPS, SPDY)--> - <!-- It configures the non wire protocol aspects of the HTTP --> - <!-- semantic. --> - <!-- --> - <!-- Consult the javadoc of o.e.j.server.HttpConfiguration --> - <!-- for all configuration that may be set here. 
--> - <!-- =========================================================== --> - <New id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration"> - <Set name="secureScheme">https</Set> - <Set name="securePort"><Property name="jetty.secure.port" default="8443" /></Set> - <Set name="outputBufferSize"><Property name="jetty.output.buffer.size" default="32768" /></Set> - <Set name="requestHeaderSize"><Property name="jetty.request.header.size" default="8192" /></Set> - <Set name="responseHeaderSize"><Property name="jetty.response.header.size" default="8192" /></Set> - <Set name="sendServerVersion"><Property name="jetty.send.server.version" default="true" /></Set> - <Set name="sendDateHeader"><Property name="jetty.send.date.header" default="false" /></Set> - <Set name="headerCacheSize">512</Set> - <!-- Uncomment to enable handling of X-Forwarded- style headers - <Call name="addCustomizer"> - <Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg> - </Call> - --> - </New> - - <!-- Configure the HTTP endpoint. --> - <Call name="addConnector"> - <Arg> - <New class="org.eclipse.jetty.server.ServerConnector"> - <Arg name="server"><Ref refid="Server" /></Arg> - <Arg name="factories"> - <Array type="org.eclipse.jetty.server.ConnectionFactory"> - <Item> - <New class="org.eclipse.jetty.server.HttpConnectionFactory"> - <Arg name="config"><Ref refid="httpConfig" /></Arg> - </New> - </Item> - </Array> - </Arg> - <Set name="host"><Property name="jetty.host" /></Set> - <Set name="port"><Property name="jetty.port" default="8080" /></Set> - <Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set> - </New> - </Arg> - </Call> - - <!-- =========================================================== --> - <!-- Set handler Collection Structure --> - <!-- =========================================================== --> - <!-- Recommended approach: does not work for HA CI test suite. - <Set name="handler"> - <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> - <Set name="handlers"> - <Array type="org.eclipse.jetty.server.Handler"> - <Item> - <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="war"> - <SystemProperty name="jetty.resourceBase" default="bigdata-war/src" /> - </Set> - <Set name="contextPath">/bigdata</Set> - <Set name="descriptor">WEB-INF/web.xml</Set> - <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">false</Set> - </New> - </Item> - </Array> - </Set> - </New> - </Set> --> - <Set name="handler"> - <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> - <Set name="handlers"> - <Array type="org.eclipse.jetty.server.Handler"> - <Item> - <!-- This is the bigdata web application. --> - <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. --> - <Property name="jetty.resourceBase" default="<%= node['bigdata'][:jetty_dir] %>" /> - </Set> - <Set name="contextPath">/bigdata</Set> - <Set name="descriptor">WEB-INF/web.xml</Set> - <Set name="descriptor"><%= node['bigdata'][:jetty_dir] %>/WEB-INF/web.xml</Set> - <Set name="parentLoaderPriority">true</Set> - <Set name="extractWAR">false</Set> - </New> - </Item> - <Item> - <!-- This appears to be necessary in addition to the above. --> - <!-- Without this, it will not resolve http://localhost:8080/ --> - <!-- and can fail to deliver some of the static content. 
--> - <New id="ResourceHandler" class="org.eclipse.jetty.server.handler.ResourceHandler"> - <Set name="resourceBase"> - <!-- The location of the top-level of the bigdata webapp. --> - <Property name="jetty.resourceBase" default="<%= node['bigdata'][:jetty_dir] %>" /> - </Set> - <Set name="welcomeFiles"> - <Array type="java.lang.String"> - <Item>html/index.html</Item> - </Array> - </Set> - </New> - </Item> - <!-- <Item> - <New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"></New> - </Item> --> - </Array> - </Set> - </New> - </Set> - - <!-- =========================================================== --> - <!-- extra server options --> - <!-- =========================================================== --> - <Set name="stopAtShutdown">true</Set> - <Set name="stopTimeout">5000</Set> - <Set name="dumpAfterStart"><Property name="jetty.dump.start" default="false"/></Set> - <Set name="dumpBeforeStop"><Property name="jetty.dump.stop" default="false"/></Set> - -</Configure> Added: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn =================================================================== --- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn (rev 0) +++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn 2014-05-22 15:58:30 UTC (rev 8410) @@ -0,0 +1,212 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +# +# Vagraant.aws.ha3 - Install the Bigdata High Availability Server with 3 Nodes with an AWS Provider +# +# This vagrant file is meant to be launched by the bin/createCluster.sh script. +# +# The launch synopsis for this Vagrantfile: +# +# % bin/createCluster.sh +# +ENV['VAGRANT_DEFAULT_PROVIDER'] = 'aws' + +Vagrant.require_plugin "vagrant-berkshelf" + +Vagrant.configure("2") do |config| + # All Vagrant configuration is done here. The most common configuration + # options are documented and commented below. For a complete reference, + # please see the online documentation at vagrantup.com. 
+
+config.vm.define :bigdataA do |bigdataA|
+  bigdataA.vm.box = "dummy"
+  bigdataA.vm.hostname = ENV['BIGDATA_HA_HOST_A']
+
+  bigdataA.berkshelf.enabled = true
+
+  bigdataA.vm.provider :aws do |aws, override|
+    aws.access_key_id = ENV['AWS_ACCESS_KEY_ID']
+    aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
+    aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
+
+    aws.ami = ENV['AWS_AMI']
+
+    aws.block_device_mapping = [
+      {
+        :DeviceName => "/dev/sdb",
+        :VirtualName => "ephemeral0"
+      },
+      {
+        :DeviceName => "/dev/sdc",
+        :VirtualName => "ephemeral1"
+      }
+    ]
+
+    aws.region = ENV['AWS_REGION']
+    aws.instance_type = ENV['AWS_INSTANCE_TYPE']
+    aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ]
+
+    aws.tags = {
+      'Name' => ENV['BIGDATA_HA_HOST_A']
+    }
+
+    override.ssh.username = ENV['AWS_AMI_USERNAME']
+    override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY']
+  end
+
+  bigdataA.vm.provision :chef_solo do |chef|
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "ha",
+        :build_from_svn => true
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      },
+      :zookeeper => {
+        :zoocfg => { :clientPort=> '2081' }
+      }
+    }
+
+    bigdataA.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"
+
+    chef.run_list = [
+      "recipe[bigdata::ssd]",
+      "recipe[bigdata::high_availability]"
+    ]
+
+  end
+
+end # bigdataA
+
+
+config.vm.define :bigdataB do |bigdataB|
+  bigdataB.vm.box = "dummy"
+  bigdataB.vm.hostname = ENV['BIGDATA_HA_HOST_B']
+
+  bigdataB.berkshelf.enabled = true
+
+  bigdataB.vm.provider :aws do |aws, override|
+    aws.access_key_id = ENV['AWS_ACCESS_KEY_ID']
+    aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
+    aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
+
+    aws.ami = ENV['AWS_AMI']
+
+    aws.block_device_mapping = [
+      {
+        :DeviceName => "/dev/sdb",
+        :VirtualName => "ephemeral0"
+      },
+      {
+        :DeviceName => "/dev/sdc",
+        :VirtualName => "ephemeral1"
+      }
+    ]
+
+    aws.region = ENV['AWS_REGION']
+    aws.instance_type = ENV['AWS_INSTANCE_TYPE']
+    aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ]
+
+    aws.tags = {
+      'Name' => ENV['BIGDATA_HA_HOST_B']
+    }
+
+    override.ssh.username = ENV['AWS_AMI_USERNAME']
+    override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY']
+  end
+
+  bigdataB.vm.provision :chef_solo do |chef|
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "ha",
+        :build_from_svn => true
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      },
+      :zookeeper => {
+        :zoocfg => { :clientPort=> '2081' }
+      }
+    }
+
+    bigdataB.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"
+
+    chef.run_list = [
+      "recipe[bigdata::ssd]",
+      "recipe[bigdata::high_availability]"
+    ]
+
+  end
+
+end # bigdataB
+
+
+config.vm.define :bigdataC do |bigdataC|
+  bigdataC.vm.box = "dummy"
+  bigdataC.vm.hostname = ENV['BIGDATA_HA_HOST_C']
+
+  bigdataC.berkshelf.enabled = true
+
+  bigdataC.vm.provider :aws do |aws, override|
+    aws.access_key_id = ENV['AWS_ACCESS_KEY_ID']
+    aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY']
+    aws.keypair_name = ENV['AWS_KEYPAIR_NAME']
+
+    aws.ami = ENV['AWS_AMI']
+
+    aws.block_device_mapping = [
+      {
+        :DeviceName => "/dev/sdb",
+        :VirtualName => "ephemeral0"
+      },
+      {
+        :DeviceName => "/dev/sdc",
+        :VirtualName => "ephemeral1"
+      }
+    ]
+
+    aws.region = ENV['AWS_REGION']
+    aws.instance_type = ENV['AWS_INSTANCE_TYPE']
+    aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ]
+
+    aws.tags = {
+      'Name' => ENV['BIGDATA_HA_HOST_C']
+    }
+
+    override.ssh.username = ENV['AWS_AMI_USERNAME']
+    override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY']
+  end
+
+  bigdataC.vm.provision :chef_solo do |chef|
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "ha",
+        :build_from_svn => true
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      },
+      :zookeeper => {
+        :zoocfg => { :clientPort=> '2081' }
+      }
    }
+
+    bigdataC.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash"
+
+    chef.run_list = [
+      "recipe[bigdata::ssd]",
+      "recipe[bigdata::high_availability]"
+    ]
+
+  end
+
+end # bigdataC
+
+end

Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh
===================================================================
--- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh	2014-05-21 18:53:16 UTC (rev 8409)
+++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh	2014-05-22 15:58:30 UTC (rev 8410)
@@ -8,7 +8,7 @@
 rm .aws_security_group
 vagrant up
 #
-# Occassionally AWS has timeout issues. If this occurs, launch the cluster instances individually:
+# Occassionally, usually during svn based builds, AWS has timeout issues. If this occurs, launch the cluster instances individually:
 #
 # vagrant up bigdataA
 # echo "\nbigdataA is up\n"

Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py
===================================================================
--- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py	2014-05-21 18:53:16 UTC (rev 8409)
+++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py	2014-05-22 15:58:30 UTC (rev 8410)
@@ -94,6 +94,6 @@
 	# status, stdin, stderr = ssh_client.run( "sudo service bigdataHA restart" )
 	# host.reboot()
 
-	print "The hosts are now rebooting, this may take several minutes. \nOnce back up, you may confirm status by visiting:\n"
+	print "The bigdata HA service is now restarting, this may take several minutes. \nOnce back up, you may confirm status by visiting:\n"
 
 for host in bigdataHosts:
 	print "\thttp://" + host.__dict__['ip_address'] + ":8080/bigdata/status\n"

Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat
===================================================================
--- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat	2014-05-21 18:53:16 UTC (rev 8409)
+++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat	2014-05-22 15:58:30 UTC (rev 8410)
@@ -42,7 +42,8 @@
     chef.json = {
       :bigdata => {
         :install_flavor => "tomcat",
-        :build_from_svn => false
+        :build_from_svn => false,
+        # :base_version => "1.3.0"
       },
       :java => {
         "install_flavor" => "oracle",

Modified: branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/metadata.rb
===================================================================
--- branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/metadata.rb	2014-05-21 18:53:16 UTC (rev 8409)
+++ branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/samples/AWS/metadata.rb	2014-05-22 15:58:30 UTC (rev 8410)
@@ -4,7 +4,7 @@
 license 'GNU GPLv2'
 description 'Installs/Configures Systap Bigdata High Availability'
 long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.1.3'
+version '0.1.4'
 depends 'apt'
 depends 'java', '>= 1.22.0'

Modified: branches/DEPLOYMENT_BRANCH_1_3_1/build.xml
===================================================================
--- branches/DEPLOYMENT_BRANCH_1_3_1/build.xml	2014-05-21 18:53:16 UTC (rev 8409)
+++ branches/DEPLOYMENT_BRANCH_1_3_1/build.xml	2014-05-22 15:58:30 UTC (rev 8410)
@@ -1366,6 +1366,7 @@
 
 			<tarfileset dir="${bigdata.dir}/dist">
 				<include name="bigdata/doc/**" />
+				<exclude name="bigdata/doc/api/**" />
 				<exclude name="bigdata/doc/HAJournalServer.html" />
 				<include name="bigdata/lib/**" />
 				<exclude name="bigdata/lib/bigdata-ganglia.jar" />

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
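The setHosts.py hunk above prints the per-node status URLs (http://<host>:8080/bigdata/status) that become reachable once the bigdataHA service has restarted on each instance. Below is a minimal sketch for polling those URLs from the launch workstation; it is not part of the committed scripts, and the HOSTS list is a placeholder for the addresses that setHosts.py reports for bigdataA, bigdataB and bigdataC.

    #!/usr/bin/env python
    # Minimal sketch (not part of the repository): poll each node's
    # /bigdata/status page until it answers, so the restart can be
    # confirmed without watching a browser.  HOSTS is a placeholder --
    # substitute the addresses printed by setHosts.py.
    import time
    import urllib2

    HOSTS = ["bigdataA", "bigdataB", "bigdataC"]   # hypothetical hostnames

    def wait_for_status(host, port=8080, timeout=600):
        url = "http://%s:%d/bigdata/status" % (host, port)
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                urllib2.urlopen(url, timeout=10).read()
                print "up: " + url
                return True
            except IOError:
                time.sleep(15)          # service may still be restarting
        print "timed out waiting for " + url
        return False

    if __name__ == "__main__":
        for h in HOSTS:
            wait_for_status(h)

Assuming the placeholder hosts are replaced with the real addresses, running this after "vagrant up" and setHosts.py have completed confirms that each node's status page is serving before the cluster is put to use.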