[Bigdata-commit] SF.net SVN: bigdata:[8415] branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment
From: <tho...@us...> - 2014-05-23 15:53:41
Revision: 8415
http://sourceforge.net/p/bigdata/code/8415
Author: thompsonbry
Date: 2014-05-23 15:53:35 +0000 (Fri, 23 May 2014)
Log Message:
-----------
Fix to broken commit for #941.
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4jHA.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/zoo.cfg.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createSecurityGroup.py
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Berksfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Gemfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Thorfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.mapgraph
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/aws.rc
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/chefignore
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Berksfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Gemfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Thorfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/chefignore
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,50 @@
+require "formula"
+
+class Bigdata < Formula
+ homepage "http://bigdata.com/"
+# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz"
+# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2"
+ url "http://bigdata.com/deploy/bigdata-1.3.1.tgz"
+ sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79"
+
+ def install
+ prefix.install "doc", "var", "bin"
+ libexec.install Dir["lib/*.jar"]
+
+ File.rename "#{bin}/bigdataNSS", "#{bin}/bigdata"
+
+ # Set the installation path as the root for the bin scripts:
+ inreplace "#{bin}/bigdata" do |s|
+ s.sub! "<%= BD_HOME %>", prefix
+ s.sub! "<%= INSTALL_TYPE %>", "BREW"
+ end
+
+ # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data):
+ inreplace "#{prefix}/var/jetty/WEB-INF/RWStore.properties", "<%= BD_HOME %>", prefix
+
+ # Set the installation path as the root for log files (<bigdata_home>/log):
+ inreplace "#{prefix}/var/jetty/WEB-INF/classes/log4j.properties", "<%= BD_HOME %>", prefix
+ end
+
+ plist_options :startup => 'true', :manual => 'bigdata start'
+
+ def plist; <<-EOS.undent
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>#{plist_name}</string>
+ <key>Program</key>
+ <string>#{bin}/bigdata</string>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>WorkingDirectory</key>
+ <string>#{prefix}</string>
+ </dict>
+ </plist>
+ EOS
+ end
+
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,186 @@
+Bigdata Cookbook
+================
+The Bigdata cookbook provides the [bigdata v1.3.1](http://www.bigdata.com/) opensource triplestore/graph database. The cookbook provides recipes to install the Bigdata server as a web application under Tomcat, with its own embedded Jetty server (NSS - the NanoSparqlServer). The recipes will install pre-configured packages by node and optionally may build and install the server directly from source archive.
+
+For more info on Bigdata please visit:
+
+* Bigdata Homepage: [http://www.bigdata.com/](http://www.bigdata.com/)
+* Bigdata SourceForge Page: [http://sourceforge.net/projects/bigdata/](http://sourceforge.net/projects/bigdata/)
+
+Requirements
+------------
+Chef 11 or higher<br/>
+Ruby 1.9 (preferably from the Chef full-stack installer)
+
+
+
+Attributes
+----------
+
+### General Attributes
+
+`node['bigdata'][:home]` - The root directory for bigdata contents (Default: `/var/lib/bigdata`)
+
+`node['bigdata'][:url]` - Where to download the bigdata package file from. (Defaults: Tomcat: http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.1/bigdata.war / NSS: http://bigdata.com/deploy/bigdata-1.3.1.tgz)
+
+`node['bigdata'][:data_dir]` - Where the bigdata.jnl file resides, as discussed in [Common Startup Problems](http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer#Common_Startup_Problems). (Defaults: Tomcat: `node['bigdata'][:home]`/data / NSS: `node['bigdata'][:home]`/var/data)
+
+`node['bigdata'][:log_dir]` - Where bigdata log files should reside (e.g. queryLog.csv, rules.log, queryRunStateLog.csv). (Default: Tomcat: `node['bigdata'][:home]`/log / NSS: `node['bigdata'][:home]`/var/log)
+
+`node['bigdata'][:properties]` - File path to the Bigdata properties file. (Default: `node['bigdata'][:home]`/RWStore.properties)
+
+`node['bigdata'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: Tomcat: https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA\_RELEASE\_1\_3\_0 / NSS: https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT\_BRANCH\_1\_3\_1)
+
+`node['bigdata'][:source]` - The directory to retrieve Subversion contents into. (Default: bigdata-code)
+
+`node['bigdata']['journal.AbstractJournal.bufferMode']` - Journal Buffer Mode (Default: DiskRW)
+
+`node['bigdata']['service.AbstractTransactionService.minReleaseAge']` - Minimum Release Age (Default: 1)
+
+`node['bigdata']['btree.writeRetentionQueue.capacity']` - Writing retention queue length. (Default: 4000)
+
+`node['bigdata']['btree.BTree.branchingFactor']` - Branching factor for the journal's B-Tree. (Default: 128)
+
+`node['bigdata']['journal.AbstractJournal.initialExtent']` - Journal's initial extent (Default: 209715200)
+
+`node['bigdata']['journal.AbstractJournal.maximumExtent']` - Journal's maximum extent (Default: 209715200)
+
+`node['bigdata']['rdf.sail.truthMaintenance']` - Switch Truth Maintenance on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.quads']` - Switch Quads Mode on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers']` - Switch statement identifiers on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.textIndex']` - Switch text indexing on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.axiomsClass']` - The class to handle RDF axioms. (Default: com.bigdata.rdf.axioms.NoAxioms)
+
+`node['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's Lexical B-Tree. (Default: 400)
+
+`node['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's SPO B-Tree. (Default: 1024)
+
+`node['bigdata']['rdf.sail.bufferCapacity']` - The number of statements to buffer before committing triples to the persistence layer. (Default: 100000)
+
+### Attributes for Tomcat Based Install
+
+`node['bigdata'][:web_home]` - The web application root directory for bigdata. (Default `node['tomcat'][:webapp_dir]`/bigdata)
+
+`node['bigdata'][:log4j_properties]` - File path to the log4j properties file. (Default `node['bigdata'][:web_home]`/WEB-INF/classes/log4j.properties)
+
+### Attributes for NanoSparqlServer (NSS) Based Install
+
+`node['bigdata'][:user]` - The user to install and run bigdata under. (Default: `bigdata`)
+
+`node['bigdata'][:group]` - The group to install and run bigdata under. (Default: `bigdata`)
+
+`node['bigdata'][:jetty_dir]` - The Jetty root directory. (Default: `node['bigdata'][:home]`/var/jetty)
+
+### Attributes for MapGraph
+
+`node['mapgraph'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: https://svn.code.sf.net/p/mpgraph/code/trunk)
+
+`node['mapgraph'][:source]` - The directory to retrieve Subversion contents into. (Default: mapgraph-code )
+
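+The attributes above may be overridden per node. As a minimal sketch (assuming the Vagrant/chef-solo context shown in the Usage section below, and assuming the RWStore.properties template reads these keys), quads mode and the full text index could be switched on like this:
+
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "nss",
+        'rdf.store.AbstractTripleStore.quads' => "true",
+        'rdf.store.AbstractTripleStore.textIndex' => "true"
+      }
+    }
+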
+
+Recipes
+-------
+
+A default recipe is not provided by the Bigdata cookbook. The user is given the option to install the Bigdata server under Tomcat or as a Jetty application. Under both options, Bigdata may optionally be built directly from a Subversion source code branch.
+
+### tomcat
+
+Installs the [Tomcat](http://tomcat.apache.org/) server and then bigdata as a web application. Bigdata will be configured according to the attributes. If no attributes are given, Bigdata will be installed with the default attribute values.
+
+If the `build_from_svn` attribute is set to `true`, Bigdata will be built from the Subversion repository given in the `svn_branch` attribute.
+
+### nss
+
+Installs the Bigdata server to run in the [NanoSparqlServer](http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer) (Jetty) mode.
+
+
+If the `build_from_svn` attribute is set to `true`, Bigdata will be built from the Subversion repository given in the `svn_branch` attribute.
+
+
+### mapgraph
+
+Retrieves the [MapGraph](http://sourceforge.net/projects/mpgraph/) project from its Subversion archive at SourceForge and builds it.
+This recipe can only be used on GPU architectures and has only been validated against Amazon's "NVIDIA GRID GPU Driver" AMI.
+
+
+Usage
+-----
+
+
+### Vagrant Context
+
+Sample Vagrant configurations are available in the Bigdata Subversion source tree under [bigdata/src/resources/deployment/vagrant](http://sourceforge.net/p/bigdata/code/HEAD/tree/branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/).
+
+#### Tomcat Example
+
+
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "tomcat",
+        :build_from_svn => true,
+        :svn_branch => "https://svn.code.sf.net/p/bigdata/code/branches/BTREE_BUFFER_BRANCH/"
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      },
+      :tomcat => {
+        :base_version => "7"
+      }
+    }
+
+    chef.run_list = [
+      ...
+      "recipe[bigdata::tomcat]"
+      ...
+    ]
+
+
+
+#### NSS Example
+
+
+    chef.json = {
+      :bigdata => {
+        :install_flavor => "nss"
+      },
+      :java => {
+        :install_flavor => "oracle",
+        :jdk_version => "7",
+        :oracle => { 'accept_oracle_download_terms' => true }
+      }
+    }
+
+    chef.run_list = [
+      ...
+      "recipe[bigdata::nss]"
+      ...
+    ]
+
+
+### Troubleshooting
+
+The Bigdata cookbook recipes have been tested thoroughly in the Vagrant context with VirtualBox and AWS providers, using Ubuntu 12.04 and Oracle's JDK 7.
+
+When errors occur in the Vagrant context, it is most typically during the installation process, when a network timeout has occurred while retrieving a dependent resource. Simply continue with:
+
+    % vagrant provision
+
+This should get past any intermittent network issues. For assistance with installation and other issues, please visit the [Bigdata Support Forum](http://sourceforge.net/p/bigdata/discussion/676946).
+
+
+License and Authors
+-------------------
+Author:: Daniel Mekonnen [daniel<no-spam-at>systap.com]
+
+
+GNU GPLv2 - This package may be redistributed under the same terms and conditions as the Bigdata project of which it is a part.
+
+ http://www.gnu.org/licenses/gpl-2.0.html
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,157 @@
+#
+# Where bigdata resource files will be installed:
+#
+default['bigdata'][:home] = "/var/lib/bigdata"
+
+#
+# Who runs bigdata? This is applicable to NSS and HA installs only:
+#
+default['bigdata'][:user] = "bigdata"
+default['bigdata'][:group] = "bigdata"
+default['bigdata'][:base_version] = "1.3.1"
+
+#
+# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory:
+#
+default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code"
+
+
+case node['bigdata'][:install_flavor]
+when "nss"
+ # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer:
+ default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+when "tomcat"
+ # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7:
+ default['tomcat'][:base_version] = 7
+
+ # JRE options to set for Tomcat; the following are strongly recommended:
+ default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC"
+
+ # A SourceForge URL to use for downloading the bigdata.war file:
+ default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war"
+
+ # Where the bigdata contents reside under Tomcat:
+ default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata"
+
+ # Where the log4j.properties file can be found:
+ default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ end
+when "ha"
+ # The URL to the bigdataHA release bundle.
+ default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Name of the federation of services (controls the Apache River GROUPS).
+ default['bigdata'][:fedname] = 'my-cluster-1'
+
+ # Name of the replication cluster to which this HAJournalServer will belong.
+ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
+
+ # Where to find the Apache River service registrars (can also use multicast).
+ default['bigdata'][:river_locator1] = 'bigdataA'
+ default['bigdata'][:river_locator2] = 'bigdataB'
+ default['bigdata'][:river_locator3] = 'bigdataC'
+
+ # Where to find the Apache Zookeeper ensemble.
+ default['bigdata'][:zk_server1] = 'bigdataA'
+ default['bigdata'][:zk_server2] = 'bigdataB'
+ default['bigdata'][:zk_server3] = 'bigdataC'
+end
+
+
+###################################################################################
+#
+# Set the RWStore.properties attributes that apply for all installation scenarios.
+#
+###################################################################################
+
+# Where the RWStore.properties file can be found:
+default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties"
+
+
+default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW"
+
+# Setup for the RWStore recycler rather than session protection.
+default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1"
+
+default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000"
+default['bigdata']['btree.BTree.branchingFactor'] = "128"
+
+# 200M initial extent.
+default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200"
+default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200"
+
+# Setup for QUADS mode without the full text index.
+default['bigdata']['rdf.sail.truthMaintenance'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms"
+
+# Bump up the branching factor for the lexicon indices on the default kb.
+default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400"
+
+# Bump up the branching factor for the statement indices on the default kb.
+default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024"
+default['bigdata']['rdf.sail.bufferCapacity'] = "100000"
+
+#
+# Bigdata supports over a hundred properties and only the most commonly configured
+# are set here as Chef attributes. Any number of additional properties may be
+# configured by Chef. To do so, add the desired property in this (attributes/default.rb)
+# file as well as in the templates/default/RWStore.properties.erb file. The
+# "vocabularyClass" property (below) for inline URIs is used as example additional
+# entry:
+#
+# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass"
+
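+# As a hedged illustration only (the exact template layout is an assumption, not part
+# of this commit), the matching line in templates/default/RWStore.properties.erb might be:
+#
+#   com.bigdata.rdf.store.AbstractTripleStore.vocabularyClass=<%= node['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] %>
+#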
+
+#################################################################
+#
+# The following attributes are defaults for the MapGraph recipe.
+#
+#################################################################
+
+# The subversion branch to use when building from source:
+default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk"
+
+# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory:
+default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code"
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,157 @@
+#
+# Where bigdata resource files will be installed:
+#
+default['bigdata'][:home] = "/var/lib/bigdata"
+
+#
+# Who runs bigdata? This is applicable to NSS and HA installs only:
+#
+default['bigdata'][:user] = "bigdata"
+default['bigdata'][:group] = "bigdata"
+default['bigdata'][:base_version] = "1.3.1"
+
+#
+# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory:
+#
+default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code"
+
+
+case node['bigdata'][:install_flavor]
+when "nss"
+ # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer:
+ default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+when "tomcat"
+ # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7:
+ default['tomcat'][:base_version] = 7
+
+ # JRE options to set for Tomcat; the following are strongly recommended:
+ default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC"
+
+ # A SourceForge URL to use for downloading the bigdata.war file:
+ default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war"
+
+ # Where the bigdata contents reside under Tomcat:
+ default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata"
+
+ # Where the log4j.properties file can be found:
+ default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ end
+when "ha"
+ # The URL to the bigdataHA release bundle.
+ default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Name of the federation of services (controls the Apache River GROUPS).
+ default['bigdata'][:fedname] = 'my-cluster-1'
+
+ # Name of the replication cluster to which this HAJournalServer will belong.
+ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
+
+ # Where to find the Apache River service registrars (can also use multicast).
+ default['bigdata'][:river_locator1] = 'bigdataA'
+ default['bigdata'][:river_locator2] = 'bigdataB'
+ default['bigdata'][:river_locator3] = 'bigdataC'
+
+ # Where to find the Apache Zookeeper ensemble.
+ default['bigdata'][:zk_server1] = 'bigdataA'
+ default['bigdata'][:zk_server2] = 'bigdataB'
+ default['bigdata'][:zk_server3] = 'bigdataC'
+end
+
+
+###################################################################################
+#
+# Set the RWStore.properties attributes that apply for all installation scenarios.
+#
+###################################################################################
+
+# Where the RWStore.properties file can be found:
+default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties"
+
+
+default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW"
+
+# Setup for the RWStore recycler rather than session protection.
+default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1"
+
+default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000"
+default['bigdata']['btree.BTree.branchingFactor'] = "128"
+
+# 200M initial extent.
+default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200"
+default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200"
+
+# Setup for QUADS mode without the full text index.
+default['bigdata']['rdf.sail.truthMaintenance'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms"
+
+# Bump up the branching factor for the lexicon indices on the default kb.
+default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400"
+
+# Bump up the branching factor for the statement indices on the default kb.
+default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024"
+default['bigdata']['rdf.sail.bufferCapacity'] = "100000"
+
+#
+# Bigdata supports over a hundred properties and only the most commonly configured
+# are set here as Chef attributes. Any number of additional properties may be
+# configured by Chef. To do so, add the desired property in this (attributes/default.rb)
+# file as well as in the templates/default/RWStore.properties.erb file. The
+# "vocabularyClass" property (below) for inline URIs is used as example additional
+# entry:
+#
+# defa...
[truncated message content] |
[Bigdata-commit] SF.net SVN: bigdata:[8423] branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment
From: <dme...@us...> - 2014-05-27 18:29:31
Revision: 8423
http://sourceforge.net/p/bigdata/code/8423
Author: dmekonnen
Date: 2014-05-27 18:29:25 +0000 (Tue, 27 May 2014)
Log Message:
-----------
Set RWStore.properties path correctly for HA
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-27 13:28:02 UTC (rev 8422)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-27 18:29:25 UTC (rev 8423)
@@ -2,8 +2,6 @@
class Bigdata < Formula
homepage "http://bigdata.com/"
-# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz"
-# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2"
url "http://bigdata.com/deploy/bigdata-1.3.1.tgz"
sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79"
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-27 13:28:02 UTC (rev 8422)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-27 18:29:25 UTC (rev 8423)
@@ -15,6 +15,8 @@
#
default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code"
+# Where the RWStore.properties file can be found:
+default['bigdata'][:properties] = node['bigdata'][:home] + "/RWStore.properties"
case node['bigdata'][:install_flavor]
when "nss"
@@ -45,7 +47,7 @@
default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war"
# Where the bigdata contents reside under Tomcat:
- default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata"
+ default['bigdata'][:web_home] = node['tomcat'][:webapp_dir] + "/bigdata"
# Where the log4j.properites file can be found:
default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties"
@@ -79,6 +81,9 @@
# Where the jetty resourceBase is defined:
default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+ # Where the RWStore.properties file can be found:
+ default['bigdata'][:properties] = node['bigdata'][:jetty_dir] + "/WEB-INF/RWStore.properties"
+
# Name of the federation of services (controls the Apache River GROUPS).
default['bigdata'][:fedname] = 'my-cluster-1'
@@ -103,10 +108,7 @@
#
###################################################################################
-# Where the RWStore.properties file can be found:
-default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties"
-
default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW"
# Setup for the RWStore recycler rather than session protection.
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-27 13:28:02 UTC (rev 8422)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-27 18:29:25 UTC (rev 8423)
@@ -167,7 +167,7 @@
#
execute "set absolute path to RWStore.properties" do
cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF"
- command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:jetty_dir]}/WEB-INF/RWStore.properties|' web.xml"
+ command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:properties]}|' web.xml"
end
#
[Bigdata-commit] SF.net SVN: bigdata:[8432] branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment
From: <dme...@us...> - 2014-06-02 05:11:13
Revision: 8432
http://sourceforge.net/p/bigdata/code/8432
Author: dmekonnen
Date: 2014-06-02 05:11:05 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
Updates for cluster HA3 deployment to a VirtualBox provider
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -91,14 +91,18 @@
default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
# Where to find the Apache River service registrars (can also use multicast).
- default['bigdata'][:river_locator1] = 'bigdataA'
- default['bigdata'][:river_locator2] = 'bigdataB'
- default['bigdata'][:river_locator3] = 'bigdataC'
+ default['bigdata'][:river_locator1] = '33.33.33.10'
+ default['bigdata'][:river_locator2] = '33.33.33.11'
+ default['bigdata'][:river_locator3] = '33.33.33.12'
# Where to find the Apache Zookeeper ensemble.
default['bigdata'][:zk_server1] = 'bigdataA'
default['bigdata'][:zk_server2] = 'bigdataB'
default['bigdata'][:zk_server3] = 'bigdataC'
+
+ # set the JVM_OPTS as used by startHAService
+ default['bigdata'][:java_options] = "-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
+ # default['bigdata'][:java_options] = "-server -Xmx4G -XX:MaxDirectMemorySize=3000m -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1046"
end
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -4,7 +4,7 @@
license 'GNU GPLv2'
description 'Installs/Configures Systap Bigdata High Availability'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.1.4'
+version '0.1.5'
depends 'apt'
depends 'java', '>= 1.22.0'
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -106,7 +106,7 @@
user 'root'
group 'root'
cwd "#{node['bigdata'][:home]}/etc/init.d"
- command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\&1 \&\"|' bigdataHA"
+ command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\\&1\"|' bigdataHA"
end
execute "Change SystemProperty to Property in the 'host' attribute of jetty.xml" do
@@ -184,7 +184,7 @@
# Copy the /etc/default/bigdataHA template:
#
template "/etc/default/bigdataHA" do
- source "default/bigdataHA.erb"
+ source "etc/default/bigdataHA.erb"
user 'root'
group 'root'
mode 00644
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/etc/default/bigdataHA.erb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -0,0 +1,62 @@
+# Environment for bigdata HA services.
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Note: You MUST provide the location of the executable scripts and the
+# pid file that is written by $binDir/startHAServices. These SHOULD be
+# absolute path names.
+
+BD_USER="<%= node['bigdata'][:user] %>"
+BD_GROUP="<%= node['bigdata'][:group] %>"
+
+binDir=<%= node['bigdata'][:home] %>/bin
+pidFile=<%= node['bigdata'][:home] %>/var/lock/pid
+
+##
+# ServiceStarter JVM options.
+#
+# The ServiceStarter is launched as a JVM with the following JVM options.
+# The other services (including the HAJournalServer) will run inside of
+# this JVM. This is where you specify the size of the Java heap and the
+# size of the direct memory heap (used for the write cache buffers and
+# some related things).
+##
+export JVM_OPTS="<%= node['bigdata'][:java_options] %>"
+
+##
+# The following variables configure the startHAServices script, which
+# passes them through to HAJournal.config.
+##
+
+# Name of the bigdata federation of services. Override for real install.
+export FEDNAME=<%= node['bigdata'][:fedname] %>
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication clusters, then just
+# give each such cluster its own name.
+export LOGICAL_SERVICE_ID=<%= node['bigdata'][:logical_service_id] %>
+
+# Local directory where the service will store its state.
+export FED_DIR=<%= node['bigdata'][:home] %>
+export DATA_DIR=<%= node['bigdata'][:data_dir] %>
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="${FEDNAME}"
+export LOCATORS="jini://<%= node['bigdata'][:river_locator1] %>/,jini://<%= node['bigdata'][:river_locator2] %>/,jini://<%= node['bigdata'][:river_locator3] %>/"
+
+# Apache ZooKeeper - NO default.
+export ZK_SERVERS="<%= node['bigdata'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>"
+
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export JETTY_PORT=8080
+#export JETTY_XML=var/jetty/jetty.xml
+#export JETTY_RESOURCE_BASE=var/jetty/html
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=
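A hedged illustration (not part of this commit): the attributes consumed by this template are the HA defaults from chef/attributes/default.rb, and they could be overridden per node from a Vagrantfile's `chef.json` before the `bigdata::high_availability` recipe renders `/etc/default/bigdataHA`, for example:

    chef.json = {
      :bigdata => {
        :install_flavor => "ha",
        :fedname => "my-cluster-1",
        :logical_service_id => "HA-Replication-Cluster-1",
        :river_locator1 => "33.33.33.10",
        :river_locator2 => "33.33.33.11",
        :river_locator3 => "33.33.33.12",
        :zk_server1 => "bigdataA",
        :zk_server2 => "bigdataB",
        :zk_server3 => "bigdataC",
        :java_options => "-server -Xmx4G -XX:MaxDirectMemorySize=3000m"
      }
    }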
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py 2014-06-02 05:11:05 UTC (rev 8432)
@@ -37,10 +37,13 @@
def createJiniLocatorsSubstitution():
locators = ""
+ vbHostAddresses = [ "33.33.33.10", "33.33.33.11", "33.33.33.12" ]
+ index = 0
for host in hostMap:
- locators = locators + "jini://" + hostMap[host] + "/,"
+ locators = locators + "sudo sed -i 's|" + vbHostAddresses[index] + "|" + hostMap[host] + "|' /etc/default/bigdataHA ;"
+ index = index + 1
locators = locators[:-1]
- return "sudo sed -i 's|%JINI_LOCATORS%|" + locators + "|' /etc/default/bigdataHA"
+ return locators
if __name__ == '__main__':
@@ -60,7 +63,7 @@
group = ec2conn.get_all_security_groups( private_security_group_name )[0]
jini_locators = createJiniLocatorsSubstitution()
- # print "JINI_LOCATORS = " + jini_locators
+ print "JINI_LOCATORS = " + jini_locators
i = 1
for host in bigdataHosts:
@@ -87,9 +90,9 @@
# startHAServices does not exit as expected, so remote restart commands will hang.
# As a work around, we restart the host:
#
- print "Running: sudo /etc/init.d/zookeeper-server restart on host ", host
+ # print "Running: sudo /etc/init.d/zookeeper-server restart on host ", host
status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/zookeeper-server restart" )
- print "Running: sudo /etc/init.d/bigdata restart on host ", host
+ # print "Running: sudo /etc/init.d/bigdata restart on host ", host
status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/bigdataHA restart" )
# status, stdin, stderr = ssh_client.run( "sudo service bigdataHA restart" )
# host.reboot()
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -4,7 +4,7 @@
license 'GNU GPLv2'
description 'Installs/Configures Systap Bigdata High Availability'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.1.4'
+version '0.1.5'
depends 'apt'
depends 'java', '>= 1.22.0'
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha3 2014-06-02 05:11:05 UTC (rev 8432)
@@ -0,0 +1,146 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# Vagrantfile.ha3 - Install the Bigdata High Availability Server with 3 Nodes with a VirtualBox Provider
+#
+# The launch synopsis for this Vagrantfile:
+#
+# % vagrant up
+# % vagrant halt
+# % vagrant up
+#
+# The "halt" and following "up" forces a restart of the services post-installation.
+# This is a temporary requirement until recipes are upated.
+
+Vagrant.require_plugin "vagrant-berkshelf"
+
+Vagrant.configure("2") do |config|
+
+config.vm.provider :virtualbox do |vb|
+ vb.customize ["modifyvm", :id, "--memory", "2048"]
+end
+
+script = <<SCRIPT
+ apt-get update
+ apt-get install -y curl
+ curl -L https://www.opscode.com/chef/install.sh | bash
+ mkdir -p /var/lib/zookeeper
+ echo "33.33.33.10 bigdataA" >> /etc/hosts
+ echo "33.33.33.11 bigdataB" >> /etc/hosts
+ echo "33.33.33.12 bigdataC" >> /etc/hosts
+SCRIPT
+
+$scriptA = "#{script}\n\techo 1 > /var/lib/zookeeper/myid\n"
+config.vm.define :bigdataA do |bigdataA|
+
+ bigdataA.vm.hostname = "bigdataA"
+ bigdataA.vm.box = "precise64"
+
+ bigdataA.berkshelf.enabled = true
+
+ bigdataA.vm.box_url = "http://files.vagrantup.com/precise64.box"
+
+ bigdataA.vm.network :private_network, ip: "33.33.33.10"
+
+ bigdataA.vm.provision :chef_solo do |chef|
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "ha"
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ },
+ :zookeeper => {
+ :zoocfg => { :clientPort=> '2081' }
+ }
+ }
+
+ bigdataA.vm.provision :shell, inline: $scriptA
+
+ chef.run_list = [
+ "recipe[bigdata::high_availability]"
+ ]
+
+ end
+
+end # bigdataA
+
+
+$scriptB = "#{script}\n\techo 2 > /var/lib/zookeeper/myid\n"
+config.vm.define :bigdataB do |bigdataB|
+
+ bigdataB.vm.hostname = "bigdataB"
+ bigdataB.vm.box = "precise64"
+
+ bigdataB.berkshelf.enabled = true
+
+ bigdataB.vm.box_url = "http://files.vagrantup.com/precise64.box"
+
+ bigdataB.vm.network :private_network, ip: "33.33.33.11"
+
+ bigdataB.vm.provision :chef_solo do |chef|
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "ha"
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ },
+ :zookeeper => {
+ :zoocfg => { :clientPort=> '2081' }
+ }
+ }
+
+ bigdataB.vm.provision :shell, inline: $scriptB
+
+ chef.run_list = [
+ "recipe[bigdata::high_availability]"
+ ]
+
+ end
+
+end # bigdataB
+
+
+$scriptC = "#{script}\n\techo 3 > /var/lib/zookeeper/myid\n"
+config.vm.define :bigdataC do |bigdataC|
+
+ bigdataC.vm.hostname = "bigdataC"
+ bigdataC.vm.box = "precise64"
+
+ bigdataC.berkshelf.enabled = true
+
+ bigdataC.vm.box_url = "http://files.vagrantup.com/precise64.box"
+
+ bigdataC.vm.network :private_network, ip: "33.33.33.12"
+
+ bigdataC.vm.provision :chef_solo do |chef|
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "ha"
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ },
+ :zookeeper => {
+ :zoocfg => { :clientPort=> '2081' }
+ }
+ }
+
+ bigdataC.vm.provision :shell, inline: $scriptC
+
+ chef.run_list = [
+ "recipe[bigdata::high_availability]"
+ ]
+
+ end
+
+end # bigdataC
+
+end
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb 2014-06-01 21:52:14 UTC (rev 8431)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb 2014-06-02 05:11:05 UTC (rev 8432)
@@ -4,7 +4,7 @@
license 'GNU GPLv2'
description 'Installs/Configures Systap Bigdata High Availability'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
-version '0.1.3'
+version '0.1.5'
depends 'apt'
depends 'java', '>= 1.22.0'
[Bigdata-commit] SF.net SVN: bigdata:[8435] branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment
From: <dme...@us...> - 2014-06-02 16:41:57
Revision: 8435
http://sourceforge.net/p/bigdata/code/8435
Author: dmekonnen
Date: 2014-06-02 16:41:49 +0000 (Mon, 02 Jun 2014)
Log Message:
-----------
Adding 'replication_factor' attribute to support HA1
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-02 05:14:19 UTC (rev 8434)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-06-02 16:41:49 UTC (rev 8435)
@@ -90,6 +90,9 @@
# Name of the replication cluster to which this HAJournalServer will belong.
default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
+ # Set the REPLICATION_FACTOR. 1 = HA1, 3 = HA3, etc
+ default['bigdata'][:replication_factor] = 3
+
# Where to find the Apache River service registrars (can also use multicast).
default['bigdata'][:river_locator1] = '33.33.33.10'
default['bigdata'][:river_locator2] = '33.33.33.11'
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1 (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.ha1 2014-06-02 16:41:49 UTC (rev 8435)
@@ -0,0 +1,71 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+#
+# Vagrantfile.ha1 - Install the Bigdata High Availability Server with 1 node with a VirtualBox Provider
+#
+# The launch synopsis for this Vagrantfile:
+#
+# % vagrant up
+# % vagrant halt
+# % vagrant up
+#
+# The "halt" and following "up" forces a restart of the services post-installation.
+# This is a temporary requirement until recipes are upated.
+
+Vagrant.require_plugin "vagrant-berkshelf"
+
+Vagrant.configure("2") do |config|
+
+config.vm.provider :virtualbox do |vb|
+ vb.customize ["modifyvm", :id, "--memory", "2048"]
+end
+
+script = <<SCRIPT
+ apt-get update
+ apt-get install -y curl
+ curl -L https://www.opscode.com/chef/install.sh | bash
+ mkdir -p /var/lib/zookeeper
+ echo "33.33.33.10 bigdataA" >> /etc/hosts
+ echo "33.33.33.11 bigdataB" >> /etc/hosts
+ echo "33.33.33.12 bigdataC" >> /etc/hosts
+SCRIPT
+
+$scriptA = "#{script}\n\techo 1 > /var/lib/zookeeper/myid\n"
+config.vm.define :bigdataA do |bigdataA|
+
+ bigdataA.vm.hostname = "bigdataA"
+ bigdataA.vm.box = "precise64"
+
+ bigdataA.berkshelf.enabled = true
+
+ bigdataA.vm.box_url = "http://files.vagrantup.com/precise64.box"
+
+ bigdataA.vm.network :private_network, ip: "33.33.33.10"
+
+ bigdataA.vm.provision :chef_solo do |chef|
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "ha",
+ :replication_factor => 1
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ },
+ :zookeeper => {
+ :zoocfg => { :clientPort=> '2081' }
+ }
+ }
+
+ bigdataA.vm.provision :shell, inline: $scriptA
+
+ chef.run_list = [
+ "recipe[bigdata::high_availability]"
+ ]
+
+ end
+
+end # bigdataA
+
+end