|
From: <tho...@us...> - 2014-05-23 15:53:41
|
Revision: 8415
http://sourceforge.net/p/bigdata/code/8415
Author: thompsonbry
Date: 2014-05-23 15:53:35 +0000 (Fri, 23 May 2014)
Log Message:
-----------
Fix to broken commit for #941.
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4jHA.properties.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/zoo.cfg.erb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/Vagrantfile.aws.ha3.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createCluster.sh
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/createSecurityGroup.py
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/bigdata-aws-ha3-launcher/bin/setHosts.py
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Berksfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Gemfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Thorfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.mapgraph
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.nss.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/Vagrantfile.aws.tomcat.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/aws.rc
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/chefignore
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/AWS/metadata.rb
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Berksfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Gemfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Thorfile
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.dual-provider.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.nss.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/Vagrantfile.tomcat.build-from-svn
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/chefignore
branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/vagrant/samples/VirtualBox/metadata.rb
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/brew/bigdata.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,50 @@
+require "formula"
+
+class Bigdata < Formula
+ homepage "http://bigdata.com/"
+# url "http://bigdata.com/deploy/bigdata-1.3.0.tgz"
+# sha1 "c22fa05df965019b3132161507ce0e77a4a1f6e2"
+ url "http://bigdata.com/deploy/bigdata-1.3.1.tgz"
+ sha1 "bcfacd08b1e1c7429d3ca31b8632a20cdff1fb79"
+
+ def install
+ prefix.install "doc", "var", "bin"
+ libexec.install Dir["lib/*.jar"]
+
+ File.rename "#{bin}/bigdataNSS", "#{bin}/bigdata"
+
+ # Set the installation path as the root for the bin scripts:
+ inreplace "#{bin}/bigdata" do |s|
+ s.sub! "<%= BD_HOME %>", prefix
+ s.sub! "<%= INSTALL_TYPE %>", "BREW"
+ end
+
+ # Set the installation path as the root for bigdata.jnl file location (<bigdata_home>/data):
+ inreplace "#{prefix}/var/jetty/WEB-INF/RWStore.properties", "<%= BD_HOME %>", prefix
+
+ # Set the installation path as the root for log files (<bigdata_home>/log):
+ inreplace "#{prefix}/var/jetty/WEB-INF/classes/log4j.properties", "<%= BD_HOME %>", prefix
+ end
+
+ plist_options :startup => 'true', :manual => 'bigdata start'
+
+ def plist; <<-EOS.undent
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>#{plist_name}</string>
+ <key>Program</key>
+ <string>#{bin}/bigdata</string>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>WorkingDirectory</key>
+ <string>#{prefix}</string>
+ </dict>
+ </plist>
+ EOS
+ end
+
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/README.md 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,186 @@
+Bigdata Cookbook
+================
+The Bigdata cookbook provides the [bigdata v1.3.1](http://www.bigdata.com/) opensource triplestore/graph database. The cookbook provides recipes to install the Bigdata server as a web application under Tomcat, with its own embedded Jetty server (NSS - the NanoSparqlServer). The recipes will install pre-configured packages by node and optionally may build and install the server directly from source archive.
+
+For more info on Bigdata please visit:
+
+* Bigdata Homepage: [http://www.bigdata.com/](http://www.bigdata.com/)
+* Bigdata SourceForge Page: [http://sourceforge.net/projects/bigdata/](http://sourceforge.net/projects/bigdata/)
+
+Requirements
+------------
+Chef 11 or higher<br/>
+Ruby 1.9 (preferably from the Chef full-stack installer)
+
+
+
+Attributes
+----------
+
+### General Attributes
+
+`node['bigdata'][:home]` - The root directory for bigdata contents (Default: `/var/lib/bigdata`)
+
+`node['bigdata'][:url]` - Where to download the bigdata package file from. (Defaults: Tomcat: http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/1.3.1/bigdata.war / NSS: http://bigdata.com/deploy/bigdata-1.3.1.tgz)
+
+`node['bigdata'][:data_dir]`
+ - Where the bigdata.jnl resides. Discussed in <a href="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer#Common_Startup_Problems">Common Startup Problems</a>
+ (Defaults: Tomcat: `node['bigdata'][:home]`/data / NSS: `node['bigdata'][:home]`/var/data)
+
+`node['bigdata'][:log_dir]` - Where bigdata log files should reside (i.e. queryLog.csv, rules.log, queryRunStateLog.csv). (Default: Tomcat: `node['bigdata'][:home]`/var/log / NSS: `node['bigdata'][:home]`/var/log)
+
+`node['bigdata'][:properties]` - File path to the Bigdata properties file. (Default: `node['bigdata'][:home]`/RWStore.properties)
+
+`node['bigdata'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: Tomcat: https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA\_RELEASE\_1\_3\_0 / NSS: https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT\_BRANCH\_1\_3\_1)
+
+`node['bigdata'][:source]` - The directory to retrieve Subversion contents into. (Default: bigdata-code)
+
+`node['bigdata']['journal.AbstractJournal.bufferMode']` - Journal Buffer Mode (Default: DiskRW)
+
+`node['bigdata']['service.AbstractTransactionService.minReleaseAge']` - Minimum Release Age (Default: 1)
+
+`node['bigdata']['btree.writeRetentionQueue.capacity']` - Writing retention queue length. (Default: 4000)
+
+`node['bigdata']['btree.BTree.branchingFactor']` - Branching factor for the journal's B-Tree. (Default: 128)
+
+`node['bigdata']['journal.AbstractJournal.initialExtent']` - Journal's initial extent (Default: 209715200)
+
+`node['bigdata']['journal.AbstractJournal.maximumExtent']` - Journal's maximum extent (Default: 209715200)
+
+`node['bigdata']['rdf.sail.truthMaintenance']` - Switch Truth Maintenance on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.quads']` - Switch Quads Mode on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers']` - Switch statement identifiers on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.textIndex']` - Switch text indexing on/off. (Default: false)
+
+`node['bigdata']['rdf.store.AbstractTripleStore.axiomsClass']` - The class to handle RDF axioms. (Default: com.bigdata.rdf.axioms.NoAxioms)
+
+`node['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's Lexical B-Tree. (Default:- 400)
+
+`node['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor']` - Branching factor for the journal's SPO B-Tree. (Default: 1024)
+
+`node['bigdata']['rdf.sail.bufferCapacity']` - The number of statements to buffer before committing triples to the persistence layer. (Default: 100000)
+
+### Attributes for Tomcat Based Install
+
+`node['bigdata'][:web_home]` - The web application root directory for bigdata. (Default `node['tomcat'][:webapp_dir]`/bigdata)
+
+`node['bigdata'][:log4j_properties]` - File path to the log4j properties file. (Default `node['bigdata'][:web_home]`/WEB-INF/classes/log4j.properties)
+
+### Attributes for NanoSparqlServer (NSS) Based Install
+
+`node['bigdata'][:user]` - The user to install and run bigdata under. (Default: `bigdata`)
+
+`node['bigdata'][:group]` - The group to install and run bigdata under. (Default: `bigdata`)
+
+`node['bigdata'][:jetty_dir]` - The Jetty root directory. (Default: `node['bigdata'][:home]`/var/jetty)
+
+### Attributes for MapGraph
+
+`node['mapgraph'][:svn_branch]` - The Subversion branch to retrieve source files from. (Default: https://svn.code.sf.net/p/mpgraph/code/trunk)
+
+`node['mapgraph'][:source]` - The directory to retrieve Subversion contents into. (Default: mapgraph-code )
+
+
+Recipes
+-------
+
+A node recipe is not provided by the Bigdata cookbook. The user is given the option to install the Bigdata server under Tomcat or as a Jetty application. Under both options, Bigdata may optionally be built directly from a Subversion source code branch.
+
+### tomcat
+
+Installs the [Tomcat](http://tomcat.apache.org/) server and then bigdata as a web application. Bigdata will be configured according to the attributes. If no attributes are given, Bigdata will be installed with the default attribute settings.
+
+If the `build_from_svn` attribute is set to `true` Bigdata will be built from the Subversion repository given in the `svn_branch` attribute.
+
+### nss
+
+Installs the Bigdata server to run in the [NanoSparqlServer](http://wiki.bigdata.com/wiki/index.php/NanoSparqlServer) (Jetty) mode.
+
+
+If the `build_from_svn` attribute is set to `true` Bigdata will be built from the Subversion repository given in the `svn_branch` attribute.
+
+
+### mapgraph
+
+Retrieves the [MapGraph](http://sourceforge.net/projects/mpgraph/) project from its Subversion archive at SourceForge and builds it.
+This recipe can only be used with GPU architecture and has only been validated against Amazon's "NVIDIA GRID GPU Driver" AMI.
+
+
+Usage
+-----
+
+
+### Vagrant Context
+
+Sample Vagrant configurations are available in the Bigdata Subversion source tree under [bigdata/src/resources/deployment/vagrant](http://sourceforge.net/p/bigdata/code/HEAD/tree/branches/DEPLOYMENT_BRANCH_1_3_1/bigdata/src/resources/deployment/vagrant/).
+
+#### Tomcat Example
+
+
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "tomcat",
+ :build_from_svn => true,
+ :svn_branch => "https://svn.code.sf.net/p/bigdata/code/branches/BTREE_BUFFER_BRANCH/"
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ },
+ :tomcat => {
+ :base_version => "7"
+ }
+ }
+
+ chef.run_list = [
+ ...
+ "recipe[bigdata::tomcat]"
+ ...
+ ]
+
+
+
+#### NSS Example
+
+
+ chef.json = {
+ :bigdata => {
+ :install_flavor => "nss"
+ },
+ :java => {
+ :install_flavor => "oracle",
+ :jdk_version => "7",
+ :oracle => { 'accept_oracle_download_terms' => true }
+ }
+ }
+
+ chef.run_list = [
+ ...
+ "recipe[bigdata::nss]"
+ ...
+ ]
+
+
+### Trouble Shooting
+
+The Bigdata cookbook recipes have been tested thoroughly in the Vagrant context with VirtualBox and AWS providers using Ubuntu 12.04 and Oracle's JDK 7.
+
+When errors occur in the Vagrant context, it is most typically during the installation process where a network timeout has occurred during the retrieval of a dependent resource. Simply continue with:
+
+ % vagrant provision
+
+Which should get past any intermittent network issues. For assistance with installation and other issues, please visit the [Bigdata Support Forum](http://sourceforge.net/p/bigdata/discussion/676946).
+
+
+License and Authors
+-------------------
+Author:: Daniel Mekonnen [daniel<no-spam-at>systap.com]
+
+
+GNU GPLv2 - This package may be redistributed under the same terms and conditions as the Bigdata project that it is a part of.
+
+ http://www.gnu.org/licenses/gpl-2.0.html
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/attributes/default.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,157 @@
+#
+# Where bigdata resource files will be installed:
+#
+default['bigdata'][:home] = "/var/lib/bigdata"
+
+#
+# Who runs bigdata? This is applicable to NSS and HA installs only:
+#
+default['bigdata'][:user] = "bigdata"
+default['bigdata'][:group] = "bigdata"
+default['bigdata'][:base_version] = "1.3.1"
+
+#
+# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory:
+#
+default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code"
+
+
+case node['bigdata'][:install_flavor]
+when "nss"
+ # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer:
+ default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+when "tomcat"
+ # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7:
+ default['tomcat'][:base_version] = 7
+
+	# JRE options to set for Tomcat; the following is strongly recommended:
+ default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC"
+
+ # A SourceForge URL to use for downloading the bigdata.war file:
+ default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war"
+
+ # Where the bigdata contents reside under Tomcat:
+ default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata"
+
+	# Where the log4j.properties file can be found:
+ default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ end
+when "ha"
+ # The URL to the bigdataHA release bundle.
+ default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Name of the federation of services (controls the Apache River GROUPS).
+ default['bigdata'][:fedname] = 'my-cluster-1'
+
+ # Name of the replication cluster to which this HAJournalServer will belong.
+ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
+
+ # Where to find the Apache River service registrars (can also use multicast).
+ default['bigdata'][:river_locator1] = 'bigdataA'
+ default['bigdata'][:river_locator2] = 'bigdataB'
+ default['bigdata'][:river_locator3] = 'bigdataC'
+
+ # Where to find the Apache Zookeeper ensemble.
+ default['bigdata'][:zk_server1] = 'bigdataA'
+ default['bigdata'][:zk_server2] = 'bigdataB'
+ default['bigdata'][:zk_server3] = 'bigdataC'
+end
+
+
+###################################################################################
+#
+# Set the RWStore.properties attributes that apply for all installation scenarios.
+#
+###################################################################################
+
+# Where the RWStore.properties file can be found:
+default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties"
+
+
+default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW"
+
+# Setup for the RWStore recycler rather than session protection.
+default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1"
+
+default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000"
+default['bigdata']['btree.BTree.branchingFactor'] = "128"
+
+# 200M initial extent.
+default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200"
+default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200"
+
+# Setup for QUADS mode without the full text index.
+default['bigdata']['rdf.sail.truthMaintenance'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms"
+
+# Bump up the branching factor for the lexicon indices on the default kb.
+default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400"
+
+# Bump up the branching factor for the statement indices on the default kb.
+default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024"
+default['bigdata']['rdf.sail.bufferCapacity'] = "100000"
+
+#
+# Bigdata supports over a hundred properties and only the most commonly configured
+# are set here as Chef attributes. Any number of additional properties may be
+# configured by Chef. To do so, add the desired property in this (attributes/default.rb)
+# file as well as in the templates/default/RWStore.properties.erb file. The
+# "vocabularyClass" property (below) for inline URIs is used as example additional
+# entry:
+#
+# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass"
+
+
+#################################################################
+#
+# The following attributes are defaults for the MapGraph recipe.
+#
+#################################################################
+
+# The subversion branch to use when building from source:
+default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk"
+
+# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory:
+default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code"
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/default.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,157 @@
+#
+# Where bigdata resource files will be installed:
+#
+default['bigdata'][:home] = "/var/lib/bigdata"
+
+#
+# Who runs bigdata? This is applicable to NSS and HA installs only:
+#
+default['bigdata'][:user] = "bigdata"
+default['bigdata'][:group] = "bigdata"
+default['bigdata'][:base_version] = "1.3.1"
+
+#
+# When "build_from_svn" is "true", code retrieved from subversion will be downloaded to the "source_dir" directory:
+#
+default['bigdata'][:source_dir] = "/home/ubuntu/bigdata-code"
+
+
+case node['bigdata'][:install_flavor]
+when "nss"
+ # The URL to the bigdata-nss bundle. The following is the same bundle used by the Bigdata Brew installer:
+ default['bigdata'][:url] = "http://bigdata.com/deploy/bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/var/log"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/var/data"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+when "tomcat"
+ # The Tomcat version to install. The Bigdata Chef cookbook has only been tested with Version 7:
+ default['tomcat'][:base_version] = 7
+
+	# JRE options to set for Tomcat; the following is strongly recommended:
+ default['tomcat'][:java_options] = "-Djava.awt.headless=true -server -Xmx4G -XX:+UseG1GC"
+
+ # A SourceForge URL to use for downloading the bigdata.war file:
+ default['bigdata'][:url] = "http://hivelocity.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/bigdata.war"
+
+ # Where the bigdata contents reside under Tomcat:
+ default['bigdata'][:web_home] = default['tomcat'][:webapp_dir] + "/bigdata"
+
+	# Where the log4j.properties file can be found:
+ default['bigdata'][:log4j_properties] = default['bigdata'][:web_home] + "/WEB-INF/classes/log4j.properties"
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ end
+when "ha"
+ # The URL to the bigdataHA release bundle.
+ default['bigdata'][:url] = "http://softlayer-dal.dl.sourceforge.net/project/bigdata/bigdata/#{node['bigdata'][:base_version]}/REL.bigdata-#{node['bigdata'][:base_version]}.tgz"
+
+ # The subversion branch to use when building from source:
+ if node['bigdata'][:build_from_svn]
+ # default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0"
+ default['bigdata'][:svn_branch] = "https://svn.code.sf.net/p/bigdata/code/branches/DEPLOYMENT_BRANCH_1_3_1"
+ end
+
+ # Where the bigdata-ha.jnl file will live:
+ default['bigdata'][:data_dir] = node['bigdata'][:home] + "/data"
+
+ # Where the log files will live:
+ default['bigdata'][:log_dir] = node['bigdata'][:home] + "/log"
+
+ # Where the jetty resourceBase is defined:
+ default['bigdata'][:jetty_dir] = node['bigdata'][:home] + "/var/jetty"
+
+ # Name of the federation of services (controls the Apache River GROUPS).
+ default['bigdata'][:fedname] = 'my-cluster-1'
+
+ # Name of the replication cluster to which this HAJournalServer will belong.
+ default['bigdata'][:logical_service_id] = 'HA-Replication-Cluster-1'
+
+ # Where to find the Apache River service registrars (can also use multicast).
+ default['bigdata'][:river_locator1] = 'bigdataA'
+ default['bigdata'][:river_locator2] = 'bigdataB'
+ default['bigdata'][:river_locator3] = 'bigdataC'
+
+ # Where to find the Apache Zookeeper ensemble.
+ default['bigdata'][:zk_server1] = 'bigdataA'
+ default['bigdata'][:zk_server2] = 'bigdataB'
+ default['bigdata'][:zk_server3] = 'bigdataC'
+end
+
+
+###################################################################################
+#
+# Set the RWStore.properties attributes that apply for all installation scenarios.
+#
+###################################################################################
+
+# Where the RWStore.properties file can be found:
+default['bigdata'][:properties] = default['bigdata'][:home] + "/RWStore.properties"
+
+
+default['bigdata']['journal.AbstractJournal.bufferMode'] = "DiskRW"
+
+# Setup for the RWStore recycler rather than session protection.
+default['bigdata']['service.AbstractTransactionService.minReleaseAge']= "1"
+
+default['bigdata']['btree.writeRetentionQueue.capacity'] = "4000"
+default['bigdata']['btree.BTree.branchingFactor'] = "128"
+
+# 200M initial extent.
+default['bigdata']['journal.AbstractJournal.initialExtent'] = "209715200"
+default['bigdata']['journal.AbstractJournal.maximumExtent'] = "209715200"
+
+# Setup for QUADS mode without the full text index.
+default['bigdata']['rdf.sail.truthMaintenance'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.quads'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.textIndex'] = "false"
+default['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] = "com.bigdata.rdf.axioms.NoAxioms"
+
+# Bump up the branching factor for the lexicon indices on the default kb.
+default['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] = "400"
+
+# Bump up the branching factor for the statement indices on the default kb.
+default['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] = "1024"
+default['bigdata']['rdf.sail.bufferCapacity'] = "100000"
+
+#
+# Bigdata supports over a hundred properties and only the most commonly configured
+# are set here as Chef attributes. Any number of additional properties may be
+# configured by Chef. To do so, add the desired property in this (attributes/default.rb)
+# file as well as in the templates/default/RWStore.properties.erb file. The
+# "vocabularyClass" property (below) for inline URIs is used as example additional
+# entry:
+#
+# default['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] = "com.my.VocabularyClass"
+
+
+#################################################################
+#
+# The following attributes are defaults for the MapGraph recipe.
+#
+#################################################################
+
+# The subversion branch to use when building from source:
+default['mapgraph'][:svn_branch] = "https://svn.code.sf.net/p/mpgraph/code/trunk"
+
+# MapGraph code retrieved from subversion will be downloaded to the "source_dir" directory:
+default['mapgraph'][:source_dir] = "/home/ec2-user/mapgraph-code"
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/metadata.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,19 @@
+name 'bigdata'
+maintainer 'Daniel Mekonnen'
+maintainer_email 'daniel<no-spam-at>systap.com'
+license 'GNU GPLv2'
+description 'Installs/Configures Systap Bigdata High Availability'
+long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
+version '0.1.4'
+
+depends 'apt'
+depends 'java', '>= 1.22.0'
+depends 'ant'
+depends 'tomcat'
+depends 'subversion'
+depends 'lvm'
+depends 'hadoop'
+depends 'emacs'
+depends 'sysstat'
+
+supports 'ubuntu'
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/high_availability.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,220 @@
+#
+# Cookbook Name:: bigdata
+# Recipe:: high_availability
+#
+# Copyright 2014, Systap
+#
+
+#
+# Only do the following for Bigdata HA install
+#
+if node['bigdata'][:install_flavor] == "ha"
+
+ include_recipe "java"
+ include_recipe "sysstat"
+ include_recipe "hadoop::zookeeper_server"
+
+ #
+ # Create the bigdata system group:
+ #
+ group node['bigdata'][:group] do
+ action :create
+ append true
+ end
+
+ #
+ # Create the bigdata system user:
+ #
+ user node['bigdata'][:user] do
+ gid node['bigdata'][:group]
+ supports :manage_home => true
+ shell "/bin/false"
+ home node['bigdata'][:home]
+ system true
+ action :create
+ end
+
+ #
+ # Make sure the Bigdata home directory is owned by the bigdata user and group:
+ #
+ execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do
+ user "root"
+ group "root"
+ cwd node['bigdata'][:home]
+ command "chown -R #{node['bigdata'][:user]}:#{node['bigdata'][:group]} ."
+ end
+
+ if node['bigdata'][:build_from_svn]
+ include_recipe "ant"
+ include_recipe "subversion::client"
+ #
+ # Retrieve the Bigdata source from the specified subversion branch:
+ #
+ execute "checkout bigdata from svn repo" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd "/home/ubuntu"
+ command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}"
+ end
+
+ #
+ # Build the bigdata release package:
+ #
+ execute "ant deploy-artifact" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd node['bigdata'][:source_dir]
+ command "ant deploy-artifact"
+ end
+
+ #
+ # Extract the just built release package, thus installing it in the Bigdata home directory:
+ #
+ execute "deflate REL tar" do
+ user node['bigdata'][:user]
+ group node['bigdata'][:group]
+ cwd "#{node['bigdata'][:home]}/.."
+ command "tar xvf #{node['bigdata'][:source_dir]}/REL.bigdata-1.*.tgz"
+ end
+
+ else
+ #
+ # Retrieve the package prepared for Brew:
+ #
+ remote_file "/tmp/bigdata.tgz" do
+ owner node['bigdata'][:user]
+ group node['bigdata'][:group]
+ source node['bigdata'][:url]
+ end
+
+ #
+ # Extract the just retrieved release package, thus installing it in the Bigdata home directory:
+ #
+
+ execute "Extract and relocate the bigdata archive" do
+ user node['bigdata'][:user]
+ group node['bigdata'][:group]
+ cwd "#{node['bigdata'][:home]}/.."
+ command "tar xvf /tmp/bigdata.tgz"
+ end
+
+ #
+ # The following are assumed fixed in releases after 1.3.1 and in the current subversion branch:
+ #
+ if node['bigdata'][:base_version].gsub(/\./, '').to_i == 131
+ execute "Divert standard and error output into /dev/null" do
+ user 'root'
+ group 'root'
+ cwd "#{node['bigdata'][:home]}/etc/init.d"
+ command "sed -i 's|startHAServices\"|startHAServices > /dev/null 2>\&1 \&\"|' bigdataHA"
+ end
+
+ execute "Change SystemProperty to Property in the 'host' attribute of jetty.xml" do
+ user 'root'
+ group 'root'
+ cwd node['bigdata'][:jetty_dir]
+ command "sed -i 's|<Set name=\"host\"><SystemProperty|<Set name=\"host\"><Property|' jetty.xml"
+ end
+
+ execute "Change SystemProperty to Property in the 'port' attribute of jetty.xml" do
+ user 'root'
+ group 'root'
+ cwd node['bigdata'][:jetty_dir]
+ command "sed -i 's|<Set name=\"port\"><SystemProperty|<Set name=\"port\"><Property|' jetty.xml"
+ end
+
+ execute "Change SystemProperty to Property in the 'idleTimeout' attribute of jetty.xml" do
+ user 'root'
+ group 'root'
+ cwd node['bigdata'][:jetty_dir]
+ command "sed -i 's|<Set name=\"idleTimeout\"><SystemProperty|<Set name=\"idleTimeout\"><Property|' jetty.xml"
+ end
+ end
+ end
+
+ #
+ # Install the bigdataHA service file:
+ #
+ execute "copy over the /etc/init.d/bigdataHA file" do
+ user 'root'
+ group 'root'
+ cwd "#{node['bigdata'][:home]}/etc/init.d"
+ command "cp bigdataHA /etc/init.d/bigdataHA; chmod 00755 /etc/init.d/bigdataHA"
+ end
+
+ #
+ # Create the log directory for bigdata:
+ #
+ directory node['bigdata'][:log_dir] do
+ owner node['bigdata'][:user]
+ group node['bigdata'][:group]
+ mode 00755
+ action :create
+ end
+
+ #
+ # Install the log4jHA.properties file:
+ #
+ template "#{node['bigdata'][:home]}/var/config/logging/log4jHA.properties" do
+ source "log4jHA.properties.erb"
+ owner node['bigdata'][:user]
+ group node['bigdata'][:group]
+ mode 00644
+ end
+
+ #
+ # Set the absolute path to the RWStore.properties file
+ #
+ execute "set absolute path to RWStore.properties" do
+ cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF"
+ command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:jetty_dir]}/WEB-INF/RWStore.properties|' web.xml"
+ end
+
+ #
+ # Install the RWStore.properties file:
+ #
+ template node['bigdata'][:properties] do
+ source "RWStore.properties.erb"
+ owner node['bigdata'][:user]
+ group node['bigdata'][:group]
+ mode 00644
+ end
+
+ #
+ # Copy the /etc/default/bigdataHA template:
+ #
+ template "/etc/default/bigdataHA" do
+ source "default/bigdataHA.erb"
+ user 'root'
+ group 'root'
+ mode 00644
+ end
+
+ #
+ # Setup the bigdataHA script as a service:
+ #
+ service "bigdataHA" do
+ supports :restart => true, :status => true
+ action [ :enable, :start ]
+ end
+
+ #
+ # Install the zoo.cfg file:
+ #
+ template "/etc/zookeeper/conf/zoo.cfg" do
+ source "zoo.cfg.erb"
+ owner 'root'
+ group 'root'
+ mode 00644
+ end
+
+ #
+ # The hadoop cookbook overlooks the log4j.properties file presently, but a future version may get this right:
+ #
+ execute "copy the distribution log4j.properties file" do
+ user 'root'
+ group 'root'
+ cwd "/etc/zookeeper/conf.chef"
+ command "cp ../conf.dist/log4j.properties ."
+ end
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/java7.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,32 @@
+# http://jamie.mccrindle.org/2013/07/installing-oracle-java-7-using-chef.html
+#
+# Cookbook Name:: java7
+# Recipe:: default
+#
+
+apt_repository "webupd8team" do
+ uri "http://ppa.launchpad.net/webupd8team/java/ubuntu"
+ components ['main']
+ distribution node['lsb']['codename']
+ keyserver "keyserver.ubuntu.com"
+ key "EEA14886"
+ deb_src true
+end
+
+execute "remove openjdk-6" do
+ command "apt-get -y remove --purge openjdk-6-jdk openjdk-6-jre openjdk-6-jre-headless openjdk-6-jre-lib"
+end
+
+
+# could be improved to run only on update
+execute "accept-license" do
+ command "echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections"
+end
+
+package "oracle-java7-installer" do
+ action :install
+end
+
+package "oracle-java7-set-default" do
+ action :install
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/mapgraph.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,51 @@
+#
+# Cookbook Name:: bigdata
+# Recipe:: mapgraph
+#
+# Copyright 2014, Systap
+#
+
+#
+# MapGraph Installer
+#
+include_recipe "java"
+
+
+#
+# Retrieve the MapGraph source from the specified subversion branch:
+#
+execute "pull mapgraph from svn repo" do
+ user 'ec2-user'
+ group 'ec2-user'
+ cwd "/home/ec2-user"
+ command "svn checkout #{node['mapgraph'][:svn_branch]} #{node['mapgraph'][:source_dir]}"
+end
+
+
+#
+# Build MapGraph:
+#
+execute "make mapgraph" do
+ cwd node['mapgraph'][:source_dir]
+ command "make"
+end
+
+
+
+#
+# Run a basic test of MapGraph:
+#
+execute "test mapgraph" do
+ cwd node['mapgraph'][:source_dir]
+ command "./Algorithms/SSSP/SSSP -g smallRegressionGraphs/small.mtx"
+end
+
+
+#
+# "recursive true" did not work here
+#
+# directory node['bigdata'][:mapgraph_home] do
+# owner 'ec2-user'
+# group 'ec2-user'
+# recursive true
+# end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/nss.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,142 @@
+#
+# Cookbook Name:: bigdata
+# Recipe:: nss
+#
+# Copyright 2014, Systap
+#
+
+#
+# Only do the following for Bigdata NSS install
+#
+if node['bigdata'][:install_flavor] == "nss"
+
+ include_recipe "java"
+
+ #
+ # Create the bigdata system group:
+ #
+ group node['bigdata'][:group] do
+ action :create
+ append true
+ end
+
+ #
+ # Create the bigdata system user:
+ #
+ user node['bigdata'][:user] do
+ gid node['bigdata'][:group]
+ supports :manage_home => true
+ shell "/bin/false"
+ home node['bigdata'][:home]
+ system true
+ action :create
+ end
+
+
+ if node['bigdata'][:build_from_svn]
+ include_recipe "ant"
+ include_recipe "subversion::client"
+
+ #
+ # Retrieve the Bigdata source from the specified subversion branch:
+ #
+ execute "checkout bigdata from svn repo" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd "/home/ubuntu"
+ command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}"
+ end
+
+ #
+ # Build the bigdata release package:
+ #
+ execute "build the nss tar ball" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd node['bigdata'][:source_dir]
+ command "ant package-nss-brew"
+ end
+
+ #
+ # Extract the just built release package, thus installing it in the Bigdata home directory:
+ #
+ execute "Extract and relocate the bigdata archive" do
+ user node['bigdata'][:user]
+ group node['bigdata'][:group]
+ cwd "#{node['bigdata'][:home]}/.."
+ command "tar xvf #{node['bigdata'][:source_dir]}/REL-NSS.bigdata-1.*.tgz"
+ end
+ else
+ #
+ # Retrieve the package prepared for Brew:
+ #
+ remote_file "/tmp/bigdata.tgz" do
+ owner node['bigdata'][:user]
+ group node['bigdata'][:group]
+ source node['bigdata'][:url]
+ end
+
+ #
+ # Extract the just retrieved release package, thus installing it in the Bigdata home directory:
+ #
+ execute "Extract and relocate the bigdata archive" do
+ user node['bigdata'][:user]
+ group node['bigdata'][:group]
+ cwd "#{node['bigdata'][:home]}/.."
+ command "tar xvf /tmp/bigdata.tgz"
+ end
+ end
+
+ #
+ # Create a symbolic link of the bin/bigdataNSS script to /etc/init.d/bigdataNSS:
+ #
+ link "/etc/init.d/bigdataNSS" do
+ to "#{node['bigdata'][:home]}/bin/bigdataNSS"
+ end
+
+ #
+ # Set the install type in the bin/bigdataNSS script:
+ #
+ execute "set the INSTALL_TYPE in bin/bigdata" do
+ cwd "#{node['bigdata'][:home]}/bin"
+ command "sed -i 's|<%= INSTALL_TYPE %>|#{node['bigdata'][:install_flavor]}|' bigdataNSS"
+ end
+
+ #
+ # Set the Bigdata home directory in the bin/bigdataNSS file:
+ #
+ execute "set the BD_HOME in bin/bigdata" do
+ cwd "#{node['bigdata'][:home]}/bin"
+ command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' bigdataNSS"
+ end
+
+ #
+ # Set the absolute path to the bigdata.jnl file in RWStore.properties
+ #
+ execute "set the BD_HOME in RWStore.properties" do
+ cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF"
+ command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' RWStore.properties"
+ end
+
+ #
+ # Set the Bigdata home directory in the log4j.properties file to set the path for the log files:
+ #
+ execute "set the BD_HOME in log4j.properties" do
+ cwd "#{node['bigdata'][:jetty_dir]}/WEB-INF/classes"
+ command "sed -i 's|<%= BD_HOME %>|#{node['bigdata'][:home]}|' log4j.properties"
+ end
+
+ #
+ # Setup the bigdataNSS script as a service:
+ #
+ service "bigdataNSS" do
+ #
+ # Re-enable this when the bin/bigdata script is updated to return a "1" for a successful status:
+ #
+ # See: http://comments.gmane.org/gmane.comp.sysutils.chef.user/2723
+ #
+ # supports :status => true, :start => true, :stop => true, :restart => true
+ supports :start => true, :stop => true, :restart => true
+ action [ :enable, :start ]
+ end
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/ssd.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,39 @@
+#
+# Cookbook Name:: bigdata
+# Recipe:: ssd
+#
+# Copyright 2014, Systap
+#
+
+#
+# SSD Setup
+#
+include_recipe "lvm"
+
+
+#
+# Create the directory that will be the mount target:
+#
+directory node['bigdata'][:data_dir] do
+ owner "root"
+ group "root"
+ mode 00755
+ action :create
+ recursive true
+end
+
+
+#
+# Create and mount the logical volume:
+#
+lvm_volume_group 'vg' do
+ action :create
+ physical_volumes ['/dev/xvdb', '/dev/xvdc']
+
+ logical_volume 'lv_bigdata' do
+ size '100%VG'
+ filesystem 'ext4'
+ mount_point location: node['bigdata'][:data_dir], options: 'noatime,nodiratime'
+ # stripes 4
+ end
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/recipes/tomcat.rb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,168 @@
+#
+# Cookbook Name:: bigdata
+# Recipe:: tomcat
+#
+# Copyright 2014, Systap
+#
+
+#
+# Only do the following for Bigdata Tomcat install
+#
+if node['bigdata'][:install_flavor] == "tomcat"
+
+ include_recipe "java"
+ include_recipe "tomcat"
+
+ #
+ # The tomcat cookbook provides an /etc/default/tomcat7 file that contains multiple JAVA_OPTS lines but allows you to
+ # modify only one of them during installation. As a consequence JAVA_OPTS conflicts may occur. We comment out the
+ # 2nd JAVA_OPTS line to avoid the potential for any conflicts (which do occur with our default java_options attribute).
+ #
+ # Conflicting collector combinations in option list; please refer to the release notes for the combinations allowed
+ # Error: Could not create the Java Virtual Machine.
+ #
+ execute "comment out 2nd JAVA_OPTS line in /etc/default/tomcat7" do
+ cwd "/etc/default"
+ command "sed -i 's|JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|#JAVA_OPTS=\"${JAVA_OPTS} -XX:+UseConcMarkSweepGC\"|' tomcat7"
+ end
+
+
+ if node['bigdata'][:build_from_svn]
+ include_recipe "ant"
+ include_recipe "subversion::client"
+
+ #
+ # Retrieve the Bigdata source from the specified subversion branch:
+ #
+ execute "checkout bigdata from svn repo" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd "/home/ubuntu"
+ command "svn checkout #{node['bigdata'][:svn_branch]} #{node['bigdata'][:source_dir]}"
+ end
+
+ #
+ # Build the bigdata.war file:
+ #
+ execute "build the war file" do
+ user 'ubuntu'
+ group 'ubuntu'
+ cwd node['bigdata'][:source_dir]
+ command "ant war"
+ end
+
+ #
+ # Install the WAR file:
+ #
+ remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do
+ source "file:///#{node['bigdata'][:source_dir]}/ant-build/bigdata.war"
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ end
+
+ else
+ #
+ # Install the WAR file from the SourceForge URL:
+ #
+ remote_file "#{node['tomcat'][:webapp_dir]}/bigdata.war" do
+ source node['bigdata'][:url]
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ end
+ end
+
+ #
+ # Create the JNL home directory
+ #
+ directory node['bigdata'][:data_dir] do
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ mode 00755
+ action :create
+ recursive true
+ end
+
+
+ #
+ # Create the Bigdata log home
+ #
+ directory node['bigdata'][:log_dir] do
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ mode 00755
+ action :create
+ recursive true
+ end
+
+
+ #
+ # Install the RWStore.properties file:
+ #
+ template node['bigdata'][:properties] do
+ source "RWStore.properties.erb"
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ mode 00644
+ end
+
+
+ #
+ # Install the log4j.properties file:
+ #
+ template node['bigdata'][:log4j_properties] do
+ source "log4j.properties.erb"
+ owner node['tomcat'][:user]
+ group node['tomcat'][:group]
+ mode 00644
+ retry_delay 15
+ retries 3
+ end
+
+
+ #
+ # Delete all log files so that the error and warning messages that appeared during the installation
+ # process do not unnecessarily alarm anyone.
+ #
+ execute "remove log files before retart" do
+ cwd "#{node['tomcat'][:log_dir]}"
+ command "rm *"
+ end
+
+ #
+ # The RWStore.properties path is the only property that needs to be adjusted in the web.xml file.
+ # Using a sed command to adjust the property avoids the need to maintain a web.xml template which
+ # in turn updates frequently relative to the other property files. Thus this recipe becomes
+ # suitable against a larger range of bigdata releases.
+ #
+ if node['bigdata'][:base_version].gsub(/\./, '').to_i >= 131
+ #
+ # Set the RWStore.properties path in the web.xml file:
+ #
+ execute "set absolute path for RWStore.properties" do
+ cwd "#{node['bigdata'][:web_home]}/WEB-INF"
+ command "sed -i 's|<param-value>../webapps/bigdata/WEB-INF/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml"
+ end
+
+ #
+ # Remove original RWStore.properties file to avoid user confusion
+ #
+ file "#{node['bigdata'][:web_home]}/WEB-INF/RWStore.properties" do
+ action :delete
+ end
+ else
+ #
+ # 1.3.0 and earlier uses a different path for RWStore.properties. We can remove this if block in 1.3.1
+ #
+ execute "set absolute path for RWStore.properties" do
+ cwd "#{node['bigdata'][:web_home]}/WEB-INF"
+ command "sed -i 's|<param-value>../webapps/bigdata/RWStore.properties|<param-value>#{node['bigdata'][:home]}/RWStore.properties|' web.xml"
+ end
+
+ #
+ # Remove original RWStore.properties file to avoid user confusion
+ #
+ file "#{node['bigdata'][:web_home]}/RWStore.properties" do
+ action :delete
+ end
+ end
+end
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/RWStore.properties.erb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,42 @@
+#
+# Note: These options are applied when the journal and the triple store are
+# first created.
+
+##
+## Journal options.
+##
+
+# The backing file. This contains all your data. You want to put this someplace
+# safe. The default locator will wind up in the directory from which you start
+# your servlet container.
+com.bigdata.journal.AbstractJournal.file=<%= node['bigdata'][:data_dir] %>/bigdata.jnl
+
+# The persistence engine. Use 'Disk' for the WORM or 'DiskRW' for the RWStore.
+com.bigdata.journal.AbstractJournal.bufferMode=<%= node['bigdata']['journal.AbstractJournal.bufferMode'] %>
+
+# Setup for the RWStore recycler rather than session protection.
+com.bigdata.service.AbstractTransactionService.minReleaseAge=<%= node['bigdata']['service.AbstractTransactionService.minReleaseAge'] %>
+
+com.bigdata.btree.writeRetentionQueue.capacity=<%= node['bigdata']['btree.writeRetentionQueue.capacity'] %>
+com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['btree.BTree.branchingFactor'] %>
+
+# 200M initial extent.
+com.bigdata.journal.AbstractJournal.initialExtent=<%= node['bigdata']['journal.AbstractJournal.initialExtent'] %>
+com.bigdata.journal.AbstractJournal.maximumExtent=<%= node['bigdata']['journal.AbstractJournal.maximumExtent'] %>
+
+##
+## Setup for QUADS mode without the full text index.
+##
+com.bigdata.rdf.sail.truthMaintenance=<%= node['bigdata']['rdf.sail.truthMaintenance'] %>
+com.bigdata.rdf.store.AbstractTripleStore.quads=<%= node['bigdata']['rdf.store.AbstractTripleStore.quads'] %>
+com.bigdata.rdf.store.AbstractTripleStore.statementIdentifiers=<%= node['bigdata']['rdf.store.AbstractTripleStore.statementIdentifiers'] %>
+com.bigdata.rdf.store.AbstractTripleStore.textIndex=<%= node['bigdata']['rdf.store.AbstractTripleStore.textIndex'] %>
+com.bigdata.rdf.store.AbstractTripleStore.axiomsClass=<%= node['bigdata']['rdf.store.AbstractTripleStore.axiomsClass'] %>
+
+# Bump up the branching factor for the lexicon indices on the default kb.
+com.bigdata.namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['namespace.kb.lex.com.bigdata.btree.BTree.branchingFactor'] %>
+
+# Bump up the branching factor for the statement indices on the default kb.
+com.bigdata.namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor=<%= node['bigdata']['namespace.kb.spo.com.bigdata.btree.BTree.branchingFactor'] %>
+com.bigdata.rdf.sail.bufferCapacity=<%= node['bigdata']['rdf.sail.sailBufferCapacity'] %>
+# com.bigdata.rdf.store.AbstractTripleStore.vocabularyClass=<%= node['bigdata']['rdf.store.AbstractTripleStore.vocabularyClass'] %>
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/default/bigdataHA.erb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,51 @@
+# Environment for bigdata HA services.
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Note: You MUST provide the location of the executable scripts and the
+# pid file that is written by $binDir/startHAServices. These SHOULD be
+# absolute path names.
+
+BD_USER="<%= node['bigdata'][:user] %>"
+BD_GROUP="<%= node['bigdata'][:group] %>"
+
+binDir=<%= node['bigdata'][:home] %>/bin
+pidFile=<%= node['bigdata'][:home] %>/var/lock/pid
+
+##
+# The following variables configure the startHAServices script, which
+# passes them through to HAJournal.config.
+##
+
+# Name of the bigdata federation of services. Override for real install.
+export FEDNAME=<%= node['bigdata'][:fedname] %>
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication cluster, then just
+# given each such cluster its own name.
+export LOGICAL_SERVICE_ID=<%= node['bigdata'][:logical_service_id] %>
+
+# Local directory where the service will store its state.
+export FED_DIR=<%= node['bigdata'][:home] %>
+export DATA_DIR=<%= node['bigdata'][:data_dir] %>
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="${FEDNAME}"
+export LOCATORS="%JINI_LOCATORS%"
+
+# Apache ZooKeeper - NO default.
+export ZK_SERVERS="<%= node['bigdata'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['bigdata'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>"
+
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export JETTY_PORT=8080
+#export JETTY_XML=var/jetty/jetty.xml
+#export JETTY_RESOURCE_BASE=var/jetty/html
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/deployment/chef/templates/default/log4j.properties.erb 2014-05-23 15:53:35 UTC (rev 8415)
@@ -0,0 +1,98 @@
+# Default log4j configuration. See the individual classes for the
+# specific loggers, but generally they are named for the class in
+# which they are defined.
+
+# Default log4j configuration for testing purposes.
+#
+# You probably want to set the default log level to ERROR.
+#
+#log4j.rootCategory=WARN, dest1
+#log4j.rootCategory=WARN, dest2
+log4j.rootCategory=WARN, file
+
+# Loggers.
+# Note: logging here at INFO or DEBUG will significantly impact throughput!
+log4j.logger.com.bigdata=WARN
+log4j.logger.com.bigdata.btree=WARN
+log4j.logger.com.bigdata.counters.History=ERROR
+log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR
+log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO
+log4j.logger.com.bigdata.journal.CompactTask=INFO
+log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR
+log4j.logger.com.bigdata.rdf.load=INFO
+log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO
+
+# Normal data loader (single threaded).
+# log4j.logger.com.bigdata.rdf.store.DataLoader=INFO
+
+
+# file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=<%= node['bigdata'][:log_dir] %>/bigdata.log
+log4j.appender.file.MaxFileSize=4MB
+log4j.appender.file.MaxBackupIndex=10
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{MMM dd, yyyy HH:mm:ss} %-5p: %F:%L: %m%n
+
+# dest1
+log4j.appender.dest1=org.apache.log4j.ConsoleAppender
+log4j.appender.dest1.layout=org.apache.log4j.PatternLayout
+log4j.appender.dest1.layout.ConversionPattern=%d{MMM dd, yyyy HH:mm:ss} %-5p: %F:%L: %m%n
+#log4j.appender.dest1.layout.ConversionPattern=%-5p: %r %l: %m%n
+#log4j.appender.dest1.layout.ConversionPattern=%-5p: %m%n
+#log4j.appender.dest1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+#log4j.appender.dest1.layout.ConversionPattern=%-4r(%d) [%t] %-5p %c(%l:%M) %x - %m%n
+
+## dest2 includes the thread name and elapsed milliseconds.
+## Note: %r is elapsed milliseconds.
+## Note: %t is the thread name.
+## See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html
+#log4j.appender.dest2=org.apache.log4j.ConsoleAppender
+#log4j.appender.dest2.layout=org.apache.log4j.PatternLayout
+#log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n
+#
+###
+## Rule execution log. This is a formatted log file (comma delimited).
+log4j.logger.com.bigdata.relation.rule.eval.RuleLog=INFO,ruleLog
+log4j.additivity.com.bigdata.relation.rule.eval.RuleLog=false
+log4j.appender.ruleLog=org.apache.log4j.FileAppender
+log4j.appender.ruleLog.Threshold=ALL
+log4j.appender.ruleLog.File=rules.log
+log4j.appender.ruleLog.File=<%= node['bigdata'][:log_dir] %>/rules.log
+log4j.appender.ruleLog.Append=true
+## I find that it is nicer to have this unbuffered since you can see what
+## is going on and to make sure that I have complete rule evaluation logs
+## on shutdown.
+log4j.appender.ruleLog.BufferedIO=false
+log4j.appender.ruleLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.ruleLog.layout.ConversionPattern=%m
+#
+###
+## Summary query evaluation log (tab delimited file). Uncomment the next line to enable
+##log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog
+#log4j.additivity.com.bigdata.bop.engine.QueryLog=false
+#log4j.appender.queryLog=org.apache.log4j.FileAppender
+#log4j.appender.queryLog.Threshold=ALL
+#log4j.appender.queryLog.File=<%= node['bigdata'][:log_dir] %>/queryLog.csv
+#log4j.appender.queryLog.Append=true
+## I find that it is nicer to have this unbuffered since you can see what
+## is going on and to make sure that I have complete rule evaluation logs
+## on shutdown.
+#log4j.appender.queryLog.BufferedIO=false
+#...
[truncated message content] |