From: <dme...@us...> - 2014-04-14 12:59:13
Revision: 8118 http://sourceforge.net/p/bigdata/code/8118 Author: dmekonnen Date: 2014-04-14 12:59:10 +0000 (Mon, 14 Apr 2014) Log Message: ----------- Initial commit for BigdataHA cluster deployment automation. Added Paths: ----------- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb 
branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Berksfile 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,12 @@ +site :opscode + +cookbook "apt" +cookbook 'java', '~> 1.22.0' +cookbook 'ant' +cookbook 'subversion' +cookbook 'lvm' +cookbook "hadoop" +cookbook "emacs" +# cookbook "ganglia" + +metadata Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/CHANGELOG.txt 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,8 @@ +# CHANGELOG for systap-aws-bigdata-ha + +This file is used to list changes made in each version of systap-aws-bigdata-ha. + +## 0.1.0: + +* Initial release of systap-aws-bigdata-ha + Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Gemfile 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,3 @@ +source 'https://rubygems.org' + +gem 'berkshelf' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/README.txt 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,94 @@ +REQUIREMENTS +============ +This Vagrant resource has been tested against the following versions of its requirements: + + Vagrant: 1.4.3 + Vagrant Plugins: + * nugrant (1.4.2) + * vagrant-aws (0.4.1) + * vagrant-berkshelf (1.3.7) + + Chef: 11.10.4 +Berkshelf: 2.0.10 + Python: 2.7.5 + Ruby: 1.9.3p448 (2013-06-27 revision 41675) [x86_64-darwin12.3.0] + Boto: 2.27.0 + + + +CONFIGURATION +============= + +AWS +--- +Your organization's AWS access credentials are essential to launching the cluster. Please retrieve them before attempting to bring up the cluster: + + * AWS Access Key ID + * AWS Secret Access Key + * AWS Keypair Name + * The SSH Private Key file corresponding to the keypair + * AWS Security Group for the cluster nodes to join [must minimally allow public TCP access to ports 22 and 8080] + + +All AWS settings reside in the "aws.rc" file. You must edit this file and set AWS values accordingly.
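Before running bin/createCluster.sh it can help to confirm that every value in aws.rc has actually been exported. The following is a minimal sketch only (it is not part of this commit, and the helper script name is hypothetical); it assumes aws.rc has already been sourced into the current shell, and it checks the same variable names that aws.rc defines:

    #! /usr/bin/python
    # Hypothetical helper (not part of this commit): verify that the settings
    # exported by aws.rc are present before attempting "vagrant up".
    import os
    import sys

    required = [
        "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_KEYPAIR_NAME",
        "AWS_SSH_PRIVATE_KEY", "AWS_SECURITY_GROUPS", "AWS_AMI",
        "AWS_AMI_USERNAME", "AWS_REGION", "AWS_INSTANCE_TYPE",
        "BIGDATA_HA_HOST_A", "BIGDATA_HA_HOST_B", "BIGDATA_HA_HOST_C",
    ]

    missing = [name for name in required if not os.environ.get(name)]
    if missing:
        print "Missing settings (edit aws.rc and 'source aws.rc' first):"
        for name in missing:
            print "\t" + name
        sys.exit(1)

    if not os.path.isfile(os.environ["AWS_SSH_PRIVATE_KEY"]):
        print "AWS_SSH_PRIVATE_KEY does not point to an existing file."
        sys.exit(1)

    print "aws.rc appears to be fully configured."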
+ + +Vagrant +------- +Vagrant needs the required plugins (see above); if they are not already installed, they may be added with: + + % vagrant plugin install nugrant + % vagrant plugin install vagrant-aws + % vagrant plugin install vagrant-berkshelf + + +Boto: AWS API +------------- +The "Boto" Python library for the AWS API must be installed in order to instantiate the cluster. If it is not already installed: + + % pip install boto + +alternatively: + + % easy_install boto + + +If the error message "ImportError: No module named boto" appears while running the Python scripts, you will need to set the +PYTHONPATH environment variable, for example: + + % export PYTHONPATH=/usr/local/lib/python2.7/site-packages + + + +LAUNCHING BIGDATA HA CLUSTER +============================ + +The cluster may be brought up with: + + % ./bin/createCluster.sh + +Launching the cluster may take up to 10 minutes. When complete, the cluster creation script will present the status URL for each node. + + +SSH to a specific node: + + % vagrant ssh bigdataA + + +Stop & Start the cluster: + + % vagrant halt + % vagrant up + + +Terminating the cluster: + + % vagrant destroy + + +Troubleshooting +--------------- +If a host is slow to start up, there can be an initial connection failure. For example, the bigdataA "status" page may not +appear if bigdataB or bigdataC is slow to start up. In this case, log into bigdataA ("vagrant ssh bigdataA") and restart +the service ("sudo /etc/init.d/bigdataHA restart"); the host will then connect as expected. + Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Thorfile 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,5 @@ +# encoding: utf-8 + +require 'bundler' +require 'bundler/setup' +require 'berkshelf/thor' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/Vagrantfile 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,227 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +ENV['VAGRANT_DEFAULT_PROVIDER'] = 'aws' + +Vagrant.require_plugin "vagrant-berkshelf" + +Vagrant.configure("2") do |config| + # All Vagrant configuration is done here. The most common configuration + # options are documented and commented below. For a complete reference, + # please see the online documentation at vagrantup.com.
+ +config.vm.define :bigdataA do |bigdataA| + bigdataA.vm.box = "dummy" + bigdataA.vm.hostname = ENV['BIGDATA_HA_HOST_A'] + + bigdataA.berkshelf.enabled = true + + bigdataA.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_A'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataA.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataA.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataA + + +config.vm.define :bigdataB do |bigdataB| + bigdataB.vm.box = "dummy" + bigdataB.vm.hostname = ENV['BIGDATA_HA_HOST_B'] + + bigdataB.berkshelf.enabled = true + + bigdataB.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_B'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataB.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataB.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataB + + +config.vm.define :bigdataC do 
|bigdataC| + bigdataC.vm.box = "dummy" + bigdataC.vm.hostname = ENV['BIGDATA_HA_HOST_C'] + + bigdataC.berkshelf.enabled = true + + bigdataC.vm.provider :aws do |aws, override| + aws.access_key_id = ENV['AWS_ACCESS_KEY_ID'] + aws.secret_access_key = ENV['AWS_SECRET_ACCESS_KEY'] + aws.keypair_name = ENV['AWS_KEYPAIR_NAME'] + + aws.ami = ENV['AWS_AMI'] + + aws.block_device_mapping = [ + { + :DeviceName => "/dev/sdb", + :VirtualName => "ephemeral0" + }, + { + :DeviceName => "/dev/sdc", + :VirtualName => "ephemeral1" + } + ] + + aws.region = ENV['AWS_REGION'] + aws.instance_type = ENV['AWS_INSTANCE_TYPE'] + aws.security_groups = [ ENV['AWS_SECURITY_GROUPS'], ENV['AWS_SECURITY_GROUP_PRIVATE'] ] + + aws.tags = { + 'Name' => ENV['BIGDATA_HA_HOST_C'] + } + + override.ssh.username = ENV['AWS_AMI_USERNAME'] + override.ssh.private_key_path = ENV['AWS_SSH_PRIVATE_KEY'] + end + + bigdataC.vm.provision :chef_solo do |chef| + chef.json = { + :java => { + "install_flavor" => "oracle", + :jdk_version => "7", + :oracle => { 'accept_oracle_download_terms' => true } + }, + :zookeeper => { + :zoocfg => { :clientPort=> '2081' } + } + } + + bigdataC.vm.provision :shell, inline: "sudo apt-get update ; sudo curl -L https://www.opscode.com/chef/install.sh | sudo bash" + + chef.run_list = [ + "recipe[apt]", + "recipe[java]", + "recipe[emacs]", # default is no x11 + # + # use this recipe only if the oracle server is timing out in the above 'java' recipe + # + # "recipe[systap-bigdataHA::java7]", + "recipe[ant]", + "recipe[hadoop::zookeeper_server]", + "recipe[subversion::client]", + "recipe[systap-bigdataHA::ssd]", + "recipe[systap-bigdataHA]" + # "recipe[ganglia]", + # "recipe[ganglia::web]" + ] + + end + +end # bigdataC + +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/attributes/default.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,32 @@ +# Who runs bigdata? +default['systap-bigdataHA'][:bigdata_user] = "bigdata" +default['systap-bigdataHA'][:bigdata_group] = "bigdata" + +# Where to find and build bigdata code +default['systap-bigdataHA'][:svn] = "https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0" +default['systap-bigdataHA'][:source] = "/home/ubuntu/bigdata-code" + +# Name of the federation of services (controls the Apache River GROUPS). +default['systap-bigdataHA'][:fedname] = 'my-cluster-1' + +# Path for local storage for this federation of services. +default['systap-bigdataHA'][:fed_dir] = '/var/lib/bigdata' + +# Where the bigdata-ha.jnl file will live: +default['systap-bigdataHA'][:data_dir] = node['systap-bigdataHA'][:fed_dir] + "/data" + +# Where the log files will live: +default['systap-bigdataHA'][:log_dir] = node['systap-bigdataHA'][:fed_dir] + "/logs" + +# Name of the replication cluster to which this HAJournalServer will belong. +default['systap-bigdataHA'][:logical_service_id] = 'HA-Replication-Cluster-1' + +# Where to find the Apache River service registrars (can also use multicast). +default['systap-bigdataHA'][:river_locator1] = 'bigdataA' +default['systap-bigdataHA'][:river_locator2] = 'bigdataB' +default['systap-bigdataHA'][:river_locator3] = 'bigdataC' + +# Where to find the Apache Zookeeper ensemble. 
+default['systap-bigdataHA'][:zk_server1] = 'bigdataA' +default['systap-bigdataHA'][:zk_server2] = 'bigdataB' +default['systap-bigdataHA'][:zk_server3] = 'bigdataC' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/aws.rc 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,45 @@ +# +# Set your organization's AWS access credentials here: +# +export AWS_ACCESS_KEY_ID="YOUR AWS ACCESS KEY ID" +export AWS_SECRET_ACCESS_KEY="YOUR AWS SECRET ACCESS KEY" +export AWS_SSH_PRIVATE_KEY="/path/to/your/private_key.pem" +export AWS_KEYPAIR_NAME="YOUR AWS KEYPAIR NAME" + + +# +# Add a single security group here (a list will be supported later). +# The security group must minimally allow outside access to ports 22 and 8080. +# +# SSH TCP 22 0.0.0.0/0 +# Custom TCP Rule TCP 8080 0.0.0.0/0 +# +export AWS_SECURITY_GROUPS="YOUR AWS SECURITY GROUP" + + +# +# Adjust as desired: +# +export AWS_REGION="us-east-1" + + +# +# Ubuntu 12.04 settings: +# +export AWS_AMI="ami-59a4a230" +export AWS_AMI_USERNAME="ubuntu" + + +# +# The SSD configuration assumes the m3.xlarge size. The SSD recipe should work for +# SSD devices (not more than two) of any size, but has not been tested. +# +export AWS_INSTANCE_TYPE="m3.xlarge" + + +# +# Default host names, adjust as desired: +# +export BIGDATA_HA_HOST_A="bigdataA" +export BIGDATA_HA_HOST_B="bigdataB" +export BIGDATA_HA_HOST_C="bigdataC" Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,11 @@ +#! /bin/sh + +export PYTHONPATH=/usr/local/lib/python2.7/site-packages + +source aws.rc +python ./bin/createSecurityGroup.py +source .aws_security_group +rm .aws_security_group +vagrant up +echo "Vagrant up completed. Setting host names..." +python ./bin/setHosts.py Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createCluster.sh ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,22 @@ +#! /usr/bin/python + +import os +from boto import ec2 +from boto.manage.cmdshell import sshclient_from_instance +import paramiko +from datetime import datetime + + +if __name__ == '__main__': + + # create a security group fo this cluster only. 
Just create it now so that it can be associated with the new + # instance at create time. Add rules to this group once the instance IP addresses are known. + + ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] ) + + group_name = "BDHA " + str( datetime.utcnow() ) + + group = ec2conn.create_security_group( group_name, "BigdataHA Security Group" ) + + envFile = open( ".aws_security_group", "w" ) + envFile.write( 'export AWS_SECURITY_GROUP_PRIVATE="' + group_name + '"') Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/createSecurityGroup.py ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,91 @@ +#! /usr/bin/python + +import os +import sys +from boto import ec2 +from boto.manage.cmdshell import sshclient_from_instance +import paramiko + +bigdataA = os.environ["BIGDATA_HA_HOST_A"] +bigdataB = os.environ["BIGDATA_HA_HOST_B"] +bigdataC = os.environ["BIGDATA_HA_HOST_C"] + +hostMap = {} +bigdataHosts = [None] * 3 + +def createHostAdditions( instances ): + hostsAdd = "\n" + for instance in instances: + data = instance.__dict__ + if bigdataA in data['tags']['Name']: + bigdataHosts[0] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataA + "\\n" + hostMap[ bigdataA ] = data[ 'private_ip_address' ] + elif bigdataB in data['tags']['Name']: + bigdataHosts[1] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataB + "\\n" + hostMap[ bigdataB ] = data[ 'private_ip_address' ] + elif bigdataC in data['tags']['Name']: + bigdataHosts[2] = instance + hostsAdd += data[ 'private_ip_address' ] + "\\t" + bigdataC + "\\n" + hostMap[ bigdataC ] = data[ 'private_ip_address' ] + + return hostsAdd + +def createZookeeperSubstitution( index, host, ipAddress ): + return "sudo sed -i 's|server." + index + "=" + host + "|server." + index + "=" + ipAddress + "|' /etc/zookeeper/conf/zoo.cfg" + +def createBigdataHASubstitution( host, ipAddress ): + return "sudo sed -i 's|" + host + "|" + ipAddress + "|' /etc/defaults/bigdataHA" + +if __name__ == '__main__': + + ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"] ) + runningFilter = {'instance-state-name':'running'} # only running states + reservations = ec2conn.get_all_instances( filters=runningFilter ) + instances = [i for r in reservations for i in r.instances] + + hostsAdd = createHostAdditions( instances ) + + # Create an SSH client for our instance + # key_path is the path to the SSH private key associated with instance + # user_name is the user to login as on the instance (e.g. ubuntu, ec2-user, etc.) 
+ key_path = os.environ["AWS_SSH_PRIVATE_KEY"] + + private_security_group_name = os.environ["AWS_SECURITY_GROUP_PRIVATE"] + group = ec2conn.get_all_security_groups( private_security_group_name )[0] + + i = 1 + for host in bigdataHosts: + ssh_client = sshclient_from_instance( host, key_path, user_name='ubuntu' ) + # ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + # Run the command. Returns a tuple consisting of: + # The integer status of the command + # A string containing the output of the command + # A string containing the stderr output of the command + status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo \"" + hostsAdd + "\" >> /etc/hosts'" ) + status, stdin, stderr = ssh_client.run( "sudo sh -c 'echo " + str(i) + " > /var/lib/zookeeper/myid'" ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "1", bigdataA, hostMap[ bigdataA ] ) ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "2", bigdataB, hostMap[ bigdataB ] ) ) + status, stdin, stderr = ssh_client.run( createZookeeperSubstitution( "3", bigdataC, hostMap[ bigdataC ] ) ) + + name = host.__dict__['tags']['Name'] + hostAddress = host.__dict__['private_ip_address'] + # status, stdin, stderr = ssh_client.run( createBigdataHASubstitution( name, hostAddress ) ) + + hostAddress = hostAddress + "/32" + group.authorize( ip_protocol="tcp", from_port="0", to_port="65535", cidr_ip=hostAddress, src_group=None ) + + i += 1 + # + # startHAServices does not exit as expected, so remote restart commands will hang. + # As a work around, we restart the host: + # + # status, stdin, stderr = ssh_client.run( "sudo /etc/init.d/bigdataHA restart" ) + # status, stdin, stderr = ssh_client.run( "sudo service bigdataHA restart" ) + host.reboot() + + print "The hosts are now rebooting, this may take several minutes. \nOnce back up, you may confirm status by visiting:\n" + for host in bigdataHosts: + print "\thttp://" + host.__dict__['ip_address'] + ":8080/status\n" Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/bin/setHosts.py ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/chefignore 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,96 @@ +# Put files/directories that should be ignored in this file when uploading +# or sharing to the community site. +# Lines that start with '# ' are comments. + +# OS generated files # +###################### +.DS_Store +Icon? 
+nohup.out +ehthumbs.db +Thumbs.db + +# SASS # +######## +.sass-cache + +# EDITORS # +########### +\#* +.#* +*~ +*.sw[a-z] +*.bak +REVISION +TAGS* +tmtags +*_flymake.* +*_flymake +*.tmproj +.project +.settings +mkmf.log + +## COMPILED ## +############## +a.out +*.o +*.pyc +*.so +*.com +*.class +*.dll +*.exe +*/rdoc/ + +# Testing # +########### +.watchr +.rspec +spec/* +spec/fixtures/* +test/* +features/* +Guardfile +Procfile + +# SCM # +####### +.git +*/.git +.gitignore +.gitmodules +.gitconfig +.gitattributes +.svn +*/.bzr/* +*/.hg/* +*/.svn/* + +# Berkshelf # +############# +Berksfile +Berksfile.lock +cookbooks/* +tmp + +# Cookbooks # +############# +CONTRIBUTING +CHANGELOG* + +# Strainer # +############ +Colanderfile +Strainerfile +.colander +.strainer + +# Vagrant # +########### +.vagrant +Vagrantfile + +# Travis # +########## +.travis.yml Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/files/default/test/default_test.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,6 @@ +require 'minitest/spec' +describe_recipe 'systap-bigdata::test' do + it "is running the tomcat server" do + service('tomcat').must_be_running + end +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/metadata.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,15 @@ +name 'systap-bigdataHA' +maintainer 'Daniel Mekonnen' +maintainer_email 'daniel<no-spam-at>systap.com' +license 'All rights reserved' +description 'Installs/Configures Systap Bigdata High Availability' +long_description IO.read(File.join(File.dirname(__FILE__), 'README.txt')) +version '0.1.0' + +depends 'apt' +depends 'java', '>= 1.22.0' +depends 'ant' +depends 'subversion' +depends 'lvm' +depends 'hadoop' +depends 'emacs' Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/default.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,142 @@ +# +# Cookbook Name:: systap-bigdataHA +# Recipe:: default +# +# Copyright 2014, Systap +# +# + + +group "bigdata" do + action :create + append true +end + +user "#{node['systap-bigdataHA'][:bigdata_user]}" do + gid "#{node['systap-bigdataHA'][:bigdata_group]}" + supports :manage_home => true + shell "/bin/false" + home "#{node['systap-bigdataHA'][:fed_dir]}" + system true + action :create +end + +# directory node['systap-bigdataHA'][:fed_dir] do +execute "change the ownership of the bigdata home directory to bigdata, which strangely is not" do + user "root" + group "root" + cwd "#{node['systap-bigdataHA'][:fed_dir]}" + command "chown 
-R #{node['systap-bigdataHA'][:bigdata_user]}:#{node['systap-bigdataHA'][:bigdata_group]} ." +end + +execute "checkout bigdata from svn repo" do + user 'ubuntu' + group 'ubuntu' + cwd "/home/ubuntu" + command "svn checkout https://svn.code.sf.net/p/bigdata/code/branches/BIGDATA_RELEASE_1_3_0 #{node['systap-bigdataHA'][:source]}" +end + +execute "ant deploy-artifact" do + user 'ubuntu' + group 'ubuntu' + cwd "#{node['systap-bigdataHA'][:source]}" + command "ant deploy-artifact" +end + +execute "deflate REL tar" do + user 'bigdata' + group 'bigdata' + cwd "#{node['systap-bigdataHA'][:fed_dir]}/.." + command "tar xvf #{node['systap-bigdataHA'][:source]}/REL.bigdata-1.3.0-*.tgz" +end + +execute "copy over the /etc/init.d/bigdataHA file" do + user 'root' + group 'root' + cwd "#{node['systap-bigdataHA'][:fed_dir]}/etc/init.d" + command "cp bigdataHA /etc/init.d/bigdataHA; chmod 00755 /etc/init.d/bigdataHA" +end + +# +# Copy the /etc/init.d/bigdataHA template: +# +# template "/etc/init.d/bigdataHA" do +# source "init.d/bigdataHA.erb" +# user 'root' +# group 'root' +# mode 00755 +# end + +# +# Create the log directory for bigdata: +# +directory node['systap-bigdataHA'][:log_dir] do + owner "bigdata" + group "bigdata" + mode 00755 + action :create +end + +# +# Install the log4jHA.properties file: +# +template "#{node['systap-bigdataHA'][:fed_dir]}/var/config/logging/log4jHA.properties" do + source "log4jHA.properties.erb" + owner 'bigdata' + group 'bigdata' + mode 00644 +end + +# +# Install the log4jHA.properties file: +# +template "#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF/jetty.xml" do + source "jetty.xml.erb" + owner 'bigdata' + group 'bigdata' + mode 00644 +end + + +# +# Set the absolute path to the RWStore.properties file +# +execute "set absolute path to RWStore.properties" do + cwd "#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF" + command "sed -i 's|<param-value>WEB-INF/RWStore.properties|<param-value>#{node['systap-bigdataHA'][:fed_dir]}/var/jetty/WEB-INF/RWStore.properties|' web.xml" +end + +# +# Copy the /etc/default/bigdataHA template: +# +template "/etc/default/bigdataHA" do + source "default/bigdataHA.erb" + user 'root' + group 'root' + mode 00644 +end + +service "bigdataHA" do + supports :restart => true, :status => true + action [ :enable, :start ] +end + +# +# Install the zoo.cfg file: +# +template "/etc/zookeeper/conf/zoo.cfg" do + source "zoo.cfg.erb" + owner 'root' + group 'root' + mode 00644 +end + +# +# the hadoop cookbook overlooks the log4j.properties file presently, but a future version may get this right: +# +execute "copy the distribution log4j.properties file" do + user 'root' + group 'root' + cwd "/etc/zookeeper/conf.chef" + command "cp ../conf.dist/log4j.properties ." 
+end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/java7.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,32 @@ +# http://jamie.mccrindle.org/2013/07/installing-oracle-java-7-using-chef.html +# +# Cookbook Name:: java7 +# Recipe:: default +# + +apt_repository "webupd8team" do + uri "http://ppa.launchpad.net/webupd8team/java/ubuntu" + components ['main'] + distribution node['lsb']['codename'] + keyserver "keyserver.ubuntu.com" + key "EEA14886" + deb_src true +end + +execute "remove openjdk-6" do + command "apt-get -y remove --purge openjdk-6-jdk openjdk-6-jre openjdk-6-jre-headless openjdk-6-jre-lib" +end + + +# could be improved to run only on update +execute "accept-license" do + command "echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections" +end + +package "oracle-java7-installer" do + action :install +end + +package "oracle-java7-set-default" do + action :install +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/recipes/ssd.rb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,29 @@ +# +# Cookbook Name:: planx-aws-bigdata-rdr +# Recipe:: ssd +# +include_recipe "lvm" + +# +# SSD Setup +# +directory node['systap-bigdataHA'][:data_dir] do + owner "root" + group "root" + mode 00755 + action :create + recursive true +end + + +lvm_volume_group 'vg' do + action :create + physical_volumes ['/dev/xvdb', '/dev/xvdc'] + + logical_volume 'lv_bigdata' do + size '100%VG' + filesystem 'ext4' + mount_point location: node['systap-bigdataHA'][:data_dir], options: 'noatime,nodiratime' + # stripes 4 + end +end Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/default/bigdataHA.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,51 @@ +# Environment for bigdata HA services. +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Note: You MUST provide the location of the executable scripts and the +# pid file that is written by $binDir/startHAServices. These SHOULD be +# absolute path names. + +BD_USER="<%= node['systap-bigdataHA'][:bigdata_user] %>" +BD_GROUP="<%= node['systap-bigdataHA'][:bigdata_group] %>" + +binDir=<%= node['systap-bigdataHA'][:fed_dir] %>/bin +pidFile=<%= node['systap-bigdataHA'][:fed_dir] %>/var/lock/pid + +## +# The following variables configure the startHAServices script, which +# passes them through to HAJournal.config. 
+## + +# Name of the bigdata federation of services. Override for real install. +export FEDNAME=<%= node['systap-bigdataHA'][:fedname] %> + +# This is different for each HA replication cluster in the same federation +# of services. If you have multiple such replication clusters, then just +# give each such cluster its own name. +export LOGICAL_SERVICE_ID=<%= node['systap-bigdataHA'][:logical_service_id] %> + +# Local directory where the service will store its state. +export FED_DIR=<%= node['systap-bigdataHA'][:fed_dir] %> +export DATA_DIR=<%= node['systap-bigdataHA'][:data_dir] %> + +# Apache River - NO default for "LOCATORS". +export GROUPS="${FEDNAME}" +export LOCATORS="jini://<%= node['systap-bigdataHA'][:river_locator1] %>/,jini://<%= node['systap-bigdataHA'][:river_locator2] %>/,jini://<%= node['systap-bigdataHA'][:river_locator3] %>/" + +# Apache ZooKeeper - NO default. +export ZK_SERVERS="<%= node['systap-bigdataHA'][:zk_server1] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['systap-bigdataHA'][:zk_server2] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>,<%= node['systap-bigdataHA'][:zk_server3] %>:<%= node['zookeeper'][:zoocfg][:clientPort] %>" + + +# All of these have defaults. Override as necessary. +#export REPLICATION_FACTOR=3 +#export HA_PORT=9090 +#export JETTY_PORT=8080 +#export JETTY_XML=var/jetty/jetty.xml +#export JETTY_RESOURCE_BASE=var/jetty/html +#export COLLECT_QUEUE_STATISTICS= +#export COLLECT_PLATFORM_STATISTICS= +#export GANGLIA_REPORT= +#export GANGLIA_LISTENER= +#export SYSSTAT_DIR= Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,133 @@ +#!/bin/bash + +# init.d style script for bigdata HA services. The script can be used +# to 'start' or 'stop' services. +# +# Environment: +# +# binDir - The directory containing the installed scripts. +# pidFile - The pid is written on this file. +# +# Misc. +# +# See http://tldp.org/LDP/abs/html/index.html +# +# Note: Blank lines are significant in shell scripts. +# +# Note: Children must do "exit 0" to indicate success. +# +# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix + +# Source function library (just used for 'action'). If you don't have this +# it SHOULD automatically use the inline definition for "action()". +if [ -f "/etc/init.d/functions" ]; then + . /etc/init.d/functions +else +# Run some action. Log its output. No fancy colors. First argument is the +# label for the log file. Remaining arguments are the command to execute +# and its arguments, if any. + action() { + local STRING rc + STRING=$1 + echo -n "$STRING " + shift + sudo -u bigdata -g bigdata "$@" && echo -n "[OK]" || echo -n "[FAILED]" + rc=$? + echo + return $rc + } +fi + +# Where the scripts live. +cd `dirname $0` + +## +# Highly Recommended OS Tuning. +## + +# Do not swap out applications while there is free memory. +#/sbin/sysctl -w vm.swappiness=0 + +# Setup the environment. +source /etc/default/bigdataHA + +if [ -z "$binDir" ]; then + echo $"$0 : environment not setup: binDir is undefined."
+ exit 1; +fi +if [ -z "$pidFile" ]; then + echo $"$0 : environment not setup: pidFile is undefined" + exit 1; +fi + +# +# See how we were called. +# +case "$1" in + start) + + cd <%= node['systap-bigdataHA'][:fed_dir] %> +# +# Start the ServiceStarter and child services if not running. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + fi + fi + if [ ! -f "$pidFile" ]; then + action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices + else + echo $"`date` : `hostname` : running as $pid" + fi + ;; + stop) +# +# Stop the ServiceStarter and all child services. +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then +# The process has died so remove the old pid file. + echo $"`date` : `hostname` : $pid died?" + rm -f "$pidFile" + else + action $"`date` : `hostname` : bringing down services: " kill $pid + rm -f "$pidFile" + fi + fi + ;; + status) +# +# Report status for the ServicesManager (up or down). +# + if [ -f "$pidFile" ]; then + read pid < "$pidFile" + pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid ) + if [ -z "$pidno" ]; then + echo $"`date` : `hostname` : process died? pid=$pid." + else + echo $"`date` : `hostname` : running as $pid." + fi + else + echo $"`date` : `hostname` : not running." + fi + ;; + restart) + $0 stop + $0 start + ;; + *) +# +# Usage +# + echo $"Usage: $0 {start|stop|status|restart}" + exit 1 +esac + +exit 0 Property changes on: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/init.d/bigdataHA.erb ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* \ No newline at end of property Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/jetty.xml.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,60 @@ +<?xml version="1.0"?> +<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure.dtd"> +<!-- See http://www.eclipse.org/jetty/documentation/current/ --> +<!-- See http://wiki.eclipse.org/Jetty/Reference/jetty.xml_syntax --> +<Configure id="Server" class="org.eclipse.jetty.server.Server"> + + <!-- =========================================================== --> + <!-- Server Thread Pool --> + <!-- =========================================================== --> + <Set name="ThreadPool"> + <!-- Default queued blocking threadpool --> + <New class="org.eclipse.jetty.util.thread.QueuedThreadPool"> + <Set name="minThreads">10</Set> + <Set name="maxThreads">64</Set> + </New> + </Set> + + <!-- =========================================================== --> + <!-- Set connectors --> + <!-- =========================================================== --> + + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.nio.SelectChannelConnector"> + <Set name="host"><Property name="jetty.host" 
/></Set> + <Set name="port"><Property name="jetty.port" default="8080"/></Set> + </New> + </Arg> + </Call> + + <!-- =========================================================== --> + <!-- Set handler Collection Structure --> + <!-- =========================================================== --> + <Set name="handler"> + <New id="Handlers" class="org.eclipse.jetty.server.handler.HandlerCollection"> + <Set name="handlers"> + <Array type="org.eclipse.jetty.server.Handler"> + <Item> + <New id="WebAppContext" class="org.eclipse.jetty.webapp.WebAppContext"> + <!-- The location of the top-level of the bigdata webapp. --> + <Set name="resourceBase"> + <Property name="jetty.resourceBase" default="<%= node['systap-bigdataHA'][:fed_dir] %>/var/jetty" /> + </Set> + <Set name="contextPath">/</Set> + <Set name="descriptor"><%= node['systap-bigdataHA'][:fed_dir] %>/var/jetty/WEB-INF/web.xml</Set> + <Set name="parentLoaderPriority">true</Set> + <Set name="extractWAR">false</Set> + <Set name="welcomeFiles"> + <Array type="java.lang.String"> + <Item>index.html</Item> + </Array> + </Set> + </New> + </Item> + </Array> + </Set> + </New> + </Set> + +</Configure> Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/log4jHA.properties.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,85 @@ +## +# This is the default log4j configuration for distribution and CI tests. +## + +# Note: logging at INFO or DEBUG will significantly impact throughput! +log4j.rootCategory=WARN, dest2 + +log4j.logger.com.bigdata=WARN +log4j.logger.com.bigdata.btree=WARN +log4j.logger.com.bigdata.counters.History=ERROR +log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR +log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO +log4j.logger.com.bigdata.journal.CompactTask=INFO +log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR +log4j.logger.com.bigdata.rdf.load=INFO +log4j.logger.com.bigdata.rdf.store.DataLoader=INFO +log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO + +log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO + +# This will only work if you have the slf4j bridge setup. +#log4j.org.eclipse.jetty.util.log.Log=INFO + +# This can provide valuable information about open connections. +log4j.logger.com.bigdata.txLog=INFO + +# HA related loggers (debugging only) +log4j.logger.com.bigdata.ha=INFO +#log4j.logger.com.bigdata.haLog=INFO +##log4j.logger.com.bigdata.rwstore=ALL +#log4j.logger.com.bigdata.journal=INFO +##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL +log4j.logger.com.bigdata.journal.jini.ha=INFO +##log4j.logger.com.bigdata.service.jini.lookup=ALL +log4j.logger.com.bigdata.quorum=INFO +log4j.logger.com.bigdata.quorum.zk=INFO +#log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain +##log4j.logger.com.bigdata.io.writecache=ALL + +# dest2 includes the thread name and elapsed milliseconds. +# Note: %r is elapsed milliseconds. +# Note: %t is the thread name. 
+# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html +#log4j.appender.dest2=org.apache.log4j.ConsoleAppender +log4j.appender.dest2=org.apache.log4j.RollingFileAppender +log4j.appender.dest2.File=<%= node['systap-bigdataHA'][:log_dir] %>/HAJournalServer.log +log4j.appender.dest2.MaxFileSize=500MB +log4j.appender.dest2.MaxBackupIndex=20 +log4j.appender.dest2.layout=org.apache.log4j.PatternLayout +log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n + +## destPlain +#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender +#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout +#log4j.appender.destPlain.layout.ConversionPattern= + +## +# Summary query evaluation log (tab delimited file). +#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog +log4j.additivity.com.bigdata.bop.engine.QueryLog=false +log4j.appender.queryLog=org.apache.log4j.FileAppender +log4j.appender.queryLog.Threshold=ALL +log4j.appender.queryLog.File=<%= node['systap-bigdataHA'][:log_dir] %>/queryLog.csv +log4j.appender.queryLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. +log4j.appender.queryLog.BufferedIO=false +log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryLog.layout.ConversionPattern=%m + +## +# BOp run state trace (tab delimited file). Uncomment the next line to enable. +#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog +log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false +log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender +log4j.appender.queryRunStateLog.Threshold=ALL +log4j.appender.queryRunStateLog.File=<%= node['systap-bigdataHA'][:log_dir] %>/queryRunState.log +log4j.appender.queryRunStateLog.Append=true +# I find that it is nicer to have this unbuffered since you can see what +# is going on and to make sure that I have complete rule evaluation logs +# on shutdown. 
+log4j.appender.queryRunStateLog.BufferedIO=false +log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout +log4j.appender.queryRunStateLog.layout.ConversionPattern=%m Added: branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb =================================================================== --- branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb (rev 0) +++ branches/BIGDATA_RELEASE_1_3_0/bigdata/src/resources/deployment/vagrant/systap-aws-bigdata-ha/templates/default/zoo.cfg.erb 2014-04-14 12:59:10 UTC (rev 8118) @@ -0,0 +1,15 @@ +clientPort=<%= node['zookeeper'][:zoocfg][:clientPort] %> +dataDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +dataLogDir=<%= node['zookeeper'][:zoocfg][:dataDir] %> +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 +# the ensemble +server.1=<%= node['systap-bigdataHA'][:zk_server1] %>:2888:3888 +server.2=<%= node['systap-bigdataHA'][:zk_server2] %>:2888:3888 +server.3=<%= node['systap-bigdataHA'][:zk_server3] %>:2888:3888
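As a usage note for the scripts above: the status URLs that bin/setHosts.py prints at the end can be regenerated at any later time with a short Boto query in the same style. The following is a minimal sketch only (not part of this commit); it assumes aws.rc has been sourced and that the three instances carry the BIGDATA_HA_HOST_* Name tags set by the Vagrantfile:

    #! /usr/bin/python
    # Illustrative sketch (not part of this commit): re-list the public status
    # URLs of the running bigdata HA nodes, mirroring the output of setHosts.py.
    import os
    from boto import ec2

    names = [ os.environ["BIGDATA_HA_HOST_A"],
              os.environ["BIGDATA_HA_HOST_B"],
              os.environ["BIGDATA_HA_HOST_C"] ]

    ec2conn = ec2.connection.EC2Connection( os.environ["AWS_ACCESS_KEY_ID"],
                                            os.environ["AWS_SECRET_ACCESS_KEY"] )
    reservations = ec2conn.get_all_instances( filters={'instance-state-name': 'running'} )
    for instance in [i for r in reservations for i in r.instances]:
        if instance.tags.get('Name') in names:
            print "\thttp://" + instance.ip_address + ":8080/status"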