From: <tho...@us...> - 2013-11-25 16:45:55
Revision: 7591
http://bigdata.svn.sourceforge.net/bigdata/?rev=7591&view=rev
Author: thompsonbry
Date: 2013-11-25 16:45:46 +0000 (Mon, 25 Nov 2013)
Log Message:
-----------
Continued work toward an rpm artifact.
Modified Paths:
--------------
branches/BIGDATA_RELEASE_1_3_0/build.xml
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
Added Paths:
-----------
branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/
branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA
Removed Paths:
-------------
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA
branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv
Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 16:43:44 UTC (rev 7590)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml 2013-11-25 16:45:46 UTC (rev 7591)
@@ -880,6 +880,9 @@
<mkdir dir="${dist.var.config.jini}" />
<mkdir dir="${dist.doc}" />
<mkdir dir="${dist.doc.legal}" />
+ <mkdir dir="${dist.dir}/etc" />
+ <mkdir dir="${dist.dir}/etc/init.d" />
+ <mkdir dir="${dist.dir}/etc/bigdata" />
<!-- Copy the jar files created by the jar target to -->
<!-- an application-specific but non-version-specific -->
@@ -1051,12 +1054,12 @@
todir="${dist.bin}" />
<chmod file="${dist.bin}/startHAServices" perm="755" />
- <copy file="${src.resources}/bin/bigdataHA"
- todir="${dist.bin}" />
- <chmod file="${dist.bin}/bigdataHA" perm="755" />
+ <copy file="${src.resources}/etc/init.d/bigdataHA"
+ todir="${dist.dir}/etc/init.d" />
+ <chmod file="${dist.dir}/etc/init.d/bigdataHA" perm="755" />
- <copy file="${src.resources}/bin/bigdataHAEnv"
- todir="${dist.bin}" />
+ <copy file="${src.resources}/etc/bigdata/bigdataHA.config"
+ todir="${dist.dir}/etc/bigdata" />
<copy file="${src.resources}/bin/config/browser.config"
todir="${dist.bin.config}" />
@@ -1150,6 +1153,18 @@
<include name="**/LEGAL/*" />
</fileset>
</copy>
+
+ <!-- Stage README. -->
+ <copy file="${src.resources}/HAJournal/README"
+ todir="${dist.dir}/doc" />
+
+ <!-- Stage documentation from the wiki. -->
+ <get dest="${dist.doc}/HAJournalServer.html"
+ src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=HAJournalServer&printable=yes"
+ />
+ <get dest="${dist.doc}/NanoSparqlServer.html"
+ src="http://sourceforge.net/apps/mediawiki/bigdata/index.php?title=NanoSparqlServer&printable=yes"
+ />
</target>
@@ -1193,6 +1208,14 @@
bigdata/doc/LEGAL - license files for dependencies.
bigdata/doc/LICENSE.txt - bigdata license file.
bigdata/doc/NOTICE - copyright NOTICE files.
+ bigdata/doc/docs - javadoc (FIXME INSTALL JAVADOC, HA wiki page)
+ bigdata/etc/init.d/bigdataHA - HA services start/stop script.
+ bigdata/etc/bigdata/bigdataHA.config - HA services required config file.
+
+ Note: This directory structure is currently reused for the rpm, but the
+ top-level of the rpm directory structure includes the release version as
+ bigdata.X.Y.Z rather than just "bigdata". I think that this is a better
+ practice and the two may be converged.
-->
<target name="deploy-artifact" depends="clean, stage"
description="Create compressed tar file for deployment.">
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-25 16:43:44 UTC (rev 7590)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README 2013-11-25 16:45:46 UTC (rev 7591)
@@ -1,82 +1,141 @@
-This directory contains some sample configuration for a highly available
-Journal.
+Bigdata Highly Available Replication Cluster
-Note: The bigdata scripts bundled in this directory are designed to be run
-from the root directory of the SVN checkout of the bigdata code base. This
-is used for developers. The installation is done using the top-level ant
-build file and the "ant deploy-artifact" target.
+========== INSTALL ==========
-The basic procedure is:
+0. The nodes MUST have synchronized clocks, both for logging and to
+ ensure that the transaction services have closely correlated clocks
+ for assigning timestamps. Make sure that ntp or a similar service
+ is running to synchronize the clocks.
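+
+   For example, on a node running ntpd you can confirm that the clock
+   is synchronized against at least one peer (a sketch; the command
+   differs for other time daemons):
+
+      ntpq -p
+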
-0. The nodes MUST have synchronized clocks, both for logging and to ensure
- that the transaction services have closely correlated clocks for assigning
- timestamps. Make sure that ntp or a similar service is running to synchronize
- the clocks.
+1. Edit the various configuration files. At a minimum, you must edit
+ bigdataHA.config.
-1. Edit the various configuration files. You will have to specify the
- replicationFactor for the HAJournal in the HAJournal.config file. Make
- sure to check all the configuration properties.
+2. Make sure that ZooKeeper is up and running with a consistent
+ configuration and that it is logging somewhere where you can find
+ the log later. For a highly available ZooKeeper configuration, you
+ need to be running at least 3 ZooKeeper nodes. Consult the
+ ZooKeeper documentation for more information.
-2. Make sure that zookeeper is up and running with a consistent configuration
- and that it is logging somewhere where you can find the log later. A good
- approach is to use nohup so the console output will wind up in the directory
- from which you start zookeeper. Do not put zookeeper in the background or
- it can block once the console buffer is full. For a highly available zk
- configuration, you need to be running at least 3 zk nodes. Consult the zk
- documentation for more information.
+ Bigdata does NOT start/stop Apache ZooKeeper. ZooKeeper is
+ generally administered separately. If you are not already using
+ Apache ZooKeeper, then you should install it on three machines
+ (or VMs) with fixed IP addresses.
-3. Start the ClassServer on each machine. This will let the service registrar
- find the downloadable jars on that machine.
+ Note: If you begin with a standalone ZooKeeper instance, then you
+ WILL NOT be able to automatically migrate to a highly available
+ configuration without stopping your standalone ZooKeeper instance.
+ Your life will be significantly easier if you begin with a highly
+ available ZooKeeper instance. Bigdata does not put a heavy load on
+ ZooKeeper, but running bigdata and ZooKeeper on the same instances
+ will make it more complex to administer your environment since
+ stopping a single node will reduce availability for both ZooKeeper
+ and bigdata. A recommended practice is to allocate three ZooKeeper
+ VMs with fixed IP addresses when you begin to set up your bigdata
+ cluster.
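+
+   To check that a ZooKeeper server is answering, you can use
+   ZooKeeper's "ruok" four letter command (a sketch; substitute your
+   own host and client port):
+
+      echo ruok | nc zk-host 2181
+
+   A healthy server replies "imok".
+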
-4. Start the service registrar on at least one machine (as configured by
- the locators). A highly available jini/river service will run multiple
- service registrar and provide either multiple unicast locators or support
- multicast discovery of the service registrar. Consult the jini/river
- documentation for more information.
-
-5. Start the HAJournalServer on [k] machines, where [k] is the replication
- factor you specified in the HAJournal.config file. The quorum should
- meet once (k+1)/2 services join (majority rule). At this point one of
- the nodes will be elected as the leader. You can write on that node
- (e.g., using SPARQL UPDATE). You can read on any node that is joined
- with the met quorum.
-
- Note: The default log4j configuration writes onto a file named
- "HAJournalServer.log" -- that is where you need to look for errors
- and any other information about the running HAJournalServer process.
+3. Once Apache ZooKeeper is set up, do:
-A brief description of the files in this directory follows:
+   sudo /etc/init.d/bigdataHA start
-HAJournal.env - A shell script containing sample configuration values. This
- is sourced by the various scripts. You need to review all
- of these settings.
+ This will start the ClassServer, the service registrar (Reggie),
+ and the HAJournalServer. All of these processes will run inside of
+ a single JVM named "ServiceStarter". See below for more information
+ on these services.
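+
+   The same script also accepts "stop", "status", and "restart". For
+   example, to check on and then shut down the services:
+
+      sudo /etc/init.d/bigdataHA status
+      sudo /etc/init.d/bigdataHA stop
+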
-HAJournal.config - A sample configuration file for the HAJournalServer. You
- need to review the settings in this file as well.
+========== KEY FILES ==========
-classServer.sh - A shell script that will start the jini class server (for
- downloadable code).
-
-lookupStarter.sh - A shell script that will start the jini service registrar.
+/etc/init.d/bigdataHA
-HAJournalServer.sh - A shell script that will start the HAJournalServer.
+ An init.d script to start/stop the bigdata HA services.
- The server process will create a directory in which it
- logs the replicated writes in case other services need to
- resynchronize. This directory is named "HALog" by default
- and may be located on a normal disk. The ha-log files in
- that directory are pure append files and do not need to be
- on a fast disk. The ha-log files will be purged at any
- commit point when the quorum is fully met. These HALog files
- can get large if you are doing a long running update.
+/etc/bigdata/bigdataHA.config - configuration for the HA installation.
-log4jHA.properties - A default log4j configuration file for use by the bigdata
- services.
-
-logging.properties - A default Java logging configuration. This may be used
- to control the log levels for jini/river components inside
- of the bigdata services. Those components use java logging
- rather than log4j.
+ This file is sourced by /etc/init.d/bigdataHA and provides the
+ critical configuration variables for your installation. The
+ environment variables set in this script are passed through into
+ startHAServices and from there into the HAJournal.config file. You
+ need to review these settings.
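+
+   For example, to run with a different replication factor, you might
+   uncomment and set the corresponding variable in this file (a
+   sketch; see the comments in the file for the full list):
+
+      export REPLICATION_FACTOR=5
+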
-policy.all - A default java permissions file. This file grants ALL permissions.
- You may specify a more rigorous security policy.
+The following are located under the installation root:
+
+bin/startHAServices
+
+ A script that runs the Apache River ServiceStarter.
+
+bin/disco-tool
+
+ A utility that can be used to identify running Apache River
+ services.
+
+doc/
+
+ Documentation.
+
+lib/
+
+ The bigdata jar and other dependencies.
+
+lib-dl/
+
+ Downloadable jars for Apache River.
+
+lib-ext/
+
+ Security policy provider for Apache River.
+
+var/config/startHAServices.config
+
+ An Apache River ServiceStarter configuration for starting:
+
+ - ClassServer : This provides downloadable code for the lib-dl
+ directory required to run Reggie. An instance of this service
+ is started on every node.
+
+ - Reggie : This is the Apache River service registrar. Bigdata
+ services discover service registrars using locators and then
+ register themselves. The service registrar is also used by the
+ bigdata services to discover one another. The set of locators is
+ defined using the LOCATORS environment variable in
+ /etc/bigdata/bigdataHA.config; and
+
+ - HAJournalServer : This is the highly available bigdata graph
+ database engine and RDF/SPARQL end point. The service process
+ maintains all of its state in the "serviceDir". The location of
+ that directory is determined by the FED_DIR environment variable
+ and the HAJournal.config file. Important files in this
+ directory include:
+
+ serviceDir/service.id - the assigned ServiceID for this service.
+ serviceDir/bigdata-ha.jnl - the journal data file.
+ serviceDir/HALog/* - the transaction log files.
+ serviceDir/snapshot - full backups of the journal.
+
+var/config/HAJournal.config
+
+ An Apache River configuration file for HAJournalServer. You should
+ review the settings in this file. The most relevant will be the
+ configuration parameters for the default kb instance, especially
+ whether it is in triples mode or quads mode. The configuration
+ parameters for the journal are also specified in this file. Many,
+ but not all, parameters can be overridden through environment
+ variables defined in /etc/bigdata/bigdataHA.config. This file is
+ also used to configure the online backup policy (snapshotPolicy) and
+ the point-in-time restore window (restorePolicy).
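+
+   For example, rather than editing this file, a parameter with an
+   environment variable hook can be overridden from
+   /etc/bigdata/bigdataHA.config (a sketch):
+
+      export NSS_PORT=8081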
+
+var/logging/log4jHA.properties
+
+ The HAJournalServer log4j configuration file. Note: The default
+ log4j configuration writes onto a file named "HAJournalServer.log"
+ -- that is where you need to look for errors and any other
+ information about the running HAJournalServer process.
+
+var/logging/logging.properties
+
+ A default Java logging configuration. This may be used to control
+ the log levels for jini/river components inside of the bigdata
+ services. Those components use java logging rather than log4j.
+
+var/policy/policy.all
+
+ A default java permissions file. This file grants ALL permissions.
+ You may specify a more rigorous security policy.
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README.dev 2013-11-25 16:45:46 UTC (rev 7591)
@@ -0,0 +1,82 @@
+This directory contains some sample configuration for a highly available
+Journal.
+
+Note: The bigdata scripts bundled in this directory are designed to be run
+from the root directory of the SVN checkout of the bigdata code base. This
+is used for developers. The installation is done using the top-level ant
+build file and the "ant deploy-artifact" target.
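+
+For example, from the root of the SVN checkout (a sketch):
+
+   ant deploy-artifact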
+
+The basic procedure is:
+
+0. The nodes MUST have synchronized clocks, both for logging and to ensure
+ that the transaction services have closely correlated clocks for assigning
+ timestamps. Make sure that ntp or a similar service is running to synchronize
+ the clocks.
+
+1. Edit the various configuration files. You will have to specify the
+ replicationFactor for the HAJournal in the HAJournal.config file. Make
+ sure to check all the configuration properties.
+
+2. Make sure that zookeeper is up and running with a consistent configuration
+ and that it is logging somewhere where you can find the log later. A good
+ approach is to use nohup so the console output will wind up in the directory
+ from which you start zookeeper. Do not put zookeeper in the background or
+ it can block once the console buffer is full. For a highly available zk
+ configuration, you need to be running at least 3 zk nodes. Consult the zk
+ documentation for more information.
+
+3. Start the ClassServer on each machine. This will let the service registrar
+ find the downloadable jars on that machine.
+
+4. Start the service registrar on at least one machine (as configured by
+ the locators). A highly available jini/river setup will run multiple
+ service registrars and either provide multiple unicast locators or
+ support multicast discovery of the service registrar. Consult the jini/river
+ documentation for more information.
+
+5. Start the HAJournalServer on [k] machines, where [k] is the replication
+ factor you specified in the HAJournal.config file. The quorum should
+ meet once (k+1)/2 services join (majority rule). At this point one of
+ the nodes will be elected as the leader. You can write on that node
+ (e.g., using SPARQL UPDATE). You can read on any node that is joined
+ with the met quorum.
+
+ Note: The default log4j configuration writes onto a file named
+ "HAJournalServer.log" -- that is where you need to look for errors
+ and any other information about the running HAJournalServer process.
+
+A brief description of the files in this directory follows:
+
+HAJournal.env - A shell script containing sample configuration values. This
+ is sourced by the various scripts. You need to review all
+ of these settings.
+
+HAJournal.config - A sample configuration file for the HAJournalServer. You
+ need to review the settings in this file as well.
+
+classServer.sh - A shell script that will start the jini class server (for
+ downloadable code).
+
+lookupStarter.sh - A shell script that will start the jini service registrar.
+
+HAJournalServer.sh - A shell script that will start the HAJournalServer.
+
+ The server process will create a directory in which it
+ logs the replicated writes in case other services need to
+ resynchronize. This directory is named "HALog" by default
+ and may be located on a normal disk. The ha-log files in
+ that directory are pure append files and do not need to be
+ on a fast disk. The ha-log files will be purged at any
+ commit point when the quorum is fully met. These HALog files
+ can get large if you are doing a long running update.
+
+log4jHA.properties - A default log4j configuration file for use by the bigdata
+ services.
+
+logging.properties - A default Java logging configuration. This may be used
+ to control the log levels for jini/river components inside
+ of the bigdata services. Those components use java logging
+ rather than log4j.
+
+policy.all - A default java permissions file. This file grants ALL permissions.
+ You may specify a more rigorous security policy.
Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA 2013-11-25 16:43:44 UTC (rev 7590)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA 2013-11-25 16:45:46 UTC (rev 7591)
@@ -1,131 +0,0 @@
-#!/bin/bash
-
-# init.d style script for bigdata HA services. The script can be used
-# to 'start' or 'stop' services.
-#
-# Environment:
-#
-# binDir - The directory containing the installed scripts.
-# pidFile - The pid is written on this file.
-#
-# Misc.
-#
-# See http://tldp.org/LDP/abs/html/index.html
-#
-# Note: Blank lines are significant in shell scripts.
-#
-# Note: Children must do "exit 0" to indicate success.
-#
-# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix
-
-# Source function library (just used for 'action'). If you don't have this
-# it SHOULD automatically use the inline definition for "action()".
-if [ -f "/etc/init.d/functions" ]; then
- . /etc/init.d/functions
-else
-# Run some action. Log its output. No fancy colors. First argument is the
-# label for the log file. Remaining arguments are the command to execute
-# and its arguments, if any.
- action() {
- local STRING rc
- STRING=$1
- echo -n "$STRING "
- shift
- "$@" && echo -n "[OK]" || echo -n "[FAILED]"
- rc=$?
- echo
- return $rc
- }
-fi
-
-# Where the scripts live.
-cd `dirname $0`
-
-##
-# Highly Recommended OS Tuning.
-##
-
-# Do not swap out applications while there is free memory.
-#/sbin/sysctl -w vm.swappiness=0
-
-# Setup the environment.
-source ./bigdataHAEnv
-
-if [ -z "$binDir" ]; then
- echo $"$0 : environment not setup: binDir is undefined."
- exit 1;
-fi
-if [ -z "$pidFile" ]; then
- echo $"$0 : environment not setup: pidFile is undefined"
- exit 1;
-fi
-
-#
-# See how we were called.
-#
-case "$1" in
- start)
-#
-# Start the ServiceStarter and child services if not running.
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
-# The process has died so remove the old pid file.
- echo $"`date` : `hostname` : $pid died?"
- rm -f "$pidFile"
- fi
- fi
- if [ ! -f "$pidFile" ]; then
- action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices
- else
- echo $"`date` : `hostname` : running as $pid"
- fi
- ;;
- stop)
-#
-# Stop the ServiceStarter and all child services.
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
-# The process has died so remove the old pid file.
- echo $"`date` : `hostname` : $pid died?"
- rm -f "$pidFile"
- else
- action $"`date` : `hostname` : bringing down services: " kill $pid
- rm -f "$pidFile"
- fi
- fi
- ;;
- status)
-#
-# Report status for the ServicesManager (up or down).
-#
- if [ -f "$pidFile" ]; then
- read pid < "$pidFile"
- pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
- if [ -z "$pidno" ]; then
- echo $"`date` : `hostname` : process died? pid=$pid."
- else
- echo $"`date` : `hostname` : running as $pid."
- fi
- else
- echo $"`date` : `hostname` : not running."
- fi
- ;;
- restart)
- $0 stop
- $0 start
- ;;
- *)
-#
-# Usage
-#
- echo $"Usage: $0 {start|stop|status|restart}"
- exit 1
-esac
-
-exit 0
Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv 2013-11-25 16:43:44 UTC (rev 7590)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHAEnv 2013-11-25 16:45:46 UTC (rev 7591)
@@ -1,11 +0,0 @@
-# Environment for bigdata HA services.
-#
-# binDir - The directory containing the installed scripts.
-# pidFile - The pid is written on this file.
-#
-# Note: You MUST provide the location of the executable scripts and the
-# pid file that is written by $binDir/startHAServices. These SHOULD be
-# absolute path names.
-
-#binDir=
-#pidFile=
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-25 16:43:44 UTC (rev 7590)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/startHAServices 2013-11-25 16:45:46 UTC (rev 7591)
@@ -65,17 +65,6 @@
export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081";
fi
-# All of these have defaults.
-#export REPLICATION_FACTOR=3
-#export HA_PORT=9090
-#export NSS_PORT=8080
-#export QUERY_THREAD_POOL_SIZE=
-#export COLLECT_QUEUE_STATISTICS=
-#export COLLECT_PLATFORM_STATISTICS=
-#export GANGLIA_REPORT=
-#export GANGLIA_LISTENER=
-#export SYSSTAT_DIR=
-
export HA_OPTS="\
-DFEDNAME=${FEDNAME}\
-DLOGICAL_SERVICE_ID=${LOGICAL_SERVICE_ID}\
Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/bigdata/bigdataHA.config 2013-11-25 16:45:46 UTC (rev 7591)
@@ -0,0 +1,45 @@
+# Environment for bigdata HA services.
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Note: You MUST provide the location of the executable scripts and the
+# pid file that is written by $binDir/startHAServices. These SHOULD be
+# absolute path names.
+
+#binDir=
+#pidFile=
+
+##
+# The following variables configure the startHAServices script, which
+# passes them through to HAJournal.config.
+##
+
+# Name of the bigdata federation of services. Override for a real install.
+export FEDNAME=bigdataInstallTest
+
+# This is different for each HA replication cluster in the same federation
+# of services. If you have multiple such replication cluster, then just
+# given each such cluster its own name.
+export LOGICAL_SERVICE_ID=HAJournalServer-1
+
+# Local directory where the service will store its state.
+export FED_DIR=/var/bigdata/${FEDNAME}
+
+# Apache River - NO default for "LOCATORS".
+export GROUPS="$FEDNAME"
+#export LOCATORS="jini://bigdata15/,jini://bigdata16/,jini://bigdata17/"
+
+# Apache ZooKeeper - NO default.
+#export ZK_SERVERS="bigdata15:2081,bigdata16:2081,bigdata17:2081";
+
+# All of these have defaults. Override as necessary.
+#export REPLICATION_FACTOR=3
+#export HA_PORT=9090
+#export NSS_PORT=8080
+#export QUERY_THREAD_POOL_SIZE=
+#export COLLECT_QUEUE_STATISTICS=
+#export COLLECT_PLATFORM_STATISTICS=
+#export GANGLIA_REPORT=
+#export GANGLIA_LISTENER=
+#export SYSSTAT_DIR=
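+
+# Example: a minimal production configuration (a sketch; hostnames and
+# ports are placeholders) overrides FEDNAME and sets the two variables
+# that have NO default:
+#
+# export FEDNAME=myFederation
+# export LOCATORS="jini://host1/,jini://host2/,jini://host3/"
+# export ZK_SERVERS="host1:2181,host2:2181,host3:2181"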
Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA (from rev 7589, branches/BIGDATA_RELEASE_1_3_0/src/resources/bin/bigdataHA)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/etc/init.d/bigdataHA 2013-11-25 16:45:46 UTC (rev 7591)
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+# init.d style script for bigdata HA services. The script can be used
+# to 'start' or 'stop' services.
+#
+# Environment:
+#
+# binDir - The directory containing the installed scripts.
+# pidFile - The pid is written on this file.
+#
+# Misc.
+#
+# See http://tldp.org/LDP/abs/html/index.html
+#
+# Note: Blank lines are significant in shell scripts.
+#
+# Note: Children must do "exit 0" to indicate success.
+#
+# Note: Convert DOS cr-lf to unix style in emacs: C-x RET f then unix
+
+# Source function library (just used for 'action'). If you don't have this
+# it SHOULD automatically use the inline definition for "action()".
+if [ -f "/etc/init.d/functions" ]; then
+ . /etc/init.d/functions
+else
+# Run some action. Log its output. No fancy colors. First argument is the
+# label for the log file. Remaining arguments are the command to execute
+# and its arguments, if any.
+ action() {
+ local STRING rc
+ STRING=$1
+ echo -n "$STRING "
+ shift
+ "$@" && echo -n "[OK]" || echo -n "[FAILED]"
+ rc=$?
+ echo
+ return $rc
+ }
+fi
+
+# Where the scripts live.
+cd `dirname $0`
+
+##
+# Highly Recommended OS Tuning.
+##
+
+# Do not swap out applications while there is free memory.
+#/sbin/sysctl -w vm.swappiness=0
+
+# Setup the environment (the configuration file is installed under /etc/bigdata).
+source /etc/bigdata/bigdataHA.config
+
+if [ -z "$binDir" ]; then
+ echo $"$0 : environment not setup: binDir is undefined."
+ exit 1;
+fi
+if [ -z "$pidFile" ]; then
+ echo $"$0 : environment not setup: pidFile is undefined"
+ exit 1;
+fi
+
+#
+# See how we were called.
+#
+case "$1" in
+ start)
+#
+# Start the ServiceStarter and child services if not running.
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+# The process has died so remove the old pid file.
+ echo $"`date` : `hostname` : $pid died?"
+ rm -f "$pidFile"
+ fi
+ fi
+ if [ ! -f "$pidFile" ]; then
+ action $"`date` : `hostname` : bringing up services: " $binDir/startHAServices
+ else
+ echo $"`date` : `hostname` : running as $pid"
+ fi
+ ;;
+ stop)
+#
+# Stop the ServiceStarter and all child services.
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+# The process has died so remove the old pid file.
+ echo $"`date` : `hostname` : $pid died?"
+ rm -f "$pidFile"
+ else
+ action $"`date` : `hostname` : bringing down services: " kill $pid
+ rm -f "$pidFile"
+ fi
+ fi
+ ;;
+ status)
+#
+# Report status for the ServicesManager (up or down).
+#
+ if [ -f "$pidFile" ]; then
+ read pid < "$pidFile"
+ pidno=$( ps ax | grep $pid | awk '{ print $1 }' | grep $pid )
+ if [ -z "$pidno" ]; then
+ echo $"`date` : `hostname` : process died? pid=$pid."
+ else
+ echo $"`date` : `hostname` : running as $pid."
+ fi
+ else
+ echo $"`date` : `hostname` : not running."
+ fi
+ ;;
+ restart)
+ $0 stop
+ $0 start
+ ;;
+ *)
+#
+# Usage
+#
+ echo $"Usage: $0 {start|stop|status|restart}"
+ exit 1
+esac
+
+exit 0