From: <tho...@us...> - 2013-11-22 02:07:02
Revision: 7579
          http://bigdata.svn.sourceforge.net/bigdata/?rev=7579&view=rev
Author:   thompsonbry
Date:     2013-11-22 02:06:54 +0000 (Fri, 22 Nov 2013)

Log Message:
-----------
Added a ServiceStarter-based script for launching:

- classserver
- reggie
- HAJournalServer

Do "ant deploy-artifact". Untar the resulting archive somewhere to
install. Edit bin/startHAServices.config to customize. Then run
bin/startHAServices to start the services. Repeat on each node that
will run the HAJournalServer.

Note: startHAServices saves the pid of the ServiceStarter process. That
pid could be used to write an init.d style script to start/stop the
services listed above on a given node (see the sketch below).

Note: You can also do "ant stage" and then edit the dist/bigdata/...
files in order to customize a deployment, then create a tarball from
that custom configuration. This tarball can then be wrapped up as an
rpm, etc., as desired.

This does NOT start zookeeper.

The AbstractServer.run() method is now invoked from within
HAJournalServer<init>() in order to be compatible with the
ServiceStarter.

See #766 (AWS installer)
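As the note above suggests, the saved pid lends itself to an init.d style
wrapper. A minimal sketch follows, assuming an install prefix of
/opt/bigdata and a pid file at /opt/bigdata/var/serviceStarter.pid; both
paths are illustrative, since this commit does not show where
startHAServices actually writes the pid:

  #!/bin/sh
  # Hypothetical init.d-style wrapper around bin/startHAServices.
  # ASSUMED paths: the install prefix and pid file location are placeholders.
  BIGDATA_HOME="/opt/bigdata"
  PID_FILE="$BIGDATA_HOME/var/serviceStarter.pid"

  case "$1" in
    start)
      "$BIGDATA_HOME/bin/startHAServices"
      ;;
    stop)
      # Kill the ServiceStarter process recorded by startHAServices.
      if [ -f "$PID_FILE" ]; then
        kill "$(cat "$PID_FILE")" && rm -f "$PID_FILE"
      fi
      ;;
    status)
      if [ -f "$PID_FILE" ] && kill -0 "$(cat "$PID_FILE")" 2>/dev/null; then
        echo "ServiceStarter running (pid $(cat "$PID_FILE"))."
      else
        echo "ServiceStarter not running."
      fi
      ;;
    *)
      echo "Usage: $0 {start|stop|status}" >&2
      exit 1
      ;;
  esac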
Modified Paths:
--------------
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
    branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
    branches/BIGDATA_RELEASE_1_3_0/build.xml
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README

Added Paths:
-----------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config

Removed Paths:
-------------
    branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/AbstractServer.java	2013-11-22 02:06:54 UTC (rev 7579)
@@ -1816,7 +1816,13 @@
     }
 
     /**
-     * Run the server (this should be invoked from <code>main</code>.
+     * Start the HAJournalServer and wait for it to terminate.
+     * <p>
+     * Note: This is invoked from within the constructor of the concrete service
+     * class. This ensures that all initialization of the service is complete
+     * and is compatible with the Apache River ServiceStarter (doing this in
+     * main() is not compatible since the ServiceStarter does not expect the
+     * service to implement Runnable).
      */
     @Override
     public void run() {

Modified: branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/bigdata-jini/src/java/com/bigdata/journal/jini/ha/HAJournalServer.java	2013-11-22 02:06:54 UTC (rev 7579)
@@ -556,6 +556,17 @@
 
         super(args, lifeCycle);
 
+        /*
+         * Start the HAJournalServer and wait for it to terminate.
+         *
+         * Note: This is invoked from within the constructor of the concrete
+         * service class. This ensures that all initialization of the service is
+         * complete and is compatible with the Apache River ServiceStarter
+         * (doing this in main() is not compatible since the ServiceStarter does
+         * not expect the service to implement Runnable).
+         */
+        run();
+
     }
 
     /*
@@ -4541,9 +4552,19 @@
         final HAJournalServer server = new HAJournalServer(args,
                 new FakeLifeCycle());
 
-        // Wait for the HAJournalServer to terminate.
-        server.run();
+        /*
+         * Note: The server.run() call was pushed into the constructor to be
+         * compatible with the ServiceStarter pattern.
+         */
+//        // Wait for the HAJournalServer to terminate.
+//        server.run();
 
+        /*
+         * Note: The System.exit() call here appears to be required for the
+         * timely release of allocated ports. Commenting out this line tends to
+         * cause startup failures in CI due to ports that are already (aka,
+         * "still") bound.
+         */
         System.exit(0);
 
     }

Modified: branches/BIGDATA_RELEASE_1_3_0/build.xml
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/build.xml	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/build.xml	2013-11-22 02:06:54 UTC (rev 7579)
@@ -1047,6 +1047,10 @@
           todir="${dist.bin}" />
     <chmod file="${dist.bin}/pstart" perm="755" />
 
+    <copy file="${src.resources}/bin/startHAServices"
+          todir="${dist.bin}" />
+    <chmod file="${dist.bin}/startHAServices" perm="755" />
+
     <copy file="${src.resources}/bin/config/browser.config"
           todir="${dist.bin.config}" />
     <copy file="${src.resources}/bin/config/reggie.config"
@@ -1082,6 +1086,10 @@
     <copy file="${server.log4j.from.file}"
           todir="${logging.to.path}" />
 
+    <property name="haserver.log4j.from.file" location="${src.resources}/HAJournal/log4jHA.properties" />
+    <copy file="${haserver.log4j.from.file}"
+          todir="${logging.to.path}" />
+
     <!-- Stage service-specific logging config file -->
     <property name="bigdata-jini.root"
               location="${bigdata-jini.dir}/src/java/com/bigdata" />
@@ -1108,6 +1116,12 @@
     <copy file="${src.resources.config}/bigdataCluster16.config"
           todir="${dist.var.config.jini}" />
 
+    <!-- Stage the HAJournal service config file -->
+    <copy file="${src.resources}/HAJournal/HAJournal.config"
+          todir="${dist.var.config.jini}" />
+    <copy file="${src.resources}/HAJournal/startHAServices.config"
+          todir="${dist.var.config.jini}" />
+
     <!-- Stage the infrastructure service config files -->
 
     <copy file="${src.resources.config}/jini/reggie.config"
@@ -1117,7 +1131,7 @@
     <copy file="${src.resources.config}/jini/startAll.config"
           todir="${dist.var.config.jini}" />
 
-	<!-- Stage top-level license file and copyright NOTICE file. -->
+    <!-- Stage top-level license file and copyright NOTICE file. -->
     <copy toDir="${dist.doc}">
       <fileset file="${bigdata.dir}/LICENSE.txt"/>
       <fileset file="${bigdata.dir}/NOTICE"/>
@@ -1183,6 +1197,7 @@
       <include name="bigdata/**" />
       <exclude name="bigdata/bin/disco-tool" />
       <exclude name="bigdata/bin/pstart" />
+      <exclude name="bigdata/bin/startHAServices" />
     </tarfileset>
 
     <!-- Add scripts separately, making them executable -->
@@ -1190,6 +1205,7 @@
     <tarfileset dir="${bigdata.dir}/dist" filemode="755">
       <include name="bigdata/bin/disco-tool" />
       <include name="bigdata/bin/pstart" />
+      <include name="bigdata/bin/startHAServices" />
     </tarfileset>
   </tar>
@@ -1325,6 +1341,7 @@
 
       <exclude name="dist/bigdata/bin/disco-tool" />
       <exclude name="dist/bigdata/bin/pstart" />
+      <exclude name="dist/bigdata/bin/startHAServices" />
     </tarfileset>
 
     <!-- Add dist files separately, minus scripts -->
@@ -1333,6 +1350,7 @@
       <include name="dist/bigdata/**" />
       <exclude name="dist/bigdata/bin/disco-tool" />
       <exclude name="dist/bigdata/bin/pstart" />
+      <exclude name="dist/bigdata/bin/startHAServices" />
     </tarfileset>
 
     <!-- Add dist scripts separately, making them executable -->
@@ -1340,6 +1358,7 @@
     <tarfileset dir="${bigdata.dir}" prefix="${version}" filemode="755">
       <include name="dist/bigdata/bin/disco-tool" />
       <include name="dist/bigdata/bin/pstart" />
+      <include name="dist/bigdata/bin/startHAServices" />
     </tarfileset>
   </tar>

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.config	2013-11-22 02:06:54 UTC (rev 7579)
@@ -63,23 +63,27 @@
     private static fedname = System.getProperty("FEDNAME","benchmark");
 
     // NanoSparqlServer (http) port.
-    private static nssPort = 8090;
+    private static nssPort = Integer.parseInt(System.getProperty("NSS_PORT","8090"));
 
     // write replication pipeline port (listener).
-    private static haPort = 9090;
+    private static haPort = Integer.parseInt(System.getProperty("HA_PORT","9090"));
 
     // The #of services in the write pipeline.
-    private static replicationFactor = 3;
+    private static replicationFactor = Integer.parseInt(System.getProperty("REPLICATION_FACTOR","3"));
 
     // The logical service identifier shared by all members of the quorum.
-    private static logicalServiceId = "HAJournal-1";
+    private static logicalServiceId = System.getProperty("LOGICAL_SERVICE_ID","HAJournal-1");
 
     // The ServiceID for *this* service -or- null to assign it dynamically.
     private static serviceId = null;
 
+    // The base directory for the federation.
+    private static fedDir = new File(System.getProperty("FED_DIR","."),fedname);
+
     // The service directory (if serviceId is null, then you must override).
 //  private static serviceDir = new File(fedname,""+serviceId);
-    private static serviceDir = new File(fedname,logicalServiceId+File.separator+"HAJournalServer");
+    //private static serviceDir = new File(fedname,logicalServiceId+File.separator+"HAJournalServer");
+    private static serviceDir = new File(fedDir,logicalServiceId+File.separator+"HAJournalServer");
 
     // journal data directory.
     private static dataDir = serviceDir;
@@ -235,7 +239,7 @@
      * the CLIENT port for the zookeeper server instance.
      */
     // ensemble
-    servers = "bigdata15:2081,bigdata16:2081,bigdata17:2081";
+    servers = System.getProperty("ZK_SERVERS","bigdata15:2081,bigdata16:2081,bigdata17:2081");
 
     /* Session timeout (optional). */
     sessionTimeout = bigdata.sessionTimeout;
@@ -349,16 +353,20 @@
      */
 
     // performance counters for internal queues.
-    //new NV(Journal.Options.COLLECT_QUEUE_STATISTICS,"true"), // off by default.
+    new NV(Journal.Options.COLLECT_QUEUE_STATISTICS,
+        System.getProperty("COLLECT_QUEUE_STATISTICS","false")),
 
     // platform and process performance counters (requires external s/w on some platforms)
-    //new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,"true"), // off by default.
-
+    new NV(Journal.Options.COLLECT_PLATFORM_STATISTICS,
+        System.getProperty("COLLECT_PLATFORM_STATISTICS","false")),
+
     // uses bigdata-ganglia module to report service metrics to ganglia.
-    //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,"true"), off by default.
+    new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_REPORT,
+        System.getProperty("GANGLIA_REPORT","false")),
 
     // uses bigdata-ganglia module to build internal model of cluster load.
-    //new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,"true"), // off by default.
+    new NV(com.bigdata.journal.GangliaPlugIn.Options.GANGLIA_LISTEN,
+        System.getProperty("GANGLIA_LISTENER","false")),
 
     }, bigdata.kb);
@@ -373,7 +381,7 @@
 
     create = true;
 
-    queryThreadPoolSize = 16;
+    queryThreadPoolSize = Integer.parseInt(System.getProperty("QUERY_THREAD_POOL_SIZE","16"));
 
     describeEachNamedGraph = true;
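Since HAJournal.config now reads its defaults through System.getProperty(...),
a deployment can override any of these values on the JVM command line instead
of editing the file. A sketch, assuming the server is launched directly; the
BIGDATA_CLASSPATH variable and the config file path are placeholders, and the
bundled startHAServices script is the intended way to set these properties:

  # Hypothetical direct launch with overridden configuration properties.
  # BIGDATA_CLASSPATH and the config file path are placeholders.
  java -DFEDNAME=benchmark \
       -DNSS_PORT=8090 \
       -DHA_PORT=9090 \
       -DREPLICATION_FACTOR=3 \
       -DLOGICAL_SERVICE_ID=HAJournal-1 \
       -DFED_DIR=/var/bigdata \
       -DZK_SERVERS="zk1:2081,zk2:2081,zk3:2081" \
       -cp "$BIGDATA_CLASSPATH" \
       com.bigdata.journal.jini.ha.HAJournalServer \
       dist/bigdata/var/config/jini/HAJournal.config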
Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/HAJournal.env	2013-11-22 02:06:54 UTC (rev 7579)
@@ -39,7 +39,7 @@
 
 # The log4j configuration file. Each service will log locally unless
 # you provide otherwise in your logging configuration.
-LOG4J_CONFIG=file:src/resources/HAJournal/log4j.properties
+LOG4J_CONFIG=file:src/resources/HAJournal/log4jHA.properties
 
 # The java logging configuration file. Each service will log locally unless
 # you provide otherwise in your logging configuration.

Modified: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/README	2013-11-22 02:06:54 UTC (rev 7579)
@@ -2,7 +2,9 @@
 Journal.
 
 Note: The bigdata scripts bundled in this directory are designed to be run
-from the root directory of the SVN checkout of the bigdata code base.
+from the root directory of the SVN checkout of the bigdata code base. This
+is intended for developers. The installation is done using the top-level ant
+build file and the "ant deploy-artifact" target.
 
 The basic procedure is:
 
@@ -68,8 +70,8 @@
                 commit point when the quorum is fully met.  These HALog files
                 can get large if you are doing a long running update.
 
-log4j.properties - A default log4j configuration file for use by the bigdata
-                services.
+log4jHA.properties - A default log4j configuration file for use by the bigdata
+                services.
 
 logging.properties - A default Java logging configuration. This may be used
                 to control the log levels for jini/river components inside
@@ -78,4 +80,3 @@
 
 policy.all - A default java permissions file. This file grants ALL
                 permissions. You may specify a more rigorous security policy.
-

Deleted: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties	2013-11-22 01:52:38 UTC (rev 7578)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties	2013-11-22 02:06:54 UTC (rev 7579)
@@ -1,80 +0,0 @@
-##
-# This is the default log4j configuration for distribution and CI tests.
-##
-
-# Note: logging at INFO or DEBUG will significantly impact throughput!
-log4j.rootCategory=WARN, dest2
-
-log4j.logger.com.bigdata=WARN
-log4j.logger.com.bigdata.btree=WARN
-log4j.logger.com.bigdata.counters.History=ERROR
-log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR
-log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO
-log4j.logger.com.bigdata.journal.CompactTask=INFO
-log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR
-log4j.logger.com.bigdata.rdf.load=INFO
-log4j.logger.com.bigdata.rdf.store.DataLoader=INFO
-log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO
-
-log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=ALL
-
-# HA related loggers (debugging only)
-#log4j.logger.com.bigdata.ha=INFO
-#log4j.logger.com.bigdata.txLog=INFO
-#log4j.logger.com.bigdata.haLog=INFO
-##log4j.logger.com.bigdata.rwstore=ALL
-#log4j.logger.com.bigdata.journal=INFO
-##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL
-#log4j.logger.com.bigdata.journal.jini.ha=INFO
-##log4j.logger.com.bigdata.service.jini.lookup=ALL
-#log4j.logger.com.bigdata.quorum=INFO
-#log4j.logger.com.bigdata.quorum.zk=INFO
-##log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain
-##log4j.logger.com.bigdata.io.writecache=ALL
-
-# dest2 includes the thread name and elapsed milliseconds.
-# Note: %r is elapsed milliseconds.
-# Note: %t is the thread name.
-# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html
-#log4j.appender.dest2=org.apache.log4j.ConsoleAppender
-log4j.appender.dest2=org.apache.log4j.RollingFileAppender
-log4j.appender.dest2.File=HAJournalServer.log
-log4j.appender.dest2.MaxFileSize=500MB
-log4j.appender.dest2.MaxBackupIndex=20
-log4j.appender.dest2.layout=org.apache.log4j.PatternLayout
-log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n
-
-## destPlain
-#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender
-#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout
-#log4j.appender.destPlain.layout.ConversionPattern=
-
-##
-# Summary query evaluation log (tab delimited file).
-#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog
-log4j.additivity.com.bigdata.bop.engine.QueryLog=false
-log4j.appender.queryLog=org.apache.log4j.FileAppender
-log4j.appender.queryLog.Threshold=ALL
-log4j.appender.queryLog.File=queryLog.csv
-log4j.appender.queryLog.Append=true
-# I find that it is nicer to have this unbuffered since you can see what
-# is going on and to make sure that I have complete rule evaluation logs
-# on shutdown.
-log4j.appender.queryLog.BufferedIO=false
-log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout
-log4j.appender.queryLog.layout.ConversionPattern=%m
-
-##
-# BOp run state trace (tab delimited file). Uncomment the next line to enable.
-#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog
-log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false
-log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender
-log4j.appender.queryRunStateLog.Threshold=ALL
-log4j.appender.queryRunStateLog.File=queryRunState.log
-log4j.appender.queryRunStateLog.Append=true
-# I find that it is nicer to have this unbuffered since you can see what
-# is going on and to make sure that I have complete rule evaluation logs
-# on shutdown.
-log4j.appender.queryRunStateLog.BufferedIO=false
-log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout
-log4j.appender.queryRunStateLog.layout.ConversionPattern=%m

Copied: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties (from rev 7501, branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4j.properties)
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/log4jHA.properties	2013-11-22 02:06:54 UTC (rev 7579)
@@ -0,0 +1,80 @@
+##
+# This is the default log4j configuration for distribution and CI tests.
+##
+
+# Note: logging at INFO or DEBUG will significantly impact throughput!
+log4j.rootCategory=WARN, dest2
+
+log4j.logger.com.bigdata=WARN
+log4j.logger.com.bigdata.btree=WARN
+log4j.logger.com.bigdata.counters.History=ERROR
+log4j.logger.com.bigdata.counters.XMLUtility$MyHandler=ERROR
+log4j.logger.com.bigdata.counters.query.CounterSetQuery=INFO
+log4j.logger.com.bigdata.journal.CompactTask=INFO
+log4j.logger.com.bigdata.relation.accesspath.BlockingBuffer=ERROR
+log4j.logger.com.bigdata.rdf.load=INFO
+log4j.logger.com.bigdata.rdf.store.DataLoader=INFO
+log4j.logger.com.bigdata.resources.AsynchronousOverflowTask=INFO
+
+#log4j.logger.com.bigdata.rdf.sail.webapp.NanoSparqlServer=INFO
+
+# HA related loggers (debugging only)
+#log4j.logger.com.bigdata.ha=INFO
+#log4j.logger.com.bigdata.txLog=INFO
+#log4j.logger.com.bigdata.haLog=INFO
+##log4j.logger.com.bigdata.rwstore=ALL
+#log4j.logger.com.bigdata.journal=INFO
+##log4j.logger.com.bigdata.journal.AbstractBufferStrategy=ALL
+#log4j.logger.com.bigdata.journal.jini.ha=INFO
+##log4j.logger.com.bigdata.service.jini.lookup=ALL
+#log4j.logger.com.bigdata.quorum=INFO
+#log4j.logger.com.bigdata.quorum.zk=INFO
+##log4j.logger.com.bigdata.quorum.quorumState=ALL,destPlain
+##log4j.logger.com.bigdata.io.writecache=ALL
+
+# dest2 includes the thread name and elapsed milliseconds.
+# Note: %r is elapsed milliseconds.
+# Note: %t is the thread name.
+# See http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html
+#log4j.appender.dest2=org.apache.log4j.ConsoleAppender
+log4j.appender.dest2=org.apache.log4j.RollingFileAppender
+log4j.appender.dest2.File=HAJournalServer.log
+log4j.appender.dest2.MaxFileSize=500MB
+log4j.appender.dest2.MaxBackupIndex=20
+log4j.appender.dest2.layout=org.apache.log4j.PatternLayout
+log4j.appender.dest2.layout.ConversionPattern=%-5p: %r %d{ISO8601} %X{hostname} %X{serviceUUID} %X{taskname} %X{timestamp} %X{resources} %t %l: %m%n
+
+## destPlain
+#log4j.appender.destPlain=org.apache.log4j.ConsoleAppender
+#log4j.appender.destPlain.layout=org.apache.log4j.PatternLayout
+#log4j.appender.destPlain.layout.ConversionPattern=
+
+##
+# Summary query evaluation log (tab delimited file).
+#log4j.logger.com.bigdata.bop.engine.QueryLog=INFO,queryLog
+log4j.additivity.com.bigdata.bop.engine.QueryLog=false
+log4j.appender.queryLog=org.apache.log4j.FileAppender
+log4j.appender.queryLog.Threshold=ALL
+log4j.appender.queryLog.File=queryLog.csv
+log4j.appender.queryLog.Append=true
+# I find that it is nicer to have this unbuffered since you can see what
+# is going on and to make sure that I have complete rule evaluation logs
+# on shutdown.
+log4j.appender.queryLog.BufferedIO=false
+log4j.appender.queryLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.queryLog.layout.ConversionPattern=%m
+
+##
+# BOp run state trace (tab delimited file). Uncomment the next line to enable.
+#log4j.logger.com.bigdata.bop.engine.RunState$TableLog=INFO,queryRunStateLog
+log4j.additivity.com.bigdata.bop.engine.RunState$TableLog=false
+log4j.appender.queryRunStateLog=org.apache.log4j.FileAppender
+log4j.appender.queryRunStateLog.Threshold=ALL
+log4j.appender.queryRunStateLog.File=queryRunState.log
+log4j.appender.queryRunStateLog.Append=true
+# I find that it is nicer to have this unbuffered since you can see what
+# is going on and to make sure that I have complete rule evaluation logs
+# on shutdown.
+log4j.appender.queryRunStateLog.BufferedIO=false
+log4j.appender.queryRunStateLog.layout=org.apache.log4j.PatternLayout
+log4j.appender.queryRunStateLog.layout.ConversionPattern=%m

Added: branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config
===================================================================
--- branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config	                        (rev 0)
+++ branches/BIGDATA_RELEASE_1_3_0/src/resources/HAJournal/startHAServices.config	2013-11-22 02:06:54 UTC (rev 7579)
@@ -0,0 +1,59 @@
+import java.io.File;
+import com.sun.jini.start.NonActivatableServiceDescriptor;
+import com.sun.jini.start.ServiceDescriptor;
+import com.sun.jini.config.ConfigUtil;
+
+/*
+ * Apache River ServiceStarter configuration.
+ *
+ * This configuration file is used to start the services required for
+ * a bigdata Highly Available Replication Cluster (HAJournalServer) on
+ * each node.
+
+    ClassServer: Provides access to downloadable jars in LIBDL_DIR.
+         reggie: Provides implementations of ServiceRegistrar.
+HAJournalServer: Bigdata HA server instance.
+
+*/
+com.sun.jini.start {
+	private static policyFile = System.getProperty("POLICY_FILE");
+	private static host = ConfigUtil.getHostName();
+	private static port = System.getProperty("JINI_CLASS_SERVER_PORT");
+	private static jskdl = " http://" + host + ":" + port + "/jsk-dl.jar";
+	private static libDir = System.getProperty("LIB_DIR");
+	private static libDLDir = System.getProperty("LIBDL_DIR");
+	private static configDir = System.getProperty("CONFIG_DIR")+File.separator+"jini"+File.separator;
+
+	serviceDescriptors = new ServiceDescriptor[]{
+
+		// ClassServer
+		new NonActivatableServiceDescriptor(
+			"",
+			policyFile,
+			libDir+"classserver.jar",
+			"com.sun.jini.tool.ClassServer",
+			new String[]{
+				"-port", port,
+				"-dir", libDLDir,
+				"-verbose"
+			}),
+
+		// Service Registrar (aka LookUp Service aka LUS)
+		new NonActivatableServiceDescriptor(
+			"http://" + host + ":" + port + "/reggie-dl.jar" + jskdl,
+			policyFile,
+			libDir+"reggie.jar",
+			"com.sun.jini.reggie.TransientRegistrarImpl",
+			new String[] { configDir+"reggie.config" }),
+
+		// HAJournalServer
+		new NonActivatableServiceDescriptor(
+			"", // TODO code base URL?
+			policyFile,
+			System.getProperty("HAJOURNAL_CLASSPATH"),
+			"com.bigdata.journal.jini.ha.HAJournalServer",
+			new String[] { configDir+"HAJournal.config" })
+
+	};
+
+}
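The startHAServices script itself is not part of this diff, but the
configuration above implies an invocation of the Apache River ServiceStarter
shaped roughly like the sketch below. Every path and property value here is
illustrative, not taken from the script; consult the installed
bin/startHAServices for the actual settings:

  # Hypothetical ServiceStarter launch consuming startHAServices.config.
  # All -D values below are placeholders for what the script would supply.
  java -DPOLICY_FILE=/opt/bigdata/var/config/policy/policy.all \
       -DJINI_CLASS_SERVER_PORT=8080 \
       -DLIB_DIR=/opt/bigdata/lib/ \
       -DLIBDL_DIR=/opt/bigdata/lib-dl \
       -DCONFIG_DIR=/opt/bigdata/var/config \
       -DHAJOURNAL_CLASSPATH=/opt/bigdata/lib/bigdata.jar \
       -Djava.security.policy=/opt/bigdata/var/config/policy/policy.all \
       -jar /opt/bigdata/lib/start.jar \
       /opt/bigdata/var/config/jini/startHAServices.config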